diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_50_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_50_km_h.py new file mode 100644 index 00000000..b698bd8e --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_50_km_h.py @@ -0,0 +1,191 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + + +#import os +#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +gpus = tf.config.experimental.list_physical_devices('GPU') +for gpu in gpus: + tf.config.experimental.set_memory_growth(gpu, True) + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model, compile=False) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
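The update_frame helper here letterboxes non-square frames before handing them to the GUI. A minimal standalone sketch of that idea, assuming an HxWx3 NumPy image (pad_to_square is a hypothetical name, not part of the repo):

import numpy as np

def pad_to_square(data):
    # Zero-pad the shorter axis on both sides so the frame becomes square,
    # splitting any odd remainder between the two sides.
    h, w = data.shape[:2]
    if h == w:
        return data
    diff = abs(h - w)
    before, after = diff // 2, diff - diff // 2
    if h > w:
        pad_width = ((0, 0), (before, after), (0, 0))
    else:
        pad_width = ((before, after), (0, 0), (0, 0))
    return np.pad(data, pad_width, mode='constant', constant_values=0)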
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(66, 200) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 50 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.first_acceleration = False + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_70_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_70_km_h.py index 80e75693..1e8d8b65 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_70_km_h.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_70_km_h.py @@ -134,7 
+134,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py index bda34764..4e7bea2b 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_V_MAX_30.py @@ -131,11 +131,10 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ - #ChannelDropout(p=1.0), Normalize() ]) image = AUGMENTATIONS_TEST(image=img_base) diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input.py index 37fcd41e..0448a2d1 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input.py @@ -125,7 +125,7 @@ def execute(self): ] AUGMENTATIONS_TEST = Compose([ - GridDropout(p=1.0, ratio=0.9) + GridDropout(p=1.0) ]) bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) @@ -139,7 +139,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input_2.py index 37fcd41e..a9eab9b0 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input_2.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_broken_input_2.py @@ -139,7 +139,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py index 9ab3105a..1db1f353 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v.py @@ -133,7 +133,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ @@ -147,7 +147,7 @@ def execute(self): self.bird_eye_view_unique_images += 1 self.previous_bird_eye_view_image = img - velocity_dim = np.full((150, 50), self.previous_speed/30) + velocity_dim = np.full((200, 66), self.previous_speed/30) new_img_vel = np.dstack((img, velocity_dim)) img = new_img_vel diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_50_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_50_km_h.py new file mode 100644 index 
00000000..c788baf9 --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_50_km_h.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
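This previous_v variant feeds the last measured speed to the network as an extra constant image channel. A small sketch of that step, assuming the normalized bird's-eye crop has shape (200, 66, 3) and speeds of roughly 0-30 km/h (add_velocity_channel is a hypothetical helper name):

import numpy as np

def add_velocity_channel(img, previous_speed_kmh, max_speed=30.0):
    # Broadcast the scaled speed to a constant (H, W) plane and stack it
    # behind the RGB channels, giving a (H, W, 4) network input.
    velocity_plane = np.full(img.shape[:2], previous_speed_kmh / max_speed)
    return np.dstack((img, velocity_plane))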
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(66, 200) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + velocity_dim = np.full((200, 66), self.previous_speed/30) + new_img_vel = np.dstack((img, velocity_dim)) + img = new_img_vel + + img = np.expand_dims(img, axis=0) + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + + if vehicle_speed < 50 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.first_acceleration = False + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py index 62952e24..70100b04 100644 --- 
a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_70_km_h.py @@ -135,7 +135,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ @@ -149,7 +149,7 @@ def execute(self): self.bird_eye_view_unique_images += 1 self.previous_bird_eye_view_image = img - velocity_dim = np.full((150, 50), self.previous_speed/30) + velocity_dim = np.full((200, 66), self.previous_speed/30) new_img_vel = np.dstack((img, velocity_dim)) img = new_img_vel diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py index 2ac2ae31..c5bf0edc 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input.py @@ -126,7 +126,7 @@ def execute(self): ] AUGMENTATIONS_TEST = Compose([ - GridDropout(p=1.0, ratio=0.9) + GridDropout(p=1.0) ]) bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) @@ -140,7 +140,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ @@ -154,7 +154,7 @@ def execute(self): self.bird_eye_view_unique_images += 1 self.previous_bird_eye_view_image = img - velocity_dim = np.full((150, 50), self.previous_speed/30) + velocity_dim = np.full((200, 66), self.previous_speed/30) new_img_vel = np.dstack((img, velocity_dim)) img = new_img_vel diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py index 2ac2ae31..ae23f5fb 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_previous_v_broken_input_2.py @@ -140,7 +140,7 @@ def execute(self): self.update_pose(self.pose.getPose3d()) - image_shape=(50, 150) + image_shape=(66, 200) img_base = cv2.resize(bird_eye_view_1, image_shape) AUGMENTATIONS_TEST = Compose([ @@ -154,7 +154,7 @@ def execute(self): self.bird_eye_view_unique_images += 1 self.previous_bird_eye_view_image = img - velocity_dim = np.full((150, 50), self.previous_speed/30) + velocity_dim = np.full((200, 66), self.previous_speed/30) new_img_vel = np.dstack((img, velocity_dim)) img = new_img_vel diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_50_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_50_km_h.py new file mode 100644 index 00000000..d10595b4 --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_50_km_h.py @@ -0,0 +1,299 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, 
RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.previous_speed = 0 + self.previous_bird_eye_view_image = 0 + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + + self.image_1_V = 0 + self.image_2_V = 0 + self.image_3_V = 0 + self.image_4_V = 0 + self.image_5_V = 0 + self.image_6_V = 0 + self.image_7_V = 0 + self.image_8_V = 0 + self.image_9_V = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
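The x3 brains keep a sliding window of recent bird's-eye frames and pass three of them (oldest, middle, newest) to the network, here together with their speeds. A compact sketch of the same bookkeeping using a deque instead of the image_1..image_9 attributes (FrameHistory is a hypothetical helper, not part of the repo):

from collections import deque

import numpy as np

class FrameHistory:
    def __init__(self, size=9):
        # maxlen makes the deque drop the oldest frame automatically.
        self.frames = deque(maxlen=size)

    def push(self, frame):
        self.frames.append(frame)

    def ready(self):
        return len(self.frames) == self.frames.maxlen

    def sample(self):
        # Mirrors image_1, image_4 and image_9 in the brain here:
        # positions 0, 3 and 8 of the 9-frame window.
        frames = list(self.frames)
        return np.stack([frames[0], frames[3], frames[-1]])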
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + self.bird_eye_view_images += 1 + if (self.previous_bird_eye_view_image==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.previous_bird_eye_view_image = img + + if type(self.image_1) is int: + self.image_1 = img + self.image_1_V = 0 + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_2) is int: + self.image_2 = img + self.image_2_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_3) is int: + self.image_3 = img + self.image_3_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_4) is int: + self.image_4 = img + self.image_4_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_5) is int: + self.image_5 = img + self.image_5_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_6) is int: + self.image_6 = img + self.image_6_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_7) is int: + self.image_7 = img + self.image_7_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 
* math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_8) is int: + self.image_8 = img + self.image_8_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + elif type(self.image_9) is int: + self.image_9 = img + self.image_9_V = self.previous_speed + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + else: + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = img + + self.image_1_V = self.image_2_V + self.image_2_V = self.image_3_V + self.image_3_V = self.image_4_V + self.image_4_V = self.image_5_V + self.image_5_V = self.image_6_V + self.image_6_V = self.image_7_V + self.image_7_V = self.image_8_V + self.image_8_V = self.image_9_V + self.image_9_V = self.previous_speed + + velocity_dim_1 = np.full((150, 50), self.image_1_V/30) + image_1 = np.dstack((self.image_1, velocity_dim_1)) + + velocity_dim_4 = np.full((150, 50), self.image_4_V/30) + image_4 = np.dstack((self.image_4, velocity_dim_4)) + + velocity_dim_9 = np.full((150, 50), self.image_9_V/30) + image_9 = np.dstack((self.image_9, velocity_dim_9)) + + img = [image_1, image_4 , image_9] + + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + self.previous_speed = vehicle_speed + if vehicle_speed < 50 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.first_acceleration = False + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + + + + + diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py index 373c55e8..53399b98 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input.py @@ -147,7 +147,7 @@ def execute(self): AUGMENTATIONS_TEST = Compose([ - GridDropout(p=1.0, ratio=0.9) + GridDropout(p=1.0) ]) bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) diff --git 
a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py index 469d3055..373c55e8 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_previous_v_t_t-4_t-9_broken_input_2.py @@ -9,7 +9,7 @@ import carla from os import path from albumentations import ( - Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout, ChannelDropout, GaussNoise + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare, GridDropout, ChannelDropout ) from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH from utils.logger import logger @@ -147,8 +147,7 @@ def execute(self): AUGMENTATIONS_TEST = Compose([ - #GridDropout(p=1.0, ratio=0.5), - GaussNoise(p=1.0, var_limit=(500.0, 1500.0)) + GridDropout(p=1.0, ratio=0.9) ]) bird_eye_view_1 = AUGMENTATIONS_TEST(image=bird_eye_view_1) diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py index 8b895393..f31c666b 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9.py @@ -170,9 +170,11 @@ def execute(self): self.image_8 = img elif type(self.image_9) is int: self.image_9 = img + elif type(self.image_10) is int: + self.image_10 = img else: self.bird_eye_view_images += 1 - if (self.image_9==img).all() == False: + if (self.image_10==img).all() == False: self.bird_eye_view_unique_images += 1 self.image_1 = self.image_2 self.image_2 = self.image_3 @@ -182,9 +184,11 @@ def execute(self): self.image_6 = self.image_7 self.image_7 = self.image_8 self.image_8 = self.image_9 - self.image_9 = img + self.image_9 = self.image_10 + self.image_10 = img - img = [self.image_1, self.image_4, self.image_9] + #img = [self.image_1, self.image_4, self.image_9] + img = [self.image_1, self.image_5, self.image_10] img = np.expand_dims(img, axis=0) start_time = time.time() diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_50_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_50_km_h.py new file mode 100644 index 00000000..3f018dec --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_50_km_h.py @@ -0,0 +1,228 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os 
+os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + self.first_acceleration = True + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
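All of these brains decode the three network outputs the same way: throttle and brake are used as-is, while the steering value is mapped from [0, 1] back to CARLA's [-1, 1] range (prediction[0][1] * (1 - (-1)) + (-1) is simply 2x - 1). A minimal sketch with a hypothetical helper name:

def decode_prediction(prediction):
    # prediction has shape (1, 3): [throttle, steer, brake], each in [0, 1].
    throttle = float(prediction[0][0])
    steer = float(prediction[0][1]) * 2.0 - 1.0  # rescale to [-1, 1]
    brake = float(prediction[0][2])
    return throttle, steer, brake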
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + elif type(self.image_10) is int: + self.image_10 = img + else: + self.bird_eye_view_images += 1 + if (self.image_10==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = self.image_10 + self.image_10 = img + + img = [self.image_1, self.image_5, self.image_10] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed < 50 and self.first_acceleration: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + 
logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py index 93ce6163..9ee732b9 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_70_km_h.py @@ -172,9 +172,11 @@ def execute(self): self.image_8 = img elif type(self.image_9) is int: self.image_9 = img + elif type(self.image_10) is int: + self.image_10 = img else: self.bird_eye_view_images += 1 - if (self.image_9==img).all() == False: + if (self.image_10==img).all() == False: self.bird_eye_view_unique_images += 1 self.image_1 = self.image_2 self.image_2 = self.image_3 @@ -184,9 +186,10 @@ def execute(self): self.image_6 = self.image_7 self.image_7 = self.image_8 self.image_8 = self.image_9 - self.image_9 = img + self.image_9 = self.image_10 + self.image_10 = img - img = [self.image_1, self.image_4, self.image_9] + img = [self.image_1, self.image_5, self.image_10] img = np.expand_dims(img, axis=0) start_time = time.time() diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py index db355b88..9ceb5b7b 100644 --- a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t-4_t-9_V_MAX_30.py @@ -239,7 +239,7 @@ def execute(self): self.motors.sendSteer(0.0) self.motors.sendBrake(0) else: - self.motors.sendThrottle(0.75) + self.motors.sendThrottle(throttle) self.motors.sendSteer(steer) self.motors.sendBrake(break_command) diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_10_t_20.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_10_t_20.py new file mode 100644 index 00000000..ad5dc033 --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_10_t_20.py @@ -0,0 +1,280 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + 
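Common to every brain in this patch is the preprocessing of the bird's-eye view: an OpenCV resize (note that cv2.resize takes (width, height), so image_shape=(66, 200) in the updated brains yields a 200x66 array) followed by albumentations normalization. A hedged sketch of that pipeline (preprocess_bird_eye_view is a hypothetical name; Normalize() falls back to ImageNet statistics by default):

import cv2
import numpy as np
from albumentations import Compose, Normalize

PREPROCESS = Compose([Normalize()])

def preprocess_bird_eye_view(frame, dsize=(66, 200)):
    # Resize to (width=66, height=200), normalize, and add a batch dimension.
    resized = cv2.resize(frame, dsize)
    normalized = PREPROCESS(image=resized)["image"]
    return np.expand_dims(normalized, axis=0)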
+ +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.image_11 = 0 + self.image_12 = 0 + self.image_13 = 0 + self.image_14 = 0 + self.image_15 = 0 + self.image_16 = 0 + self.image_17 = 0 + self.image_18 = 0 + self.image_19 = 0 + self.image_20 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
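This t/t-10/t-20 brain caps the vehicle speed in the control stage rather than launching to a fixed target speed: above roughly 30 km/h the throttle is cut, below about 5 km/h it launches straight ahead, and otherwise the network's commands are followed (this particular file hard-codes 0.75 throttle in the middle band). A sketch of that pattern with hypothetical parameter names:

def speed_capped_control(motors, vehicle_speed, throttle, steer, brake,
                         v_max=30.0, v_launch=5.0):
    # Coast above v_max, launch straight below v_launch, otherwise drive
    # with the network's predicted commands.
    if vehicle_speed > v_max:
        motors.sendThrottle(0.0)
        motors.sendSteer(steer)
        motors.sendBrake(brake)
    elif vehicle_speed < v_launch:
        motors.sendThrottle(1.0)
        motors.sendSteer(0.0)
        motors.sendBrake(0)
    else:
        motors.sendThrottle(throttle)
        motors.sendSteer(steer)
        motors.sendBrake(brake)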
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + + elif type(self.image_10) is int: + self.image_10 = img + elif type(self.image_11) is int: + self.image_11 = img + elif type(self.image_12) is int: + self.image_12 = img + elif type(self.image_13) is int: + self.image_13 = img + elif type(self.image_14) is int: + self.image_14 = img + elif type(self.image_15) is int: + self.image_15 = img + elif type(self.image_16) is int: + self.image_16 = img + elif type(self.image_17) is int: + self.image_17 = img + elif type(self.image_18) is int: + self.image_18 = img + elif type(self.image_19) is int: + self.image_19 = img + elif type(self.image_20) is int: + self.image_20 = img + else: + self.bird_eye_view_images += 1 + if (self.image_20==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = self.image_10 + + self.image_10 = self.image_11 + self.image_11 = self.image_12 + self.image_12 = self.image_13 + self.image_13 = self.image_14 + self.image_14 = self.image_15 + self.image_15 = self.image_16 + self.image_16 = self.image_17 + self.image_17 = 
self.image_18 + self.image_18 = self.image_19 + + self.image_19 = self.image_20 + self.image_20 = img + + + #img = [self.image_1, self.image_4, self.image_9] + #img = [self.image_1, self.image_4, self.image_9, self.image_14, self.image_19, self.image_24, self.image_29, self.image_34, self.image_39] + + + img = [self.image_1, self.image_10, self.image_20] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0.0) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(0.75) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_1_t_2.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_1_t_2.py new file mode 100644 index 00000000..f80cd7cb --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_1_t_2.py @@ -0,0 +1,205 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + 
self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + else: + self.bird_eye_view_images += 1 + if (self.image_3==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = img + + #img = [self.image_1, self.image_4, self.image_9] + img = [self.image_1, self.image_2, self.image_3] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + #print(prediction) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 
- (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0.0) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(throttle) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_20_t_40.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_20_t_40.py new file mode 100644 index 00000000..5acec599 --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x3_t_t_20_t_40.py @@ -0,0 +1,359 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- 
Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.image_11 = 0 + self.image_12 = 0 + self.image_13 = 0 + self.image_14 = 0 + self.image_15 = 0 + self.image_16 = 0 + self.image_17 = 0 + self.image_18 = 0 + self.image_19 = 0 + self.image_20 = 0 + + self.image_21 = 0 + self.image_22 = 0 + self.image_23 = 0 + self.image_24 = 0 + self.image_25 = 0 + self.image_26 = 0 + self.image_27 = 0 + self.image_28 = 0 + self.image_29 = 0 + self.image_30 = 0 + + self.image_31 = 0 + self.image_32 = 0 + self.image_33 = 0 + self.image_34 = 0 + self.image_35 = 0 + self.image_36 = 0 + self.image_37 = 0 + self.image_38 = 0 + self.image_39 = 0 + self.image_40 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + + elif type(self.image_10) is int: + self.image_10 = img + elif 
type(self.image_11) is int: + self.image_11 = img + elif type(self.image_12) is int: + self.image_12 = img + elif type(self.image_13) is int: + self.image_13 = img + elif type(self.image_14) is int: + self.image_14 = img + elif type(self.image_15) is int: + self.image_15 = img + elif type(self.image_16) is int: + self.image_16 = img + elif type(self.image_17) is int: + self.image_17 = img + elif type(self.image_18) is int: + self.image_18 = img + elif type(self.image_19) is int: + self.image_19 = img + elif type(self.image_20) is int: + self.image_20 = img + elif type(self.image_21) is int: + self.image_21 = img + elif type(self.image_22) is int: + self.image_22 = img + elif type(self.image_23) is int: + self.image_23 = img + elif type(self.image_24) is int: + self.image_24 = img + elif type(self.image_25) is int: + self.image_25 = img + elif type(self.image_26) is int: + self.image_26 = img + elif type(self.image_27) is int: + self.image_27 = img + elif type(self.image_28) is int: + self.image_28 = img + elif type(self.image_29) is int: + self.image_29 = img + elif type(self.image_30) is int: + self.image_30 = img + elif type(self.image_31) is int: + self.image_31 = img + elif type(self.image_32) is int: + self.image_32 = img + elif type(self.image_33) is int: + self.image_33 = img + elif type(self.image_34) is int: + self.image_34 = img + elif type(self.image_35) is int: + self.image_35 = img + elif type(self.image_36) is int: + self.image_36 = img + elif type(self.image_37) is int: + self.image_37 = img + elif type(self.image_38) is int: + self.image_38 = img + elif type(self.image_39) is int: + self.image_39 = img + elif type(self.image_40) is int: + self.image_40 = img + else: + self.bird_eye_view_images += 1 + if (self.image_40==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = self.image_10 + + self.image_10 = self.image_11 + self.image_11 = self.image_12 + self.image_12 = self.image_13 + self.image_13 = self.image_14 + self.image_14 = self.image_15 + self.image_15 = self.image_16 + self.image_16 = self.image_17 + self.image_17 = self.image_18 + self.image_18 = self.image_19 + + self.image_19 = self.image_20 + self.image_20 = self.image_21 + self.image_21 = self.image_22 + self.image_22 = self.image_23 + self.image_23 = self.image_24 + self.image_24 = self.image_25 + self.image_25 = self.image_26 + self.image_26 = self.image_27 + self.image_27 = self.image_28 + + self.image_28 = self.image_29 + self.image_29 = self.image_30 + self.image_30 = self.image_31 + self.image_31 = self.image_32 + self.image_32 = self.image_33 + self.image_33 = self.image_34 + self.image_34 = self.image_35 + self.image_35 = self.image_36 + self.image_36 = self.image_37 + + self.image_37 = self.image_38 + self.image_38 = self.image_39 + self.image_39 = self.image_40 + self.image_40 = img + + img = [self.image_1, self.image_20, self.image_40] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if 
vehicle_speed > 30: + self.motors.sendThrottle(0.0) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(0.75) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x5_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x5_V_MAX_30.py new file mode 100644 index 00000000..f5db3576 --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x5_V_MAX_30.py @@ -0,0 +1,275 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + 
self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.image_11 = 0 + self.image_12 = 0 + self.image_13 = 0 + self.image_14 = 0 + self.image_15 = 0 + self.image_16 = 0 + self.image_17 = 0 + self.image_18 = 0 + self.image_19 = 0 + self.image_20 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + + elif type(self.image_10) is int: + self.image_10 = img + elif type(self.image_11) is int: + self.image_11 = img + elif type(self.image_12) is int: + self.image_12 = img + elif type(self.image_13) is int: + self.image_13 = img + elif type(self.image_14) is int: + self.image_14 = img + elif type(self.image_15) is int: + self.image_15 = img + elif type(self.image_16) is int: + self.image_16 = img + elif type(self.image_17) is int: + self.image_17 = img + elif type(self.image_18) is int: + self.image_18 = img + elif type(self.image_19) is int: + self.image_19 = img + elif type(self.image_20) is int: + self.image_20 = img + else: + 
self.bird_eye_view_images += 1 + if (self.image_20==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = self.image_10 + + self.image_10 = self.image_11 + self.image_11 = self.image_12 + self.image_12 = self.image_13 + self.image_13 = self.image_14 + self.image_14 = self.image_15 + self.image_15 = self.image_16 + self.image_16 = self.image_17 + self.image_17 = self.image_18 + self.image_18 = self.image_19 + + self.image_19 = self.image_20 + self.image_20 = img + + img = [self.image_1, self.image_5, self.image_10, self.image_15, self.image_20] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0.0) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(0.75) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file diff --git a/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x9_V_MAX_30.py b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x9_V_MAX_30.py new file mode 100644 index 00000000..418c8ddd --- /dev/null +++ b/behavior_metrics/brains/CARLA/tensorflow/brain_carla_bird_eye_deep_learning_x9_V_MAX_30.py @@ -0,0 +1,359 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +import csv +import cv2 +import math +import numpy as np +import threading +import time +import carla +from os import path +from albumentations import ( + Compose, Normalize, RandomRain, RandomBrightness, RandomShadow, RandomSnow, RandomFog, RandomSunFlare +) +from utils.constants import PRETRAINED_MODELS_DIR, ROOT_PATH +from utils.logger import logger +from traceback import print_exc + +PRETRAINED_MODELS = ROOT_PATH + '/' + PRETRAINED_MODELS_DIR + 'CARLA/' + +from tensorflow.python.framework.errors_impl import NotFoundError +from tensorflow.python.framework.errors_impl import UnimplementedError +import tensorflow as tf + +import os +os.environ['CUDA_VISIBLE_DEVICES'] = '-1' + +#gpus = tf.config.experimental.list_physical_devices('GPU') +#for gpu in gpus: +# tf.config.experimental.set_memory_growth(gpu, True) + + + +class Brain: + + def __init__(self, sensors, actuators, handler, model, config=None): + self.camera_0 = sensors.get_camera('camera_0') + self.camera_1 = sensors.get_camera('camera_1') + self.camera_2 = sensors.get_camera('camera_2') + 
self.camera_3 = sensors.get_camera('camera_3') + + self.cameras_first_images = [] + + self.pose = sensors.get_pose3d('pose3d_0') + + self.bird_eye_view = sensors.get_bird_eye_view('bird_eye_view_0') + + self.motors = actuators.get_motor('motors_0') + self.handler = handler + self.config = config + self.inference_times = [] + self.gpu_inference = True if tf.test.gpu_device_name() else False + + self.threshold_image = np.zeros((640, 360, 3), np.uint8) + self.color_image = np.zeros((640, 360, 3), np.uint8) + + client = carla.Client('localhost', 2000) + client.set_timeout(10.0) # seconds + world = client.get_world() + + time.sleep(5) + self.vehicle = world.get_actors().filter('vehicle.*')[0] + + if model: + if not path.exists(PRETRAINED_MODELS + model): + logger.info("File " + model + " cannot be found in " + PRETRAINED_MODELS) + logger.info("** Load TF model **") + self.net = tf.keras.models.load_model(PRETRAINED_MODELS + model) + logger.info("** Loaded TF model **") + else: + logger.info("** Brain not loaded **") + logger.info("- Models path: " + PRETRAINED_MODELS) + logger.info("- Model: " + str(model)) + + self.image_1 = 0 + self.image_2 = 0 + self.image_3 = 0 + self.image_4 = 0 + self.image_5 = 0 + self.image_6 = 0 + self.image_7 = 0 + self.image_8 = 0 + self.image_9 = 0 + self.image_10 = 0 + + self.image_11 = 0 + self.image_12 = 0 + self.image_13 = 0 + self.image_14 = 0 + self.image_15 = 0 + self.image_16 = 0 + self.image_17 = 0 + self.image_18 = 0 + self.image_19 = 0 + self.image_20 = 0 + + self.image_21 = 0 + self.image_22 = 0 + self.image_23 = 0 + self.image_24 = 0 + self.image_25 = 0 + self.image_26 = 0 + self.image_27 = 0 + self.image_28 = 0 + self.image_29 = 0 + self.image_30 = 0 + + self.image_31 = 0 + self.image_32 = 0 + self.image_33 = 0 + self.image_34 = 0 + self.image_35 = 0 + self.image_36 = 0 + self.image_37 = 0 + self.image_38 = 0 + self.image_39 = 0 + self.image_40 = 0 + + self.bird_eye_view_images = 0 + self.bird_eye_view_unique_images = 0 + + + def update_frame(self, frame_id, data): + """Update the information to be shown in one of the GUI's frames. + + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if data.shape[0] != data.shape[1]: + if data.shape[0] > data.shape[1]: + difference = data.shape[0] - data.shape[1] + extra_left, extra_right = int(difference/2), int(difference/2) + extra_top, extra_bottom = 0, 0 + else: + difference = data.shape[1] - data.shape[0] + extra_left, extra_right = 0, 0 + extra_top, extra_bottom = int(difference/2), int(difference/2) + + + data = np.pad(data, ((extra_top, extra_bottom), (extra_left, extra_right), (0, 0)), mode='constant', constant_values=0) + + self.handler.update_frame(frame_id, data) + + def update_pose(self, pose_data): + self.handler.update_pose3d(pose_data) + + def execute(self): + image = self.camera_0.getImage().data + image_1 = self.camera_1.getImage().data + image_2 = self.camera_2.getImage().data + image_3 = self.camera_3.getImage().data + + bird_eye_view_1 = self.bird_eye_view.getImage(self.vehicle) + bird_eye_view_1 = cv2.cvtColor(bird_eye_view_1, cv2.COLOR_BGR2RGB) + + if self.cameras_first_images == []: + self.cameras_first_images.append(image) + self.cameras_first_images.append(image_1) + self.cameras_first_images.append(image_2) + self.cameras_first_images.append(image_3) + self.cameras_first_images.append(bird_eye_view_1) + + self.cameras_last_images = [ + image, + image_1, + image_2, + image_3, + bird_eye_view_1 + ] + + self.update_frame('frame_1', image_1) + self.update_frame('frame_2', image_2) + self.update_frame('frame_3', image_3) + + self.update_frame('frame_0', bird_eye_view_1) + + self.update_pose(self.pose.getPose3d()) + + image_shape=(50, 150) + img_base = cv2.resize(bird_eye_view_1, image_shape) + + AUGMENTATIONS_TEST = Compose([ + Normalize() + ]) + image = AUGMENTATIONS_TEST(image=img_base) + img = image["image"] + + if type(self.image_1) is int: + self.image_1 = img + elif type(self.image_2) is int: + self.image_2 = img + elif type(self.image_3) is int: + self.image_3 = img + elif type(self.image_4) is int: + self.image_4 = img + elif type(self.image_5) is int: + self.image_5 = img + elif type(self.image_6) is int: + self.image_6 = img + elif type(self.image_7) is int: + self.image_7 = img + elif type(self.image_8) is int: + self.image_8 = img + elif type(self.image_9) is int: + self.image_9 = img + + elif type(self.image_10) is int: + self.image_10 = img + elif type(self.image_11) is int: + self.image_11 = img + elif type(self.image_12) is int: + self.image_12 = img + elif type(self.image_13) is int: + self.image_13 = img + elif type(self.image_14) is int: + self.image_14 = img + elif type(self.image_15) is int: + self.image_15 = img + elif type(self.image_16) is int: + self.image_16 = img + elif type(self.image_17) is int: + self.image_17 = img + elif type(self.image_18) is int: + self.image_18 = img + elif type(self.image_19) is int: + self.image_19 = img + elif type(self.image_20) is int: + self.image_20 = img + elif type(self.image_21) is int: + self.image_21 = img + elif type(self.image_22) is int: + self.image_22 = img + elif type(self.image_23) is int: + self.image_23 = img + elif type(self.image_24) is int: + self.image_24 = img + elif type(self.image_25) is int: + self.image_25 = img + elif type(self.image_26) is int: + self.image_26 = img + elif type(self.image_27) is int: + self.image_27 = img + elif type(self.image_28) is int: + self.image_28 = img + elif type(self.image_29) is int: + self.image_29 = img + elif type(self.image_30) is int: + self.image_30 = img + elif type(self.image_31) is int: + self.image_31 = img + elif 
type(self.image_32) is int: + self.image_32 = img + elif type(self.image_33) is int: + self.image_33 = img + elif type(self.image_34) is int: + self.image_34 = img + elif type(self.image_35) is int: + self.image_35 = img + elif type(self.image_36) is int: + self.image_36 = img + elif type(self.image_37) is int: + self.image_37 = img + elif type(self.image_38) is int: + self.image_38 = img + elif type(self.image_39) is int: + self.image_39 = img + elif type(self.image_40) is int: + self.image_40 = img + else: + self.bird_eye_view_images += 1 + if (self.image_40==img).all() == False: + self.bird_eye_view_unique_images += 1 + self.image_1 = self.image_2 + self.image_2 = self.image_3 + self.image_3 = self.image_4 + self.image_4 = self.image_5 + self.image_5 = self.image_6 + self.image_6 = self.image_7 + self.image_7 = self.image_8 + self.image_8 = self.image_9 + self.image_9 = self.image_10 + + self.image_10 = self.image_11 + self.image_11 = self.image_12 + self.image_12 = self.image_13 + self.image_13 = self.image_14 + self.image_14 = self.image_15 + self.image_15 = self.image_16 + self.image_16 = self.image_17 + self.image_17 = self.image_18 + self.image_18 = self.image_19 + + self.image_19 = self.image_20 + self.image_20 = self.image_21 + self.image_21 = self.image_22 + self.image_22 = self.image_23 + self.image_23 = self.image_24 + self.image_24 = self.image_25 + self.image_25 = self.image_26 + self.image_26 = self.image_27 + self.image_27 = self.image_28 + + self.image_28 = self.image_29 + self.image_29 = self.image_30 + self.image_30 = self.image_31 + self.image_31 = self.image_32 + self.image_32 = self.image_33 + self.image_33 = self.image_34 + self.image_34 = self.image_35 + self.image_35 = self.image_36 + self.image_36 = self.image_37 + + self.image_37 = self.image_38 + self.image_38 = self.image_39 + self.image_39 = self.image_40 + self.image_40 = img + + img = [self.image_1, self.image_5, self.image_10, self.image_15, self.image_20, self.image_25, self.image_30, self.image_35, self.image_40] + img = np.expand_dims(img, axis=0) + + start_time = time.time() + try: + prediction = self.net.predict(img, verbose=0) + self.inference_times.append(time.time() - start_time) + throttle = prediction[0][0] + steer = prediction[0][1] * (1 - (-1)) + (-1) + break_command = prediction[0][2] + speed = self.vehicle.get_velocity() + vehicle_speed = 3.6 * math.sqrt(speed.x**2 + speed.y**2 + speed.z**2) + + if vehicle_speed > 30: + self.motors.sendThrottle(0.0) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + else: + if vehicle_speed < 5: + self.motors.sendThrottle(1.0) + self.motors.sendSteer(0.0) + self.motors.sendBrake(0) + else: + self.motors.sendThrottle(0.75) + self.motors.sendSteer(steer) + self.motors.sendBrake(break_command) + + except NotFoundError as ex: + logger.info('Error inside brain: NotFoundError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except UnimplementedError as ex: + logger.info('Error inside brain: UnimplementedError!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) + except Exception as ex: + logger.info('Error inside brain: Exception!') + logger.warning(type(ex).__name__) + print_exc() + raise Exception(ex) \ No newline at end of file diff --git a/behavior_metrics/driver_carla.py b/behavior_metrics/driver_carla.py index 0f9c2ed7..699d9d49 100644 --- a/behavior_metrics/driver_carla.py +++ b/behavior_metrics/driver_carla.py @@ -127,7 +127,7 @@ def is_config_correct(app_configuration): return 
is_correct -def generate_agregated_experiments_metrics(experiments_starting_time, experiments_elapsed_times): +def generate_agregated_experiments_metrics(experiments_starting_time, experiments_elapsed_times, app_configuration): result = metrics_carla.get_aggregated_experiments_list(experiments_starting_time) experiments_starting_time_dt = datetime.fromtimestamp(experiments_starting_time) @@ -252,28 +252,33 @@ def generate_agregated_experiments_metrics(experiments_starting_time, experiment 'metric': 'suddenness_distance_speed_per_km', 'title': 'Suddenness distance speed per km per experiment' }, - { - 'metric': 'dangerous_distance_pct_km', - 'title': 'Percentage of dangerous distance per km' - }, - { - 'metric': 'close_distance_pct_km', - 'title': 'Percentage of close distance per km' - }, - { - 'metric': 'medium_distance_pct_km', - 'title': 'Percentage of medium distance per km' - }, - { - 'metric': 'great_distance_pct_km', - 'title': 'Percentage of great distance per km' - }, + { 'metric': 'completed_laps', 'title': 'Completed laps per experiment' }, ] + if app_configuration.task == 'follow_lane_traffic': + experiments_metrics_and_titles.extend([ + { + 'metric': 'dangerous_distance_pct_km', + 'title': 'Percentage of dangerous distance per km' + }, + { + 'metric': 'close_distance_pct_km', + 'title': 'Percentage of close distance per km' + }, + { + 'metric': 'medium_distance_pct_km', + 'title': 'Percentage of medium distance per km' + }, + { + 'metric': 'great_distance_pct_km', + 'title': 'Percentage of great distance per km' + }, + ]) + metrics_carla.get_all_experiments_aggregated_metrics(result, experiments_starting_time_str, experiments_metrics_and_titles) metrics_carla.get_per_model_aggregated_metrics(result, experiments_starting_time_str, experiments_metrics_and_titles) metrics_carla.get_all_experiments_aggregated_metrics_boxplot(result, experiments_starting_time_str, experiments_metrics_and_titles) @@ -413,7 +418,7 @@ def main(): logger.info('Invalid task type. Try "follow_route", "follow_lane" or "follow_lane_traffic". Killing program...') sys.exit(-1) experiments_elapsed_times['total_experiments_elapsed_time'] = time.time() - experiments_starting_time - generate_agregated_experiments_metrics(experiments_starting_time, experiments_elapsed_times) + generate_agregated_experiments_metrics(experiments_starting_time, experiments_elapsed_times, app_configuration) if app_configuration.experiment_random_spawn_point == True or app_configuration.task == 'follow_route': if os.path.isfile('tmp_circuit.launch'): os.remove('tmp_circuit.launch')
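The new x3/x5/x9 brains above keep their sliding window of bird's-eye-view frames in up to forty individually numbered attributes (self.image_1 through self.image_40) and shift every slot by hand on each tick before sampling a few of them as the network input. A collections.deque with a fixed maxlen expresses the same behaviour in a few lines. The sketch below is illustrative only: FrameBuffer and its method names are not part of the Behavior Metrics code base, and the bird_eye_view_images / bird_eye_view_unique_images counters would still need the same buffer-full ordering as the original if exact parity matters.

from collections import deque

import numpy as np


class FrameBuffer:
    """Sliding window over the last `size` preprocessed bird's-eye-view frames (illustrative helper)."""

    def __init__(self, size=40):
        self.frames = deque(maxlen=size)

    def push(self, frame):
        """Append a frame and report whether it differs from the previous newest frame."""
        changed = len(self.frames) == 0 or not np.array_equal(self.frames[-1], frame)
        self.frames.append(frame)
        return changed

    def is_full(self):
        return len(self.frames) == self.frames.maxlen

    def sample(self, positions):
        """Pick frames by 1-based position, e.g. (1, 20, 40) for the t-40 / t-20 / t input of the x3 brain."""
        return [self.frames[p - 1] for p in positions]


# Possible use inside execute(), mirroring the x3_t_t_20_t_40 brain (x5 would sample
# (1, 5, 10, 15, 20) over a 20-frame buffer, x9 would sample every fifth frame of 40):
#     changed = self.frame_buffer.push(img)
#     if self.frame_buffer.is_full():
#         self.bird_eye_view_images += 1
#         if changed:
#             self.bird_eye_view_unique_images += 1
#         net_input = np.expand_dims(self.frame_buffer.sample((1, 20, 40)), axis=0)
#         prediction = self.net.predict(net_input, verbose=0)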
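The three-way throttle/steer/brake gating around the network output is also repeated verbatim in every brain in this change: cut the throttle above the speed cap, accelerate straight ahead below roughly 5 km/h, otherwise apply the predicted commands with a cruise throttle (the constant 0.75 in the V_MAX_30 brains, the predicted throttle in the 70 km/h brain). If that block keeps multiplying, it could live in one shared helper. The sketch below copies the logic from the diffs above; the function name and default thresholds are illustrative, not existing repository API.

def apply_speed_gated_control(motors, vehicle_speed, steer, brake, cruise_throttle,
                              v_max=30.0, v_min=5.0):
    """Speed-gated actuation used by the bird's-eye-view brains (illustrative helper, not repo API)."""
    if vehicle_speed > v_max:
        # Above the cap: coast, keep steering, let the predicted brake act.
        motors.sendThrottle(0.0)
        motors.sendSteer(steer)
        motors.sendBrake(brake)
    elif vehicle_speed < v_min:
        # Nearly stopped: accelerate straight ahead to get moving.
        motors.sendThrottle(1.0)
        motors.sendSteer(0.0)
        motors.sendBrake(0)
    else:
        # Normal range: cruise throttle with the predicted steer and brake.
        motors.sendThrottle(cruise_throttle)
        motors.sendSteer(steer)
        motors.sendBrake(brake)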
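One further note on the model-loading block shared by these brains: when the checkpoint is missing, the code only logs that the file "cannot be found" and then calls tf.keras.models.load_model anyway, so the failure surfaces later as an opaque TensorFlow error. A small guard makes the problem explicit where it is detected. This is a sketch of one option under that assumption, not the project's established error-handling convention; the helper name is hypothetical.

import os

import tensorflow as tf


def load_brain_model(models_dir, model_name):
    """Load a Keras model from models_dir, failing fast with a clear message if it is missing (illustrative)."""
    model_path = os.path.join(models_dir, model_name)
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file '{model_name}' cannot be found in '{models_dir}'")
    # compile=False skips restoring the optimizer state, which inference-only brains do not need.
    return tf.keras.models.load_model(model_path, compile=False)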