From a5d2b4fae6614d5a24767f9f125761b09861b2ee Mon Sep 17 00:00:00 2001
From: Luigi Freda
Date: Fri, 20 Dec 2024 14:31:54 +0100
Subject: [PATCH] Added visualization of 3D trajectory error (gt vs aligned
 estimated). Added improvements to avoid the first keyframe being left
 without an image.

---
 feature_matcher.py                     |  8 +-
 initializer.py                         |  4 +-
 loop_closing.py                        |  6 +-
 loop_detecting_process.py              | 17 +++-
 loop_detector_base.py                  |  5 +-
 loop_detector_dbow2.py                 |  1 +
 loop_detector_dbow3.py                 |  1 +
 loop_detector_vlad.py                  |  9 +++
 loop_detector_vpr.py                   | 32 ++++----
 mplot_thread.py                        |  3 +-
 slam_plot_drawer.py                    | 77 ++++++++++---------
 .../test_loop_detecting_process.py     |  2 +-
 test/loopclosing/test_loop_detector.py |  2 +-
 tracking.py                            |  1 +
 utils_data.py                          |  4 +-
 utils_geom.py                          | 53 ++++++-------
 viewer3D.py                            | 12 ++-
 volumetric_integrator.py               | 10 ++-
 18 files changed, 142 insertions(+), 105 deletions(-)

diff --git a/feature_matcher.py b/feature_matcher.py
index 546296e2..5e5d9d31 100644
--- a/feature_matcher.py
+++ b/feature_matcher.py
@@ -436,8 +436,12 @@ def match(self, img1, img2, des1, des2, kps1=None, kps2=None, ratio_test=None,
             idxs1, idxs2 = MatcherUtils.rowMatchesWithRatioTest(matcher, kps1, des1, kps2, des2, max_descriptor_distance, max_disparity=max_disparity, ratio_test=ratio_test)
         else:
             idxs1, idxs2 = MatcherUtils.rowMatches(matcher, kps1, des1, kps2, des2, max_descriptor_distance, max_disparity=max_disparity)
-        result.idxs1 = idxs1
-        result.idxs2 = idxs2
+
+        # Change suggested here https://github.com/luigifreda/pyslam/issues/125#issuecomment-2555299806
+        #result.idxs1 = idxs1
+        #result.idxs2 = idxs2
+        result.idxs1 = idxs1.astype(np.int64)
+        result.idxs2 = idxs2.astype(np.int64)
         return result

diff --git a/initializer.py b/initializer.py
index 2f4fe4d5..f7f7ab5b 100644
--- a/initializer.py
+++ b/initializer.py
@@ -120,12 +120,14 @@ def check_num_failures(self):

     # push the first image
     def init(self, f_cur : Frame, img_cur):
+        f_cur.img = img_cur # enforce the image storage for the first frames: we need the keyframes to store images for loop detection
         self.frames.append(f_cur)
         self.f_ref = f_cur

     # actually initialize having two available images
     def initialize(self, f_cur: Frame, img_cur):
+        f_cur.img = img_cur # enforce the image storage for the first frames: we need the keyframes to store images for loop detection
         return self.initialize_simple(f_cur, img_cur) # try to initialize current frame with reference frame: reference position is adjusted in the buffer
         #return self.initialize_enumerate(f_cur, img_cur) # try to initialize current frame with all frames we can use in the buffer
@@ -227,7 +229,7 @@ def process_frames(self, f_cur: Frame, img_cur, f_ref: Frame):
         #map.add_frame(f_ref)
         #map.add_frame(f_cur)

-        kf_ref = KeyFrame(f_ref)
+        kf_ref = KeyFrame(f_ref, f_ref.img)
         kf_cur = KeyFrame(f_cur, img_cur)
         map.add_keyframe(kf_ref)
         map.add_keyframe(kf_cur)
diff --git a/loop_closing.py b/loop_closing.py
index b5fc48af..1dd8ffa6 100644
--- a/loop_closing.py
+++ b/loop_closing.py
@@ -787,7 +787,8 @@ def run(self):
                     print(f'LoopClosing: processing KF: {keyframe.id}, detection: qin size: {self.loop_detecting_process.q_in.qsize()}, qout size: {self.loop_detecting_process.q_out.qsize()}')

                     # update the keyframe with the detection output
-                    keyframe.g_des = detection_output.g_des_vec
+                    if keyframe.g_des is None:
+                        keyframe.g_des = detection_output.g_des_vec

                     # for viz debugging
                     if self.store_kf_imgs:
@@ -809,7 +810,8 @@ def run(self):
                         if cov_kf_id in self.keyframes_map:
                             cov_kf = self.keyframes_map[cov_kf_id]
                             # update the cov keyframe with the detection output if needed
-                            if not cov_kf.is_bad and cov_kf.g_des is None:
+                            #if not cov_kf.is_bad and cov_kf.g_des is None:
+                            if cov_kf.g_des is None:
                                 cov_kf.g_des = detection_output.covisible_gdes_vecs[i]

             got_loop = False
diff --git a/loop_detecting_process.py b/loop_detecting_process.py
index 4eeaa0be..b0a6005b 100644
--- a/loop_detecting_process.py
+++ b/loop_detecting_process.py
@@ -41,11 +41,11 @@
 from keyframe import KeyFrame
 from frame import Frame

-from loop_detector_configs import LoopDetectorConfigs, loop_detector_factory, loop_detector_config_check, SlamFeatureManagerInfo
+from loop_detector_configs import LoopDetectorConfigs, loop_detector_factory, loop_detector_config_check, GlobalDescriptorType, SlamFeatureManagerInfo
 from loop_detector_base import LoopDetectorTask, LoopDetectorTaskType, LoopDetectorBase, LoopDetectorOutput

 import traceback
-
+import torch.multiprocessing as mp

 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
@@ -76,6 +76,19 @@ class LoopDetectingProcess:
     def __init__(self, slam: 'Slam', loop_detector_config = LoopDetectorConfigs.DBOW3):
         set_rlimit()

+        global_descriptor_type = loop_detector_config['global_descriptor_type']
+        # NOTE: the following set_start_method() is needed by multiprocessing for using CUDA acceleration (for instance with torch).
+        if global_descriptor_type == GlobalDescriptorType.COSPLACE or \
+           global_descriptor_type == GlobalDescriptorType.ALEXNET or \
+           global_descriptor_type == GlobalDescriptorType.NETVLAD or \
+           global_descriptor_type == GlobalDescriptorType.VLAD or \
+           global_descriptor_type == GlobalDescriptorType.EIGENPLACES:
+            if mp.get_start_method() != 'spawn':
+                mp.set_start_method('spawn', force=True) # NOTE: This may generate some pickling problems with multiprocessing
+                                                         # in combination with torch and we need to check it in other places.
+                                                         # This set start method can be checked with MultiprocessingManager.is_start_method_spawn()
+
+
         self.loop_detector_config = loop_detector_config
         self.slam_info = SlamFeatureManagerInfo(slam=slam)
diff --git a/loop_detector_base.py b/loop_detector_base.py
index 8ae39433..7eb573fe 100644
--- a/loop_detector_base.py
+++ b/loop_detector_base.py
@@ -108,7 +108,7 @@ class LoopDetectorTask:
     def __init__(self, keyframe: KeyFrame, img, task_type=LoopDetectorTaskType.NONE, covisible_keyframes=[], connected_keyframes=[], load_save_path=None):
         self.task_type = task_type
         self.keyframe_data = LoopDetectKeyframeData(keyframe, img)
-        self.covisible_keyframes_data = [LoopDetectKeyframeData(kf) for kf in covisible_keyframes if not kf.is_bad]
+        self.covisible_keyframes_data = [LoopDetectKeyframeData(kf) for kf in covisible_keyframes if not kf.is_bad and kf.id != self.keyframe_data.id]
         self.connected_keyframes_ids = [kf.id for kf in connected_keyframes]
         self.load_save_path = load_save_path
         # # for loop closing
@@ -262,7 +262,8 @@ def compute_reference_similarity_score(self, task: LoopDetectorTask, vector_type
                 #print(f'LoopDetectorBase: covisible keyframe {cov_kf.id} has no g_des, computing on img shape: {cov_kf.img.shape}, dtype: {cov_kf.img.dtype}')
                 if cov_kf.img.dtype != np.uint8:
                     print(f'LoopDetectorBase: covisible keyframe {cov_kf.id} has img dtype: {cov_kf.img.dtype}, converting to uint8')
-                    cov_kf.img = cov_kf.img.astype(np.uint8)
+                    cov_kf.img = cov_kf.img.astype(np.uint8)
+                print(f'LoopDetectorBase: computing global descriptor for keyframe {cov_kf.id}')
                 cov_kf.g_des = self.compute_global_des(cov_kf.des, cov_kf.img)
                 if cov_kf.g_des is not None:
                     if not isinstance(cov_kf.g_des, vector_type):
diff --git a/loop_detector_dbow2.py b/loop_detector_dbow2.py
index 30884373..5f4d2065 100644
--- a/loop_detector_dbow2.py
+++ b/loop_detector_dbow2.py
@@ -148,6 +148,7 @@ def run_task(self, task: LoopDetectorTask):

         # compute global descriptor
         if keyframe.g_des is None:
+            print(f'LoopDetectorDBoW2: computing global descriptor for keyframe {keyframe.id}')
             keyframe.g_des = self.compute_global_des(keyframe.des, keyframe.img) # get bow vector
             g_des_vec = keyframe.g_des.toVec() # transform it to a vector (numpy array) to make it picklable
         else:
diff --git a/loop_detector_dbow3.py b/loop_detector_dbow3.py
index 181d893c..89b1c352 100644
--- a/loop_detector_dbow3.py
+++ b/loop_detector_dbow3.py
@@ -122,6 +122,7 @@ def run_task(self, task: LoopDetectorTask):

         # compute global descriptor
         if keyframe.g_des is None:
+            print(f'LoopDetectorDBoW3: computing global descriptor for keyframe {keyframe.id}')
             keyframe.g_des = self.compute_global_des(keyframe.des, keyframe.img) # get bow vector
             g_des_vec = keyframe.g_des.toVec() # transform it to a vector (numpy array) to make it picklable
         else:
diff --git a/loop_detector_vlad.py b/loop_detector_vlad.py
index fbd74660..64ba81fc 100644
--- a/loop_detector_vlad.py
+++ b/loop_detector_vlad.py
@@ -67,6 +67,14 @@ class LoopDetectorVlad(LoopDetectorBase):
     def __init__(self, vocabulary_data: VocabularyData, local_feature_manager=None):
         super().__init__()
+
+        import torch.multiprocessing as mp
+        # NOTE: the following set_start_method() is needed by multiprocessing for using CUDA acceleration (for instance with torch).
+        if mp.get_start_method() != 'spawn':
+            mp.set_start_method('spawn', force=True) # NOTE: This may generate some pickling problems with multiprocessing
+                                                     # in combination with torch and we need to check it in other places.
+ # This set start method can be checked with MultiprocessingManager.is_start_method_spawn() + self.local_feature_manager = local_feature_manager self.use_torch_vectors = False # use torch vectors with a simple database implementation @@ -145,6 +153,7 @@ def run_task(self, task: LoopDetectorTask): # compute global descriptor if keyframe.g_des is None: + print(f'LoopDetectorVlad: computing global descriptor for keyframe {keyframe.id}') keyframe.g_des = self.compute_global_des(keyframe.des, keyframe.img) # get global descriptor #print(f'LoopDetectorVlad: g_des = {keyframe.g_des}, type: {type(keyframe.g_des)}, shape: {keyframe.g_des.shape}, dim: {keyframe.g_des.dim()}') diff --git a/loop_detector_vpr.py b/loop_detector_vpr.py index a27d2fb1..d45f9fc8 100644 --- a/loop_detector_vpr.py +++ b/loop_detector_vpr.py @@ -111,7 +111,7 @@ def __init__(self, global_descriptor_name= None, local_feature_manager=None, nam raise ValueError('LoopDetectorVprBase: global_descriptor_name cannot be None') self.score = None - if self.global_descriptor_name == 'SAD': + if self.global_descriptor_name.lower() == 'sad': self.score = ScoreSad() self.min_score = -100 if Parameters.kLoopClosingDebugWithSimmetryMatrix: @@ -127,15 +127,16 @@ def __init__(self, global_descriptor_name= None, local_feature_manager=None, nam self.global_feature_extractor = None self.global_db = None - # NOTE: the following set_start_method() is needed by multiprocessing for using CUDA acceleration (with torch). - if global_descriptor_name == 'CosPlace' or \ - global_descriptor_name == 'AlexNet' or \ - global_descriptor_name == 'NetVLAD' or \ - global_descriptor_name == 'EigenPlaces': + # NOTE: The following set_start_method() is needed by multiprocessing for using CUDA acceleration (for instance with torch). + if global_descriptor_name.lower() == 'cosplace' or \ + global_descriptor_name.lower() == 'alexnet' or \ + global_descriptor_name.lower() == 'netvlad' or \ + global_descriptor_name.lower() == 'eigenplaces': import torch.multiprocessing as mp - mp.set_start_method('spawn', force=True) # NOTE: This generates some pickling problems with multiprocessing - # in combination with torch and we need to check it in other places. - # This set start method can be checked with MultiprocessingManager.is_start_method_spawn() + if mp.get_start_method() != 'spawn': + mp.set_start_method('spawn', force=True) # NOTE: This may generate some pickling problems with multiprocessing + # in combination with torch and we need to check it in other places. + # This set start method can be checked with MultiprocessingManager.is_start_method_spawn() #self.init() # NOTE: We call init() in the run_task() method at its first call to # initialize the global feature extractor in the potentially launched parallel process. 
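For reference, the guarded switch that this patch repeats in loop_detecting_process.py, loop_detector_vlad.py, loop_detector_vpr.py, and volumetric_integrator.py boils down to the small idempotent pattern below. This is a minimal sketch, not code lifted from the patch: ensure_spawn_start_method is a hypothetical helper name, while get_start_method()/set_start_method() are the standard torch.multiprocessing API (mirroring Python's multiprocessing module).

    import torch.multiprocessing as mp

    def ensure_spawn_start_method():
        # CUDA cannot be re-initialized in a child process created with the
        # default 'fork' method, so components that run torch models in a
        # separate process (NetVLAD, CosPlace, EigenPlaces, VLAD, AlexNet
        # extractors, volumetric integration) need the 'spawn' start method.
        # Checking get_start_method() first keeps the call idempotent, so the
        # several modules that need it can all invoke it in any order.
        if mp.get_start_method() != 'spawn':
            # NOTE (from the patch): forcing 'spawn' may cause pickling issues
            # with torch objects exchanged between processes.
            mp.set_start_method('spawn', force=True)

Centralizing the guard this way is one option; the patch instead inlines the same check at each call site, which keeps every module self-contained.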
@@ -185,16 +186,16 @@ def init_db(self): def init_global_feature_extractor(self, global_descriptor_name): print(f'LoopDetectorVprBase: init_global_feature_extractor: global_descriptor_name: {global_descriptor_name}') global_feature_extractor = None - if global_descriptor_name == 'HDC-DELF': + if global_descriptor_name.lower() == 'hdc-delf': from feature_extraction.feature_extractor_holistic import HDCDELF global_feature_extractor = HDCDELF() - elif global_descriptor_name == 'AlexNet': + elif global_descriptor_name.lower() == 'alexnet': from feature_extraction.feature_extractor_holistic import AlexNetConv3Extractor global_feature_extractor = AlexNetConv3Extractor() - elif global_descriptor_name == 'SAD': + elif global_descriptor_name.lower() == 'sad': from feature_extraction.feature_extractor_holistic import SAD global_feature_extractor = SAD() - elif global_descriptor_name == 'NetVLAD': + elif global_descriptor_name.lower() == 'netvlad': from feature_extraction.feature_extractor_patchnetvlad import PatchNetVLADFeatureExtractor from patchnetvlad.tools import PATCHNETVLAD_ROOT_DIR import configparser @@ -204,10 +205,10 @@ def init_global_feature_extractor(self, global_descriptor_name): config = configparser.ConfigParser() config.read(configfile) global_feature_extractor = PatchNetVLADFeatureExtractor(config) - elif global_descriptor_name == 'CosPlace': + elif global_descriptor_name.lower() == 'cosplace': from feature_extraction.feature_extractor_cosplace import CosPlaceFeatureExtractor global_feature_extractor = CosPlaceFeatureExtractor() - elif global_descriptor_name == 'EigenPlaces': + elif global_descriptor_name.lower() == 'eigenplaces': from feature_extraction.feature_extractor_eigenplaces import EigenPlacesFeatureExtractor global_feature_extractor = EigenPlacesFeatureExtractor() else: @@ -242,6 +243,7 @@ def run_task(self, task: LoopDetectorTask): # compute global descriptor if keyframe.g_des is None: + print(f'LoopDetectorVprBase: computing global descriptor for keyframe {keyframe.id}') keyframe.g_des = self.compute_global_des(keyframe.des, keyframe.img) # get global descriptor if task.task_type != LoopDetectorTaskType.RELOCALIZATION: diff --git a/mplot_thread.py b/mplot_thread.py index abee9a60..2e5ae79e 100644 --- a/mplot_thread.py +++ b/mplot_thread.py @@ -116,7 +116,8 @@ def __init__(self, xlabel='', ylabel='', title=''): self.data = None self.got_data = False - self.handle_map = {} + self.handle_map = {} + self.fig = None self.axis_computed = False self.xlim = [float("inf"),float("-inf")] diff --git a/slam_plot_drawer.py b/slam_plot_drawer.py index 934af749..7a264a73 100644 --- a/slam_plot_drawer.py +++ b/slam_plot_drawer.py @@ -32,7 +32,7 @@ from qtplot_thread import Qplot2d import matplotlib.colors as mcolors -from utils_geom import AlignmentGroundTruthData, Sim3Pose +from utils_geom import AlignmentEstimatedAndGroundTruthData, Sim3Pose kUseQtplot2d = False if platform.system() == 'Darwin': @@ -55,41 +55,34 @@ def __init__(self, slam: Slam, viewer3D: Viewer3D = None): self.info_3dpoints_plt = None self.chi2_error_plt = None self.timing_plt = None - self.traj_error_plt = None + self.last_alignment_timestamp = None - self.last_alignment_gt_data = AlignmentGroundTruthData() + self.last_alignment_gt_data = AlignmentEstimatedAndGroundTruthData() # To disable one of them just comment it out self.matched_points_plt = factory_plot2d(xlabel='img id', ylabel='# matches',title='# matches') #self.info_3dpoints_plt = factory_plot2d(xlabel='img id', ylabel='# points',title='info 3d points') 
        self.chi2_error_plt = factory_plot2d(xlabel='img id', ylabel='error',title='mean chi2 error')
         self.timing_plt = factory_plot2d(xlabel='img id', ylabel='s',title='timing')
-        self.traj_error_plt = factory_plot2d(xlabel='time [s]', ylabel='error',title='trajectories: gt vs estimated')
+        self.traj_error_plt = factory_plot2d(xlabel='time [s]', ylabel='error',title='trajectories: gt vs (aligned) estimated')
+
+
+        self.plt_list = [self.matched_points_plt, self.info_3dpoints_plt, self.chi2_error_plt, self.timing_plt, self.traj_error_plt]

         self.last_processed_kf_img_id = -1

     def quit(self):
-        if self.matched_points_plt is not None:
-            self.matched_points_plt.quit()
-        if self.info_3dpoints_plt is not None:
-            self.info_3dpoints_plt.quit()
-        if self.chi2_error_plt is not None:
-            self.chi2_error_plt.quit()
-        if self.timing_plt is not None:
-            self.timing_plt.quit()
+        for plt in self.plt_list:
+            if plt is not None:
+                plt.quit()

     def get_key(self):
-        key = self.matched_points_plt.get_key() if self.matched_points_plt is not None else None
-        if key == '' or key is None:
-            key = self.info_3dpoints_plt.get_key() if self.info_3dpoints_plt is not None else None
-        if key == '' or key is None:
-            key = self.chi2_error_plt.get_key() if self.chi2_error_plt is not None else None
-        if key == '' or key is None:
-            key = self.timing_plt.get_key() if self.timing_plt is not None else None
-        if key == '' or key is None:
-            key = self.traj_error_plt.get_key() if self.traj_error_plt is not None else None
-        return key
+        for plt in self.plt_list:
+            if plt is not None:
+                key = plt.get_key()
+                if key != '':
+                    return key

     def draw(self, img_id):
         try:
@@ -132,6 +125,7 @@ def draw(self, img_id):
                     num_static_stereo_map_points_signal = [img_id, self.slam.tracking.last_num_static_stereo_map_points]
                     self.info_3dpoints_plt.draw(num_static_stereo_map_points_signal,'# static triangulated pts',color='k')

+            # draw chi2 error
             if self.chi2_error_plt is not None:
                 if self.slam.tracking.mean_pose_opt_chi2_error is not None:
                     mean_pose_opt_chi2_error_signal = [img_id, self.slam.tracking.mean_pose_opt_chi2_error]
@@ -147,6 +141,7 @@ def draw(self, img_id):
                     mean_BA_chi2_error_signal = [img_id, self.slam.GBA.mean_squared_error.value]
                     self.chi2_error_plt.draw(mean_BA_chi2_error_signal,'GBA chi2 error',color='k')

+            # draw timings
             if self.timing_plt is not None:
                 if self.slam.tracking.time_track is not None:
                     time_track_signal = [img_id, self.slam.tracking.time_track]
@@ -181,19 +176,21 @@ def draw(self, img_id):
                     time_volumetric_integration_signal = [img_id, self.slam.volumetric_integrator.time_volumetric_integration.value]
                     self.timing_plt.draw(time_volumetric_integration_signal,'volumetric integration',color=mcolors.CSS4_COLORS['darkviolet'], marker='+')

-            # we must empty the alignment queue in any case
+            # draw trajectory and alignment error
+            # NOTE: we must empty the alignment queue in any case
             new_alignment_data = None
             while not self.viewer3D.alignment_gt_data_queue.empty():
                 new_alignment_data = self.viewer3D.alignment_gt_data_queue.get_nowait()
             if self.traj_error_plt is not None and new_alignment_data is not None:
                 num_samples = len(new_alignment_data.timestamps_associations)
                 new_alignment_timestamp = new_alignment_data.timestamps_associations[-1] if num_samples > 20 else None
-                print(f'SlamPlotDrawer: new gt alignment timestamp: {new_alignment_timestamp}, error: {new_alignment_data.error}')
-                if new_alignment_data.error >= 0 and self.last_alignment_timestamp != new_alignment_timestamp:
+                print(f'SlamPlotDrawer: new gt alignment timestamp: {new_alignment_timestamp}, error: {new_alignment_data.error}, max_error: {new_alignment_data.max_error}, is_est_aligned: {new_alignment_data.is_est_aligned}')
+                if new_alignment_data.error > 0 and self.last_alignment_timestamp != new_alignment_timestamp:
                     self.last_alignment_timestamp = new_alignment_timestamp
-                    new_alignment_data.copyTo(self.last_alignment_gt_data)
-                    # if not self.last_alignment_gt_data.is_aligned:
+                    #new_alignment_data.copyTo(self.last_alignment_gt_data)
+                    self.last_alignment_gt_data = new_alignment_data
+                    # if not self.last_alignment_gt_data.is_est_aligned:
                     #     print(f'SlamPlotDrawer: realigning estimated and gt trajectories')
                     #     if self.slam.sensor_type != SensorType.MONOCULAR:
                     #         # align the gt to estimated trajectory (T_gt_est is estimated in SE(3))
@@ -203,19 +200,20 @@ def draw(self, img_id):
                     #     else:
                     #         # align the estimated trajectory to the gt (T_gt_est is estimated in Sim(3))
                     #         #T_est_gt = np.linalg.inv(self.last_alignment_gt_data.T_gt_est)
                     #         T_est_gt = Sim3Pose().from_matrix(self.last_alignment_gt_data.T_gt_est).inverse_matrix()
-                    #     for i in range(len(self.last_alignment_gt_data.filter_t_w_i)):
-                    #         self.last_alignment_gt_data.filter_t_w_i[i] = np.dot(T_est_gt[:3, :3], self.last_alignment_gt_data.filter_t_w_i[i]) + T_est_gt[:3, 3]
+                    #     for i in range(len(self.last_alignment_gt_data.estimated_t_w_i)):
+                    #         self.last_alignment_gt_data.estimated_t_w_i[i] = np.dot(T_est_gt[:3, :3], self.last_alignment_gt_data.estimated_t_w_i[i]) + T_est_gt[:3, 3]

                     gt_traj = np.array(self.last_alignment_gt_data.gt_t_w_i, dtype=float)
-                    filter_traj = np.array(self.last_alignment_gt_data.filter_t_w_i, dtype=float)
+                    estimated_traj = np.array(self.last_alignment_gt_data.estimated_t_w_i, dtype=float)
                     filter_timestamps = np.array(self.last_alignment_gt_data.timestamps_associations, dtype=float)
+                    #print(f'SlamPlotDrawer: gt_traj: {gt_traj.shape}, estimated_traj: {estimated_traj.shape}, filter_timestamps: {filter_timestamps.shape}')
                     if False:
                         time_gt_x_signal = [filter_timestamps, gt_traj[:, 0]]
                         time_gt_y_signal = [filter_timestamps, gt_traj[:, 1]]
                         time_gt_z_signal = [filter_timestamps, gt_traj[:, 2]]
-                        time_filter_x_signal = [filter_timestamps, filter_traj[:, 0]]
-                        time_filter_y_signal = [filter_timestamps, filter_traj[:, 1]]
-                        time_filter_z_signal = [filter_timestamps, filter_traj[:, 2]]
+                        time_filter_x_signal = [filter_timestamps, estimated_traj[:, 0]]
+                        time_filter_y_signal = [filter_timestamps, estimated_traj[:, 1]]
+                        time_filter_z_signal = [filter_timestamps, estimated_traj[:, 2]]
                         self.traj_error_plt.draw(time_filter_x_signal, 'filter_x', color='r', append=False)
                         self.traj_error_plt.draw(time_filter_y_signal, 'filter_y', color='g', append=False)
                         self.traj_error_plt.draw(time_filter_z_signal, 'filter_z', color='b', append=False)
                         self.traj_error_plt.draw(time_gt_x_signal, 'gt_x', color='r', linestyle=':', append=False)
                         self.traj_error_plt.draw(time_gt_y_signal, 'gt_y', color='g', linestyle=':', append=False)
                         self.traj_error_plt.draw(time_gt_z_signal, 'gt_z', color='b', linestyle=':', append=False)
                     else:
-                        time_errx_signal = [filter_timestamps, gt_traj[:, 0] - filter_traj[:, 0]]
-                        time_erry_signal = [filter_timestamps, gt_traj[:, 1] - filter_traj[:, 1]]
-                        time_errz_signal = [filter_timestamps, gt_traj[:, 2] - filter_traj[:, 2]]
+                        traj_error = gt_traj - estimated_traj
+                        err_x_max = np.max(np.abs(traj_error[:, 0]))
+                        err_y_max = np.max(np.abs(traj_error[:, 1]))
+                        err_z_max = np.max(np.abs(traj_error[:, 2]))
+                        print(f'SlamPlotDrawer: err_x_max: {err_x_max}, err_y_max: {err_y_max}, err_z_max: {err_z_max}')
+                        time_errx_signal = [filter_timestamps, traj_error[:, 0]]
+                        time_erry_signal = [filter_timestamps, traj_error[:, 1]]
+                        time_errz_signal = [filter_timestamps, traj_error[:, 2]]
                         self.traj_error_plt.draw(time_errx_signal, 'err_x', color='r', append=False)
                         self.traj_error_plt.draw(time_erry_signal, 'err_y', color='g', append=False)
                         self.traj_error_plt.draw(time_errz_signal, 'err_z', color='b', append=False)
diff --git a/test/loopclosing/test_loop_detecting_process.py b/test/loopclosing/test_loop_detecting_process.py
index 1de0e376..0fca6d2b 100644
--- a/test/loopclosing/test_loop_detecting_process.py
+++ b/test/loopclosing/test_loop_detecting_process.py
@@ -50,7 +50,7 @@

     # Select your loop closing configuration (see the file loop_detector_configs.py). Set it to None to disable loop closing.
     # LoopDetectorConfigs: DBOW2, DBOW3, etc.
-    loop_detection_config = LoopDetectorConfigs.DBOW3
+    loop_detection_config = LoopDetectorConfigs.NETVLAD
     Printer.green('loop_detection_config: ',loop_detection_config)

     loop_detecting_process = LoopDetectingProcess(slam=None,loop_detector_config=loop_detection_config)
diff --git a/test/loopclosing/test_loop_detector.py b/test/loopclosing/test_loop_detector.py
index b68182a2..6d62b3c3 100644
--- a/test/loopclosing/test_loop_detector.py
+++ b/test/loopclosing/test_loop_detector.py
@@ -43,7 +43,7 @@

     # Select your loop closing configuration (see the file loop_detector_configs.py). Set it to None to disable loop closing.
     # LoopDetectorConfigs: DBOW2, DBOW3, etc.
-    loop_detection_config = LoopDetectorConfigs.DBOW3
+    loop_detection_config = LoopDetectorConfigs.NETVLAD
     Printer.green('loop_detection_config: ',loop_detection_config)

     loop_detector = loop_detector_factory(**loop_detection_config, slam_info=SlamFeatureManagerInfo(feature_manager=feature_tracker.feature_manager))
diff --git a/tracking.py b/tracking.py
index a324ba85..04605940 100644
--- a/tracking.py
+++ b/tracking.py
@@ -871,6 +871,7 @@ def track(self, img, img_right, depth, img_id, timestamp=None):
                 #new_pts_count = self.map.add_stereo_points(initializer_output.pts, None, f_cur, kf_cur, np.arange(len(initializer_output.pts), dtype=np.int), img)
                 Printer.green(f"map: initialized with kfs {kf_ref.id}, {kf_cur.id} and {new_pts_count} new map points")

+                # update covisibility graph connections
                 kf_ref.update_connections()
                 kf_cur.update_connections()
diff --git a/utils_data.py b/utils_data.py
index 5def5ae6..af0392e3 100644
--- a/utils_data.py
+++ b/utils_data.py
@@ -15,7 +15,7 @@ def empty_queue(queue, verbose=True):
     if platform.system() == 'Darwin':
         try:
             while not queue.empty():
-                queue.get(timeout=0.001)
+                queue.get_nowait()
         except Exception as e:
             if verbose:
                 Printer.red(f'EXCEPTION in empty_queue: {e}')
@@ -25,7 +25,7 @@
     else:
         try:
             while queue.qsize()>0:
-                queue.get(timeout=0.001)
+                queue.get_nowait()
         except Exception as e:
             if verbose:
                 Printer.red(f'EXCEPTION in empty_queue: {e}')
diff --git a/utils_geom.py b/utils_geom.py
index 714bb278..4a626f9e 100644
--- a/utils_geom.py
+++ b/utils_geom.py
@@ -453,22 +453,15 @@ def closest_rotation_matrix(A):
     return Q

-class AlignmentGroundTruthData:
-    def __init__(self, timestamps_associations=[], filter_t_w_i=[], gt_t_w_i=[], T_gt_est=None, error=-1.0, is_aligned=False):
+class AlignmentEstimatedAndGroundTruthData:
+    def __init__(self, timestamps_associations=[], estimated_t_w_i=[], gt_t_w_i=[], T_gt_est=None, error=-1.0, is_est_aligned=False, max_error=-1.0):
         self.timestamps_associations = timestamps_associations
-        self.filter_t_w_i = filter_t_w_i
+        self.estimated_t_w_i = estimated_t_w_i
         self.gt_t_w_i = gt_t_w_i
         self.T_gt_est = T_gt_est
-        self.error = error
-        self.is_aligned = is_aligned
-
-    def copyTo(self, other):
-        other.timestamps_associations = self.timestamps_associations
-        other.filter_t_w_i = self.filter_t_w_i
-        other.gt_t_w_i = self.gt_t_w_i
-        other.T_gt_est = self.T_gt_est
-        other.error = self.error
-        other.is_aligned = self.is_aligned
+        self.error = error # average alignment error
+        self.max_error = max_error # max alignment error
+        self.is_est_aligned = is_est_aligned # is the estimated trajectory aligned?


 # align filter trajectory with ground truth trajectory by computing the SE(3) transformation between the two
 # input:
 # - filter_timestamps [Nx1]
 # - gt_timestamps [Nx1]
 # - gt_t_w_i [Nx3]
 # - find_scale allows to compute the full Sim(3) transformation in case the scale is unknown
-def align_trajs_with_svd(filter_timestamps, filter_t_w_i, gt_timestamps, gt_t_w_i, align_gt=True, compute_align_error=True, find_scale=False, verbose=False):
+def align_trajs_with_svd(filter_timestamps, filter_t_w_i, gt_timestamps, gt_t_w_i, align_gt=True, compute_align_error=True, find_scale=False, align_est_associations=True, verbose=False):
     est_associations = []
     gt_associations = []
     timestamps_associations = []
@@ -528,12 +521,8 @@
     if verbose:
         print(f'num associations: {num_samples}')

-    gt = np.zeros((3, num_samples))
-    est = np.zeros((3, num_samples))
-
-    for i in range(num_samples):
-        gt[:, i] = gt_associations[i]
-        est[:, i] = est_associations[i]
+    gt = np.array(gt_associations).T
+    est = np.array(est_associations).T

     mean_gt = np.mean(gt, axis=1)
     mean_est = np.mean(est, axis=1)
@@ -551,7 +540,7 @@
         U, D, Vt = np.linalg.svd(cov)
     except:
         Printer.red('[align_trajs_with_svd] SVD failed!!!\n')
-        return np.eye(4), -1, AlignmentGroundTruthData()
+        return np.eye(4), -1, AlignmentEstimatedAndGroundTruthData()

     c = 1
     S = np.eye(3)
@@ -585,19 +574,21 @@

     # Update gt_t_w_i with transformation
     if align_gt:
-        for i in range(len(gt_t_w_i)):
-            gt_t_w_i[i] = np.dot(T_est_gt[:3, :3], gt_t_w_i[i]) + T_est_gt[:3, 3]
+        gt_t_w_i[:,:] = (T_est_gt[:3, :3] @ np.array(gt_t_w_i).T).T + T_est_gt[:3, 3]

     # Compute error
     error = 0
+    max_error = float("-inf")
     if compute_align_error:
-        for i in range(len(est_associations)):
-            res = (np.dot(T_gt_est[:3, :3], est_associations[i]) + T_gt_est[:3, 3]) - gt_associations[i]
-            error += np.dot(res.T, res)
-
-        error /= len(est_associations)
-        error = np.sqrt(error)
-
-    aligned_gt_data = AlignmentGroundTruthData(timestamps_associations, est_associations, gt_associations, T_gt_est, error, is_aligned=align_gt)
+        if align_est_associations:
+            est_associations = (T_gt_est[:3, :3] @ np.array(est_associations).T).T + T_gt_est[:3, 3]
+            residuals = est_associations - np.array(gt_associations)
+        else:
+            residuals = ((T_gt_est[:3, :3] @ np.array(est_associations).T).T + T_gt_est[:3, 3]) - np.array(gt_associations)
+        squared_errors = np.sum(residuals**2, axis=1)
+        error = np.sqrt(np.mean(squared_errors))
+        max_error = np.sqrt(np.max(squared_errors))
+
+    aligned_gt_data = AlignmentEstimatedAndGroundTruthData(timestamps_associations, est_associations, gt_associations, T_gt_est, error, max_error=max_error, is_est_aligned=align_est_associations)

     return T_gt_est, error, aligned_gt_data
\ No newline at end of file
diff --git a/viewer3D.py b/viewer3D.py
index ca5853dd..82690716 100644
--- a/viewer3D.py
+++ b/viewer3D.py
@@ -33,7 +33,7 @@

 from map import Map

-from utils_geom import inv_T, align_trajs_with_svd, AlignmentGroundTruthData
+from utils_geom import inv_T, align_trajs_with_svd, AlignmentEstimatedAndGroundTruthData
 from utils_sys import Printer
 from utils_mp import MultiprocessingManager
 from utils_data import empty_queue
@@ -58,6 +58,7 @@
 kAlignGroundTruthNumMinKeyframes = 10
 kAlignGroundTruthEveryNKeyframes = 10
 kAlignGroundTruthEveryNFrames = 30
+kAlignGroundTruthEveryTimeInterval = 2 # [s]

 kRefreshDurationTime = 0.03 # [s]

@@ -174,6 +175,7 @@ def viewer_run(self, qmap, qvo, qdense, is_running, is_paused, is_map_save, do_s
         self.thread_gt_aligned = False
         self.thread_last_num_poses_gt_was_aligned = 0
         self.thread_last_frame_id_gt_was_aligned = 0
+        self.thread_last_time_gt_was_aligned = time.time()
         self.thread_alignment_gt_data_queue = alignment_gt_data_queue
         while not pangolin.ShouldQuit() and (is_running.value == 1):
             ts = time.time()
@@ -353,13 +355,15 @@ def viewer_refresh(self, qmap, qvo, qdense, is_paused, is_map_save, do_step, do_
                 # align the gt to the estimated trajectory every 'kAlignGroundTruthEveryNKeyframes' frames;
                 # the more estimated frames we have the better the alignment!
                 num_kfs = len(self.map_state.poses)
-                condition1 = num_kfs > kAlignGroundTruthNumMinKeyframes
+                condition1 = (time.time() - self.thread_last_time_gt_was_aligned) > kAlignGroundTruthEveryTimeInterval
                 condition2 = len(self.map_state.poses) > kAlignGroundTruthEveryNKeyframes + self.thread_last_num_poses_gt_was_aligned
                 condition3 = self.map_state.cur_frame_id > kAlignGroundTruthEveryNFrames + self.thread_last_frame_id_gt_was_aligned # this is useful when we are not generating new kfs
-                if condition1 and (condition2 or condition3):
+                if self._is_running and (num_kfs > kAlignGroundTruthNumMinKeyframes) and (condition1 or condition2 or condition3): # we want at least kAlignGroundTruthNumMinKeyframes keyframes to align
                     try:
+                        self.thread_last_time_gt_was_aligned = time.time()
                         estimated_trajectory = np.array([self.map_state.poses[i][0:3,3] for i in range(len(self.map_state.poses))], dtype=float)
-                        T_gt_est, error, alignment_gt_data = align_trajs_with_svd(self.map_state.pose_timestamps, estimated_trajectory, self.thread_gt_timestamps, self.thread_gt_trajectory, align_gt=True, compute_align_error=False, find_scale=self.thread_align_gt_with_scale)
+                        T_gt_est, error, alignment_gt_data = align_trajs_with_svd(self.map_state.pose_timestamps, estimated_trajectory, self.thread_gt_timestamps, self.thread_gt_trajectory, \
+                            align_gt=True, align_est_associations=True, compute_align_error=True, find_scale=self.thread_align_gt_with_scale)
                         self.thread_alignment_gt_data_queue.put(alignment_gt_data)
                         self.thread_gt_aligned = True
                         self.thread_last_num_poses_gt_was_aligned = len(self.map_state.poses)
diff --git a/volumetric_integrator.py b/volumetric_integrator.py
index ae26c915..ae365603 100644
--- a/volumetric_integrator.py
+++ b/volumetric_integrator.py
@@ -161,10 +161,12 @@ def __init__(self, task_type, id=-1, point_cloud:VolumeIntegratorPointCloud =Non

 class VolumetricIntegrator:
     def __init__(self, slam: 'Slam'):
         import torch.multiprocessing as mp
-        mp.set_start_method('spawn', force=True) # NOTE: This generates some pickling problems with multiprocessing
-                                                 # in combination with torch and we need to check it in other places.
- # This set start method can be checked with MultiprocessingManager.is_start_method_spawn() - + # NOTE: The following set_start_method() is needed by multiprocessing for using CUDA acceleration (for instance with torch). + if mp.get_start_method() != 'spawn': + mp.set_start_method('spawn', force=True) # NOTE: This may generate some pickling problems with multiprocessing + # in combination with torch and we need to check it in other places. + # This set start method can be checked with MultiprocessingManager.is_start_method_spawn() + set_rlimit() self.camera = slam.camera
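The headline feature of this patch, the gt-vs-(aligned)estimated trajectory error plot, rests on the closed-form alignment in utils_geom.align_trajs_with_svd (an Umeyama-style SVD fit) plus the new RMSE and max-error statistics. The following is a self-contained sketch of that math under the same conventions (est and gt are Nx3 arrays of timestamp-associated positions); the function name and structure are illustrative, not the repo's API.

    import numpy as np

    def umeyama_align(est, gt, find_scale=False):
        # Closed-form SE(3)/Sim(3) fit of est onto gt (Umeyama, 1991).
        mean_est, mean_gt = est.mean(axis=0), gt.mean(axis=0)
        est_c, gt_c = est - mean_est, gt - mean_gt
        cov = gt_c.T @ est_c / est.shape[0]              # 3x3 cross-covariance
        U, D, Vt = np.linalg.svd(cov)
        S = np.eye(3)
        if np.linalg.det(U) * np.linalg.det(Vt) < 0:
            S[2, 2] = -1                                 # avoid a reflection
        R = U @ S @ Vt
        # Scale is only estimated for the Sim(3) case (e.g. monocular, where
        # absolute scale is unobservable); c = 1 gives the SE(3) fit.
        c = np.trace(np.diag(D) @ S) / np.mean(np.sum(est_c**2, axis=1)) if find_scale else 1.0
        t = mean_gt - c * R @ mean_est
        T_gt_est = np.eye(4)
        T_gt_est[:3, :3] = c * R
        T_gt_est[:3, 3] = t
        # Error statistics as in the patched code: RMSE and max over residuals.
        residuals = (c * R @ est.T).T + t - gt
        sq = np.sum(residuals**2, axis=1)
        return T_gt_est, np.sqrt(sq.mean()), np.sqrt(sq.max())

With find_scale=True this is the Sim(3) variant that the commented-out realignment code in slam_plot_drawer.py refers to; with find_scale=False it reduces to the SE(3) alignment used for non-monocular sensors.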