WIP: integrate absolute pose estimation and refactor code.
Odd Kiva committed Apr 25, 2024
1 parent 13908ef commit 8ffbfda
Showing 5 changed files with 120 additions and 100 deletions.
24 changes: 12 additions & 12 deletions cpp/src/DO/Sara/SfM/BuildingBlocks/CameraPoseEstimator.hpp
@@ -11,10 +11,10 @@

#pragma once

#include "DO/Sara/MultiViewGeometry/PointRayCorrespondenceList.hpp"
#include <DO/Sara/Core/PhysicalQuantities.hpp>
#include <DO/Sara/MultiViewGeometry/Camera/v2/BrownConradyCamera.hpp>
#include <DO/Sara/MultiViewGeometry/Camera/v2/PinholeCamera.hpp>
#include <DO/Sara/MultiViewGeometry/MinimalSolvers/P3PSolver.hpp>
#include <DO/Sara/MultiViewGeometry/PointRayCorrespondenceList.hpp>
#include <DO/Sara/SfM/BuildingBlocks/PointCloudGenerator.hpp>
#include <DO/Sara/SfM/Graph/CameraPoseGraph.hpp>
#include <DO/Sara/SfM/Graph/FeatureGraph.hpp>
@@ -26,7 +26,7 @@ namespace DO::Sara {
{
public:
using FeatureTrack = PointCloudGenerator::FeatureTrack;
using CameraIntrinsicModel = v2::BrownConradyDistortionModel<double>;
using CameraIntrinsicModel = v2::PinholeCamera<double>;
using PoseMatrix = Eigen::Matrix<double, 3, 4>;
using Inlier = Tensor_<bool, 1>;
using MinimalSample = Tensor_<int, 1>;
@@ -38,10 +38,10 @@
}

//! @brief Set robust estimation parameters.
auto set_estimation_params(const PixelUnit error_max = 0.5_px,
const int ransac_iter_max = 1000u,
const double ransac_confidence_min = 0.99)
-> void
auto
set_estimation_params(const PixelUnit error_max = 0.5_px,
const int ransac_iter_max = 1000u,
const double ransac_confidence_min = 0.99) -> void
{
_inlier_predicate.ε = error_max.value;
_ransac_iter_max = ransac_iter_max;
@@ -54,11 +54,11 @@
const CameraIntrinsicModel&)
-> std::tuple<PoseMatrix, Inlier, MinimalSample>;

auto estimate_pose(const std::vector<FeatureTrack>&,
const CameraPoseGraph::Vertex, //
const CameraIntrinsicModel&, //
const PointCloudGenerator&)
-> std::pair<PoseMatrix, bool>;
auto
estimate_pose(const std::vector<FeatureTrack>&,
const CameraPoseGraph::Vertex, //
const CameraIntrinsicModel&, //
const PointCloudGenerator&) -> std::pair<PoseMatrix, bool>;

private:
P3PSolver<double> _solver;
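A note on the robust-estimation parameters above: error_max is the inlier reprojection threshold in pixels (it sets _inlier_predicate.ε), while ransac_iter_max and ransac_confidence_min bound the RANSAC loop. The snippet below is a standalone sketch of the standard adaptive iteration count for a P3P solver, whose minimal sample is 3 point-ray correspondences; the function name and signature are hypothetical, not Sara's API.

#include <algorithm>
#include <cmath>
#include <cstdio>

// Smallest number of RANSAC draws N such that, with probability
// `confidence_min`, at least one minimal sample is outlier-free:
// N >= log(1 - confidence_min) / log(1 - w^s), with w the inlier ratio
// and s = 3 for P3P.
auto ransac_iterations(const double confidence_min, const double inlier_ratio,
                       const int sample_size, const int iter_max) -> int
{
  const auto w_s = std::pow(inlier_ratio, sample_size);
  if (w_s <= 0.)
    return iter_max;  // No inlier information yet: keep the hard cap.
  if (w_s >= 1.)
    return 1;
  const auto n = std::log(1. - confidence_min) / std::log(1. - w_s);
  return std::min(iter_max, static_cast<int>(std::ceil(n)));
}

int main()
{
  // With a 50% inlier ratio, 35 iterations reach 99% confidence.
  std::printf("%d\n", ransac_iterations(0.99, 0.5, 3, 1000));
}

The defaults in set_estimation_params (0.5 px, 1000 iterations, 0.99 confidence) fit this picture: the cap of 1000 only binds when the inlier ratio is low.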
10 changes: 5 additions & 5 deletions cpp/src/DO/Sara/SfM/BuildingBlocks/PointCloudGenerator.cpp
@@ -83,8 +83,8 @@ auto PointCloudGenerator::filter_by_non_max_suppression(
}

auto PointCloudGenerator::find_feature_vertex_at_pose(
const FeatureTrack& track, const PoseVertex pose_vertex) const
-> std::optional<FeatureVertex>
const FeatureTrack& track,
const PoseVertex pose_vertex) const -> std::optional<FeatureVertex>
{
auto v = std::find_if(track.begin(), track.end(),
[this, pose_vertex](const auto& v) {
@@ -148,7 +148,7 @@ auto PointCloudGenerator::retrieve_scene_point_color(
const Eigen::Vector3d& scene_point, //
const ImageView<Rgb8>& image, //
const QuaternionBasedPose<double>& pose,
const v2::BrownConradyDistortionModel<double>& camera) const -> Rgb64f
const v2::PinholeCamera<double>& camera) const -> Rgb64f
{
const auto& w = image.width();
const auto& h = image.height();
@@ -270,8 +270,8 @@ auto PointCloudGenerator::compress_point_cloud(
auto PointCloudGenerator::grow_point_cloud(
const std::vector<FeatureTrack>& ftracks_without_scene_point,
const ImageView<Rgb8>& image, //
const PoseEdge pose_edge,
const v2::BrownConradyDistortionModel<double>& camera) -> void
const PoseEdge pose_edge, //
const v2::PinholeCamera<double>& camera) -> void
{
auto& logger = Logger::get();

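For context on the retrieve_scene_point_color change above: with the distortion-free pinhole model that this commit switches to, fetching a scene point's color reduces to a rigid transform, a perspective division, and a bounds check on the undistorted image. The sketch below illustrates the arithmetic under assumed types; PinholeK, project, and the world-to-camera pose convention are illustrative, not Sara's actual declarations.

#include <Eigen/Dense>
#include <optional>

// Assumed pinhole intrinsics: focal lengths, shear, principal point.
struct PinholeK
{
  double fx, fy, shear, u0, v0;
};

// Perspective projection of a point already in the camera frame.
auto project(const PinholeK& K, const Eigen::Vector3d& Xc)
    -> std::optional<Eigen::Vector2d>
{
  if (Xc.z() <= 0.)  // Behind the camera: no color can be fetched.
    return std::nullopt;
  const auto x = Xc.x() / Xc.z();
  const auto y = Xc.y() / Xc.z();
  return Eigen::Vector2d{K.fx * x + K.shear * y + K.u0,  //
                         K.fy * y + K.v0};
}

// World point -> pixel, with (R, t) the world-to-camera motion. The caller
// still checks 0 <= u < w and 0 <= v < h against the image size and divides
// the 8-bit color by 255 to obtain an Rgb64f value.
auto project_scene_point(const PinholeK& K, const Eigen::Matrix3d& R,
                         const Eigen::Vector3d& t, const Eigen::Vector3d& Xw)
    -> std::optional<Eigen::Vector2d>
{
  return project(K, R * Xw + t);
}

This also explains why set_config below can build the corrected camera by copying only fx, fy, shear, u0, v0 from the Brown-Conrady model: once the image is undistorted, those pinhole parameters are all that remain.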
9 changes: 5 additions & 4 deletions cpp/src/DO/Sara/SfM/BuildingBlocks/PointCloudGenerator.hpp
@@ -11,6 +11,7 @@

#pragma once

#include "DO/Sara/MultiViewGeometry/Camera/v2/PinholeCamera.hpp"
#include <DO/Sara/SfM/BuildingBlocks/RgbColoredPoint.hpp>
#include <DO/Sara/SfM/Graph/CameraPoseGraph.hpp>
#include <DO/Sara/SfM/Graph/FeatureTracker.hpp>
@@ -75,7 +76,7 @@ namespace DO::Sara {
const Eigen::Vector3d& scene_point, //
const ImageView<Rgb8>& image, //
const QuaternionBasedPose<double>& pose,
const v2::BrownConradyDistortionModel<double>& camera) const -> Rgb64f;
const v2::PinholeCamera<double>& camera) const -> Rgb64f;

public: /* data transformation methods */
//! @brief Calculate the barycentric scene point.
@@ -101,8 +102,8 @@ namespace DO::Sara {
//! - The scene point is recalculated as the barycenter of the
//! possibly multiple scene points we have found after recalculating the
//! feature tracks.
auto propagate_scene_point_indices(const std::vector<FeatureTrack>&)
-> void;
auto
propagate_scene_point_indices(const std::vector<FeatureTrack>&) -> void;

//! - The point cloud compression reassigns a unique scene point to
//! each feature track.
@@ -121,7 +122,7 @@
const std::vector<FeatureTrack>& feature_tracks_without_scene_point,
const ImageView<Rgb8>& image, //
const PoseEdge pose_edge,
const v2::BrownConradyDistortionModel<double>& camera) -> void;
const v2::PinholeCamera<double>& camera) -> void;

private: /* data members */
const CameraPoseGraph& _pose_graph;
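The "barycentric scene point" mentioned in the comments above deserves a concrete illustration: when track propagation leaves one feature track pointing at several scene points, they are fused into a single position. A minimal sketch, assuming a plain std::vector of Eigen points rather than Sara's internal point-cloud storage:

#include <Eigen/Dense>
#include <cassert>
#include <vector>

// Fuse the scene points associated with one feature track into their
// arithmetic mean (a barycenter with uniform weights).
auto barycenter(const std::vector<Eigen::Vector3d>& scene_points)
    -> Eigen::Vector3d
{
  assert(!scene_points.empty());
  Eigen::Vector3d sum = Eigen::Vector3d::Zero();
  for (const auto& x : scene_points)
    sum += x;
  return sum / static_cast<double>(scene_points.size());
}

Compression then rebuilds the point cloud so that each track keeps exactly one such fused point, which is what grow_geometry relies on before running PnP.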
160 changes: 88 additions & 72 deletions cpp/src/DO/Sara/SfM/OdometryV2/OdometryPipeline.cpp
@@ -9,6 +9,7 @@
// you can obtain one at http://mozilla.org/MPL/2.0/.
// ========================================================================== //

#include "DO/Sara/MultiViewGeometry/Geometry/QuaternionBasedPose.hpp"
#include <DO/Sara/SfM/OdometryV2/OdometryPipeline.hpp>

#include <DO/Sara/Logging/Logger.hpp>
@@ -29,7 +30,12 @@ auto v2::OdometryPipeline::set_config(
{
// Build the dependency graph.
_video_streamer.open(video_path);
// The original camera.
_camera = camera;
// The virtual camera for the undistorted image.
_camera_corrected.focal_lengths() << camera.fx(), camera.fy();
_camera_corrected.shear() = camera.shear();
_camera_corrected.principal_point() << camera.u0(), camera.v0();

// Computer vision tasks.
_distortion_corrector = std::make_unique<ImageDistortionCorrector>(
@@ -99,8 +105,7 @@ auto v2::OdometryPipeline::estimate_relative_pose(
matches.resize(_feature_params.num_matches_max);

auto [two_view_geometry, inliers, sample_best] =
_rel_pose_estimator.estimate_relative_pose(keys_src, keys_dst,
matches);
_rel_pose_estimator.estimate_relative_pose(keys_src, keys_dst, matches);
SARA_LOGI(logger, "[Relative Pose] Estimated relative pose...");

const auto res = std::make_pair( //
@@ -157,20 +162,92 @@ auto v2::OdometryPipeline::grow_geometry() -> bool
}
SARA_LOGI(logger, "[SfM] Relative pose succeeded!");


// if (_pose_graph.num_vertices() == 1)
// {
auto abs_pose_curr = QuaternionBasedPose<double>{
.q = Eigen::Quaterniond{rel_pose_data.motion.R},
.t = rel_pose_data.motion.t //
// 1. Add the absolute pose vertex.
auto abs_pose_curr =
_pose_graph.num_vertices() == 1
? QuaternionBasedPose<double>{}
: QuaternionBasedPose<double>{
.q = Eigen::Quaterniond{rel_pose_data.motion.R},
.t = rel_pose_data.motion.t //
};
auto abs_pose_data = AbsolutePoseData{
frame_number, //
std::move(keys_curr), //
std::move(abs_pose_curr) //
};
_pose_curr = _pose_graph.add_absolute_pose(std::move(abs_pose_data));

// 2. Update the feature tracks by adding the feature matches that are
// verified by the relative pose estimation.
// Note the move semantics, which will invalidate the relative pose data
// after this call.
//
// Note that in the case of only two views, feature tracks are "compressed"
// feature matches.
const auto pose_edge = _pose_graph.add_relative_pose( //
_pose_prev, _pose_curr, //
std::move(rel_pose_data));
_feature_tracker.update_feature_tracks(_pose_graph, pose_edge);

// 3. Recalculate the feature tracks that are still alive.
std::tie(_tracks_alive, _track_visibility_count) =
_feature_tracker.calculate_alive_feature_tracks(_pose_curr);

// Extra steps for when the pose graph contains 2 poses.
if (_pose_graph.num_vertices() == 2)
{
_tracks_alive_without_scene_point = _tracks_alive;
}
// Extra steps for when the pose graph contains 3 poses or more.
else
{
// 4. Propagate the scene points to the feature tracks that grew longer.
// The feature tracks that grew longer can only be those among the
// tracks that are still alive.
SARA_LOGI(
logger,
"Propagating the scene points to feature tracks that grew longer...");
_point_cloud_generator->propagate_scene_point_indices(_tracks_alive);

// 5. Reassign a unique scene point to each feature track by
// compressing the point cloud.
SARA_LOGI(logger, "Compressing the point cloud...");
_point_cloud_generator->compress_point_cloud(
_feature_tracker._feature_tracks);

// 6. Determine the current absolute pose from the alive tracks using a PnP
// approach.
std::tie(_tracks_alive_with_known_scene_point,
_tracks_alive_without_scene_point) =
_point_cloud_generator->split_by_scene_point_knowledge(_tracks_alive);
const auto [abs_pose_mat, abs_pose_est_successful] =
_abs_pose_estimator.estimate_pose(_tracks_alive_with_known_scene_point,
_pose_curr, _camera_corrected,
*_point_cloud_generator);
if (!abs_pose_est_successful)
return false;

// 7. Update the current absolute pose, which was initialized with a
// placeholder value.
abs_pose_curr = QuaternionBasedPose<double>{
Eigen::Quaterniond{abs_pose_mat.leftCols<3>()}, //
abs_pose_mat.col(3) //
};
_pose_graph[_pose_curr].pose = abs_pose_curr;
}

// 8. Grow point cloud by triangulation.
//
// TODO: don't add 3D scene points that are too far, like points in the sky
const auto frame_corrected = _distortion_corrector->frame_rgb8();
_point_cloud_generator->grow_point_cloud(_tracks_alive_without_scene_point,
frame_corrected, pose_edge,
_camera_corrected);

// The rotation is expressed in the camera coordinates.
// But the calculation is done in the automotive/aeronautics coordinate
// system.
//
// The z-coordinate of the camera coordinates is the x-axis of the automotive
// coordinates
// The z-coordinate of the camera coordinates is the x-axis of the
// automotive coordinates
//
// clang-format off
static const auto P = (Eigen::Matrix3d{} <<
@@ -180,7 +257,7 @@ auto v2::OdometryPipeline::grow_geometry() -> bool
).finished();
// clang-format on

const auto& R = rel_pose_data.motion.R;
const auto& R = _pose_graph[pose_edge].motion.R;
const Eigen::Matrix3d R_delta_abs = P * R.transpose() * P.transpose();
_current_global_rotation = R_delta_abs * _current_global_rotation;

@@ -191,66 +268,5 @@ auto v2::OdometryPipeline::grow_geometry() -> bool
SARA_LOGI(logger, "Global pitch = {} deg", angles(1) * degrees);
SARA_LOGI(logger, "Global roll = {} deg", angles(2) * degrees);

auto abs_pose_data = AbsolutePoseData{
frame_number, //
std::move(keys_curr), //
std::move(abs_pose_curr) //
};

// 1. Add the absolute pose vertex.
_pose_curr = _pose_graph.add_absolute_pose(std::move(abs_pose_data));

// 2. Add the pose edge, which will invalidate the relative pose data.
const auto pose_edge = _pose_graph.add_relative_pose(
_pose_prev, _pose_curr, std::move(rel_pose_data));

// 3. Grow the feature graph by adding the feature matches.
_feature_tracker.update_feature_tracks(_pose_graph, pose_edge);
std::tie(_tracks_alive, _track_visibility_count) =
_feature_tracker.calculate_alive_feature_tracks(_pose_curr);

// 4. Initialize the point cloud.
//
// TODO: don't add 3D scene points that are too far, like points in the
// sky.
//
// TODO: don't clear next time; we only clear now to ease debugging.
_point_cloud.clear();
const auto frame_rgb8 = _distortion_corrector->frame_rgb8();
_point_cloud_generator->grow_point_cloud(_tracks_alive, frame_rgb8, pose_edge,
_camera);

return true;
// }

// // 1. Update the feature tracks by adding the feature matches that are
// // verified by the relative pose estimation.
// const auto pose_edge = _pose_graph.add_relative_pose( //
// _pose_prev, _pose_curr, //
// std::move(rel_pose_data));
// _feature_tracker.update_feature_tracks(_pose_graph, pose_edge);

// // 2. Recalculate the feature tracks that are still alive.
// std::tie(_tracks_alive, _track_visibility_count) =
// _feature_tracker.calculate_alive_feature_tracks(_pose_curr);

// // 2. Propagate the scene point to the feature tracks that grew longer.
// // The feature tracks that grew longer can only be those among the
// // tracks still alive.
// SARA_LOGI(logger, "Propagating the scene points to new features...");
// _point_cloud_generator->propagate_scene_point_indices(_tracks_alive);

// // 3. Reassign a unique scene point cloud to each feature tracks by
// // compressing the point cloud.
// SARA_LOGI(logger, "Compressing the point cloud...");
// _point_cloud_generator->compress_point_cloud(
// _feature_tracker._feature_tracks);

// // 4. Determine the current absolute pose from the alive tracks.

// // TODO: Grow point cloud by triangulation.
// _point_cloud_generator->grow_point_cloud(_ftracks_without_scene_point,
// frame_rgb8, pose_edge, _camera);
// return false;
}
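To make the basis change at the end of grow_geometry concrete: conjugating by P re-expresses the camera-frame rotation increment in automotive axes before it is composed into _current_global_rotation, and yaw/pitch/roll then come out of a ZYX Euler decomposition. The entries of P are collapsed in this diff view, so the matrix below is an assumed mapping (camera z → automotive x, camera x → automotive −y, camera y → automotive −z), shown only to make the algebra runnable; it is not necessarily the commit's matrix.

#include <Eigen/Dense>
#include <cstdio>

int main()
{
  // Assumed axis permutation; the commit's actual values are elided above.
  Eigen::Matrix3d P;
  P <<  0,  0,  1,
       -1,  0,  0,
        0, -1,  0;

  // A sample relative camera rotation: 5 degrees about the camera y-axis,
  // which the permutation turns into a pure yaw in the automotive frame.
  constexpr auto pi = 3.14159265358979323846;
  const Eigen::Matrix3d R =
      Eigen::AngleAxisd(5. * pi / 180., Eigen::Vector3d::UnitY())
          .toRotationMatrix();

  Eigen::Matrix3d R_global = Eigen::Matrix3d::Identity();
  const Eigen::Matrix3d R_delta_abs = P * R.transpose() * P.transpose();
  R_global = R_delta_abs * R_global;

  // ZYX (yaw, pitch, roll) angles in degrees, as in the SARA_LOGI lines.
  const Eigen::Vector3d angles = R_global.eulerAngles(2, 1, 0) * 180. / pi;
  std::printf("yaw = %.2f, pitch = %.2f, roll = %.2f\n",  //
              angles(0), angles(1), angles(2));
}

The same convention explains why the new code reads the rotation from _pose_graph[pose_edge].motion.R rather than rel_pose_data.motion.R: after the std::move into add_relative_pose, the graph edge owns the only valid copy.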
17 changes: 10 additions & 7 deletions cpp/src/DO/Sara/SfM/OdometryV2/OdometryPipeline.hpp
@@ -11,6 +11,7 @@

#pragma once

#include "DO/Sara/MultiViewGeometry/Camera/v2/PinholeCamera.hpp"
#include "DO/Sara/SfM/BuildingBlocks/CameraPoseEstimator.hpp"
#include <DO/Sara/Features/KeypointList.hpp>
#include <DO/Sara/SfM/BuildingBlocks/PointCloudGenerator.hpp>
@@ -25,9 +26,9 @@ namespace DO::Sara::v2 {
class OdometryPipeline
{
public:
auto set_config(const std::filesystem::path& video_path,
const v2::BrownConradyDistortionModel<double>& camera)
-> void;
auto
set_config(const std::filesystem::path& video_path,
const v2::BrownConradyDistortionModel<double>& camera) -> void;

auto read() -> bool;

@@ -44,17 +45,17 @@ namespace DO::Sara::v2 {
auto detect_keypoints(const ImageView<float>&) const
-> KeypointList<OERegion, float>;

auto
estimate_relative_pose(const KeypointList<OERegion, float>& keys_src,
const KeypointList<OERegion, float>& keys_dst) const
-> std::pair<RelativePoseData, TwoViewGeometry>;
auto estimate_relative_pose(const KeypointList<OERegion, float>& keys_src,
const KeypointList<OERegion, float>& keys_dst)
const -> std::pair<RelativePoseData, TwoViewGeometry>;

private: /* graph update tasks */
auto grow_geometry() -> bool;

public: /* data members */
VideoStreamer _video_streamer;
v2::BrownConradyDistortionModel<double> _camera;
v2::PinholeCamera<double> _camera_corrected;

//! @brief Data mutators.
//! @{
@@ -78,6 +79,8 @@ namespace DO::Sara::v2 {
CameraPoseGraph::Vertex _pose_curr;
CameraPoseGraph::Edge _relative_pose_edge;
FeatureTracker::TrackArray _tracks_alive;
FeatureTracker::TrackArray _tracks_alive_with_known_scene_point;
FeatureTracker::TrackArray _tracks_alive_without_scene_point;
FeatureTracker::TrackVisibilityCountArray _track_visibility_count;
Eigen::Matrix3d _current_global_rotation = Eigen::Matrix3d::Identity();
//! @}
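Finally, a hypothetical driver showing how this class is meant to be fed. Only set_config and read are public in this header, so the calibration source, the video path, and the loop body below are illustrative assumptions, not the commit's own usage.

#include <DO/Sara/SfM/OdometryV2/OdometryPipeline.hpp>

int main()
{
  namespace sara = DO::Sara;

  // Calibrated intrinsics for the original (distorted) camera; the values
  // must come from a prior calibration step.
  auto camera = sara::v2::BrownConradyDistortionModel<double>{};

  auto pipeline = sara::v2::OdometryPipeline{};
  pipeline.set_config("video.mp4", camera);

  while (pipeline.read())
  {
    // Per-frame work -- feature tracking, relative/absolute pose estimation,
    // point-cloud growth -- is driven by the pipeline's private machinery
    // (see grow_geometry in the .cpp diff above).
  }
}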
