diff --git a/glomap/estimators/relpose_estimation.cc b/glomap/estimators/relpose_estimation.cc
index 8cd3b380..b3cee97f 100644
--- a/glomap/estimators/relpose_estimation.cc
+++ b/glomap/estimators/relpose_estimation.cc
@@ -54,12 +54,14 @@ void EstimateRelativePoses(ViewGraph& view_graph,
       // Collect the original 2D points
       points2D_1.clear();
       points2D_2.clear();
-      for (size_t idx = 0; idx < matches.rows(); idx++) {
-        points2D_1.push_back(image1.features[matches(idx, 0)]);
-        points2D_2.push_back(image2.features[matches(idx, 1)]);
-      }
-      // If the camera model is not supported by poselib
-      if (!valid_camera_model) {
+
+      if (valid_camera_model) {
+        for (size_t idx = 0; idx < matches.rows(); idx++) {
+          points2D_1.push_back(image1.features[matches(idx, 0)]);
+          points2D_2.push_back(image2.features[matches(idx, 1)]);
+        }
+      } else {
+        // If the camera model is not supported by poselib
         // Undistort points
         // Note that here, we still rescale by the focal length (to avoid
         // change the RANSAC threshold)
@@ -70,8 +72,20 @@ void EstimateRelativePoses(ViewGraph& view_graph,
         K2_new(0, 0) = camera2.FocalLengthX();
         K2_new(1, 1) = camera2.FocalLengthY();
         for (size_t idx = 0; idx < matches.rows(); idx++) {
-          points2D_1[idx] = K1_new * camera1.CamFromImg(points2D_1[idx]);
-          points2D_2[idx] = K2_new * camera2.CamFromImg(points2D_2[idx]);
+          const std::optional<Eigen::Vector2d> p1 =
+              camera1.CamFromImg(image1.features[matches(idx, 0)]);
+          // Use camera2 for image2's features (the draft wrongly used camera1).
+          const std::optional<Eigen::Vector2d> p2 =
+              camera2.CamFromImg(image2.features[matches(idx, 1)]);
+
+          // NOTE(review): skipping invalid points makes points2D_* shorter
+          // than `matches`; confirm downstream inlier indexing tolerates this.
+          if (!p1 || !p2) continue;
+
+          // Keep the focal-length rescaling (see comment above) so the
+          // RANSAC threshold stays meaningful.
+          points2D_1.push_back(K1_new * p1.value());
+          points2D_2.push_back(K2_new * p2.value());
         }
 
         // Reset the camera to be the pinhole camera with original focal
diff --git a/glomap/processors/image_undistorter.cc b/glomap/processors/image_undistorter.cc
index 012465d1..05c375aa 100644
--- a/glomap/processors/image_undistorter.cc
+++ b/glomap/processors/image_undistorter.cc
@@ -31,8 +31,16 @@ void UndistortImages(std::unordered_map<camera_t, Camera>& cameras,
     image.features_undist.clear();
     image.features_undist.reserve(num_points);
     for (int i = 0; i < num_points; i++) {
-      image.features_undist.emplace_back(
-          camera.CamFromImg(image.features[i]).homogeneous().normalized());
+      const std::optional<Eigen::Vector2d> feat_undist =
+          camera.CamFromImg(image.features[i]);
+      if (feat_undist) {
+        image.features_undist.emplace_back(
+            feat_undist->homogeneous().normalized());
+      } else {
+        // Push a sentinel instead of dropping the point: features_undist is
+        // indexed by feature_id elsewhere, so the sizes must stay aligned.
+        image.features_undist.emplace_back(Eigen::Vector3d::Zero());
+      }
     }
   });
 }
diff --git a/glomap/processors/track_filter.cc b/glomap/processors/track_filter.cc
index c3a78f7d..a2821c2a 100644
--- a/glomap/processors/track_filter.cc
+++ b/glomap/processors/track_filter.cc
@@ -17,7 +17,7 @@ int TrackFilter::FilterTracksByReprojection(
     for (auto& [image_id, feature_id] : track.observations) {
       const Image& image = images.at(image_id);
       Eigen::Vector3d pt_calc = image.cam_from_world * track.xyz;
-      if (pt_calc(2) < EPS) continue;
+      if (pt_calc(2) < std::numeric_limits<double>::epsilon()) continue;
 
       double reprojection_error = max_reprojection_error;
       if (in_normalized_image) {
@@ -29,10 +29,13 @@ int TrackFilter::FilterTracksByReprojection(
             (pt_reproj - feature_undist.head(2) / (feature_undist(2) + EPS))
                 .norm();
       } else {
-        Eigen::Vector2d pt_reproj = pt_calc.head(2) / pt_calc(2);
-        Eigen::Vector2d pt_dist;
-        pt_dist = cameras.at(image.camera_id).ImgFromCam(pt_reproj);
-        reprojection_error = (pt_dist - image.features.at(feature_id)).norm();
+        // The epsilon-depth check above should already guarantee a valid
+        // projection, but guard anyway in case the implementation changes.
+        const std::optional<Eigen::Vector2d> pt_dist =
+            cameras.at(image.camera_id).ImgFromCam(pt_calc);
+        if (!pt_dist) continue;
+        reprojection_error =
+            (pt_dist.value() - image.features.at(feature_id)).norm();
       }
 
       // If the reprojection error is smaller than the threshold, then keep it