diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
index f65ff4ad322a5..7d7132309fb91 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp
@@ -18,8 +18,6 @@
 #include "image_projection_based_fusion/fusion_node.hpp"
 #include "tier4_autoware_utils/ros/debug_publisher.hpp"
 
-#include <image_projection_based_fusion/utils/utils.hpp>
-
 #include "autoware_auto_perception_msgs/msg/object_classification.hpp"
 
 #include <map>
@@ -47,8 +45,7 @@ class RoiDetectedObjectFusionNode
 
   std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
     const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-    const Eigen::Affine3d & object2camera_affine,
-    const image_geometry::PinholeCameraModel & pinhole_camera_model);
+    const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection);
 
   void fuseObjectsOnImage(
     const DetectedObjects & input_object_msg,
diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
index 4d4fd8d2dac42..4cbc0990352e6 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp
@@ -17,8 +17,6 @@
 
 #include "image_projection_based_fusion/fusion_node.hpp"
 
-#include <image_projection_based_fusion/utils/utils.hpp>
-
 #include <string>
 #include <vector>
 namespace image_projection_based_fusion
diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
index 446417f060b9b..4168c483469ab 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp
@@ -17,8 +17,6 @@
 
 #include "image_projection_based_fusion/fusion_node.hpp"
 
-#include <image_projection_based_fusion/utils/utils.hpp>
-
 #include <string>
 #include <vector>
 
diff --git a/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp b/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp
index 04c010523b02a..1851d3faaee6e 100644
--- a/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp
+++ b/perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp
@@ -42,7 +42,6 @@
 #include "autoware_auto_perception_msgs/msg/shape.hpp"
 #include "tier4_perception_msgs/msg/detected_object_with_feature.hpp"
 
-#include <image_geometry/pinhole_camera_model.h>
 #include <pcl/point_cloud.h>
 #include <pcl/point_types.h>
 #include <pcl_conversions/pcl_conversions.h>
@@ -61,10 +60,6 @@ struct PointData
   float distance;
   size_t orig_index;
 };
-
-Eigen::Vector2d calcRawImageProjectedPoint(
-  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d);
-
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time);
diff --git a/perception/image_projection_based_fusion/package.xml b/perception/image_projection_based_fusion/package.xml
index f6eef1d0c2ace..a5b41a30be166 100644
--- a/perception/image_projection_based_fusion/package.xml
+++ b/perception/image_projection_based_fusion/package.xml
@@ -20,7 +20,6 @@
   <depend>autoware_point_types</depend>
   <depend>cv_bridge</depend>
   <depend>euclidean_cluster</depend>
-  <depend>image_geometry</depend>
   <depend>image_transport</depend>
   <depend>lidar_centerpoint</depend>
   <depend>message_filters</depend>
diff --git a/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp b/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
index 99fe667c354d5..2c33df0b65afc 100644
--- a/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp
@@ -307,9 +307,6 @@ void PointPaintingFusionNode::fuseOnSingleImage(
   const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
   const auto p_step = painted_pointcloud_msg.point_step;
   // projection matrix
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
   Eigen::Matrix3f camera_projection;  // use only x,y,z
   camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
     camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
@@ -345,15 +342,15 @@ dc | dc dc dc dc ||zc|
       continue;
     }
     // project
-    Eigen::Vector2d projected_point =
-      calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(p_x, p_y, p_z));
-
+    Eigen::Vector3f normalized_projected_point = camera_projection * Eigen::Vector3f(p_x, p_y, p_z);
     // iterate 2d bbox
     for (const auto & feature_object : objects) {
       sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
       // paint current point if it is inside bbox
       int label2d = feature_object.object.classification.front().label;
-      if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, p_z)) {
+      if (
+        !isUnknown(label2d) &&
+        isInsideBbox(normalized_projected_point.x(), normalized_projected_point.y(), roi, p_z)) {
         data = &painted_pointcloud_msg.data[0];
         auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
         for (const auto & cls : isClassTable_) {
@@ -364,7 +361,7 @@ dc | dc dc dc dc ||zc|
 #if 0
   // Parallelizing loop don't support push_back
     if (debugger_) {
-      debug_image_points.push_back(projected_point);
+      debug_image_points.push_back(normalized_projected_point);
     }
 #endif
   }
diff --git a/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp b/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
index bfe313a838fd6..feaf1ad495ce8 100644
--- a/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
@@ -83,8 +83,10 @@ void RoiClusterFusionNode::fuseOnSingleImage(
   const DetectedObjectsWithFeature & input_roi_msg,
   const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
 {
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
+  Eigen::Matrix4d projection;
+  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
+    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
+    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
 
   // get transform from cluster frame id to camera optical frame id
   geometry_msgs::msg::TransformStamped transform_stamped;
@@ -131,19 +133,23 @@ void RoiClusterFusionNode::fuseOnSingleImage(
         continue;
       }
 
-      Eigen::Vector2d projected_point =
-        calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
+      Eigen::Vector4d projected_point =
+        projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
+      Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
+        projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
       if (
-        0 <= static_cast<int>(projected_point.x()) &&
-        static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
-        0 <= static_cast<int>(projected_point.y()) &&
-        static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
-        min_x = std::min(static_cast<int>(projected_point.x()), min_x);
-        min_y = std::min(static_cast<int>(projected_point.y()), min_y);
-        max_x = std::max(static_cast<int>(projected_point.x()), max_x);
-        max_y = std::max(static_cast<int>(projected_point.y()), max_y);
-        projected_points.push_back(projected_point);
-        if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
+        0 <= static_cast<int>(normalized_projected_point.x()) &&
+        static_cast<int>(normalized_projected_point.x()) <=
+          static_cast<int>(camera_info.width) - 1 &&
+        0 <= static_cast<int>(normalized_projected_point.y()) &&
+        static_cast<int>(normalized_projected_point.y()) <=
+          static_cast<int>(camera_info.height) - 1) {
+        min_x = std::min(static_cast<int>(normalized_projected_point.x()), min_x);
+        min_y = std::min(static_cast<int>(normalized_projected_point.y()), min_y);
+        max_x = std::max(static_cast<int>(normalized_projected_point.x()), max_x);
+        max_y = std::max(static_cast<int>(normalized_projected_point.y()), max_y);
+        projected_points.push_back(normalized_projected_point);
+        if (debugger_) debugger_->obstacle_points_.push_back(normalized_projected_point);
       }
     }
     if (projected_points.empty()) {
diff --git a/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp b/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
index 7d85ecb2f9200..762d3e0ee51b1 100644
--- a/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
@@ -86,12 +86,15 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
     object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
   }
 
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
+  Eigen::Matrix4d camera_projection;
+  camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
+    camera_info.p.at(3), camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6),
+    camera_info.p.at(7), camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10),
+    camera_info.p.at(11);
 
   const auto object_roi_map = generateDetectedObjectRoIs(
     input_object_msg, static_cast<double>(camera_info.width),
-    static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
+    static_cast<double>(camera_info.height), object2camera_affine, camera_projection);
   fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
 
   if (debugger_) {
@@ -106,8 +109,7 @@
 std::map<std::size_t, DetectedObjectWithFeature>
 RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
   const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-  const Eigen::Affine3d & object2camera_affine,
-  const image_geometry::PinholeCameraModel & pinhole_camera_model)
+  const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection)
 {
   std::map<std::size_t, DetectedObjectWithFeature> object_roi_map;
   int64_t timestamp_nsec =
@@ -146,8 +148,13 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
       continue;
     }
 
-    Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()));
+    Eigen::Vector2d proj_point;
+    {
+      Eigen::Vector4d proj_point_hom =
+        camera_projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
+      proj_point = Eigen::Vector2d(
+        proj_point_hom.x() / (proj_point_hom.z()), proj_point_hom.y() / (proj_point_hom.z()));
+    }
 
     min_x = std::min(proj_point.x(), min_x);
     min_y = std::min(proj_point.y(), min_y);
diff --git a/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp b/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
index b7f792bfe6e75..3f78ee159330f 100644
--- a/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp
@@ -101,9 +101,10 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
   }
 
   // transform pointcloud to camera optical frame id
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
+  Eigen::Matrix4d projection;
+  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
+    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
+    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
   geometry_msgs::msg::TransformStamped transform_stamped;
   {
     const auto transform_stamped_optional = getTransformStamped(
@@ -142,8 +143,10 @@
       if (transformed_z <= 0.0) {
        continue;
       }
-      Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-        pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
+      Eigen::Vector4d projected_point =
+        projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
+      Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
+        projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
       for (std::size_t i = 0; i < output_objs.size(); ++i) {
         auto & feature_obj = output_objs.at(i);
         const auto & check_roi = feature_obj.feature.roi;
@@ -153,9 +156,10 @@
           continue;
         }
         if (
-          check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
-          check_roi.x_offset + check_roi.width >= projected_point.x() &&
-          check_roi.y_offset + check_roi.height >= projected_point.y()) {
+          check_roi.x_offset <= normalized_projected_point.x() &&
+          check_roi.y_offset <= normalized_projected_point.y() &&
+          check_roi.x_offset + check_roi.width >= normalized_projected_point.x() &&
+          check_roi.y_offset + check_roi.height >= normalized_projected_point.y()) {
           std::memcpy(
             &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
           clusters_data_size.at(i) += point_step;
diff --git a/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp b/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
index 2693a58366b9d..1c9c865b8d21e 100644
--- a/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
+++ b/perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp
@@ -65,9 +65,11 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
   if (mask.cols == 0 || mask.rows == 0) {
     return;
   }
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
+  Eigen::Matrix4d projection;
+  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
+    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
+    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11), 0.0, 0.0,
+    0.0, 1.0;
   geometry_msgs::msg::TransformStamped transform_stamped;
   // transform pointcloud from frame id to camera optical frame id
   {
@@ -97,11 +99,13 @@
       continue;
     }
 
-    Eigen::Vector2d projected_point =
-      calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
+    Eigen::Vector4d projected_point = projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
+    Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
+      projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
 
-    bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
-                           projected_point.y() > 0 && projected_point.y() < camera_info.height;
+    bool is_inside_image =
+      normalized_projected_point.x() > 0 && normalized_projected_point.x() < camera_info.width &&
+      normalized_projected_point.y() > 0 && normalized_projected_point.y() < camera_info.height;
     if (!is_inside_image) {
       output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
       continue;
@@ -109,7 +113,8 @@
 
     // skip filtering pointcloud where semantic id out of the defined list
     uint8_t semantic_id = mask.at<uint8_t>(
-      static_cast<uint16_t>(projected_point.y()), static_cast<uint16_t>(projected_point.x()));
+      static_cast<uint16_t>(normalized_projected_point.y()),
+      static_cast<uint16_t>(normalized_projected_point.x()));
     if (static_cast<size_t>(semantic_id) >= filter_semantic_label_target_.size()) {
       output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
       continue;
diff --git a/perception/image_projection_based_fusion/src/utils/utils.cpp b/perception/image_projection_based_fusion/src/utils/utils.cpp
index 8a7650d9fd5e7..fd25696243951 100644
--- a/perception/image_projection_based_fusion/src/utils/utils.cpp
+++ b/perception/image_projection_based_fusion/src/utils/utils.cpp
@@ -13,17 +13,8 @@
 // limitations under the License.
#include "image_projection_based_fusion/utils/utils.hpp" - namespace image_projection_based_fusion { -Eigen::Vector2d calcRawImageProjectedPoint( - const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d) -{ - cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d); - - cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point); - return Eigen::Vector2d(raw_image_point.x, raw_image_point.y); -} std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped( const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,