Skip to content

Commit fc88868

Browse files
authored
Revert "feat(image projection based fusion): revert #6902 (unrecify 3d point …"
This reverts commit dd19bb8.
1 parent 3d849e9 commit fc88868

File tree

11 files changed

+68
-65
lines changed

11 files changed

+68
-65
lines changed

perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp

+4-1
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919
#include "tier4_autoware_utils/ros/debug_publisher.hpp"
2020

21+
#include <image_projection_based_fusion/utils/utils.hpp>
22+
2123
#include "autoware_auto_perception_msgs/msg/object_classification.hpp"
2224

2325
#include <map>
@@ -45,7 +47,8 @@ class RoiDetectedObjectFusionNode
4547

4648
std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
4749
const DetectedObjects & input_object_msg, const double image_width, const double image_height,
48-
const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection);
50+
const Eigen::Affine3d & object2camera_affine,
51+
const image_geometry::PinholeCameraModel & pinhole_camera_model);
4952

5053
void fuseObjectsOnImage(
5154
const DetectedObjects & input_object_msg,

perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp

+2
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717

1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919

20+
#include <image_projection_based_fusion/utils/utils.hpp>
21+
2022
#include <string>
2123
#include <vector>
2224
namespace image_projection_based_fusion

perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp

+2
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717

1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919

20+
#include <image_projection_based_fusion/utils/utils.hpp>
21+
2022
#include <string>
2123
#include <vector>
2224

perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp

+5
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242
#include "autoware_auto_perception_msgs/msg/shape.hpp"
4343
#include "tier4_perception_msgs/msg/detected_object_with_feature.hpp"
4444

45+
#include <image_geometry/pinhole_camera_model.h>
4546
#include <pcl/point_cloud.h>
4647
#include <pcl/point_types.h>
4748
#include <pcl_conversions/pcl_conversions.h>
@@ -60,6 +61,10 @@ struct PointData
6061
float distance;
6162
size_t orig_index;
6263
};
64+
65+
Eigen::Vector2d calcRawImageProjectedPoint(
66+
const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d);
67+
6368
std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
6469
const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
6570
const std::string & source_frame_id, const rclcpp::Time & time);

perception/image_projection_based_fusion/package.xml

+1
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
<depend>autoware_point_types</depend>
2121
<depend>cv_bridge</depend>
2222
<depend>euclidean_cluster</depend>
23+
<depend>image_geometry</depend>
2324
<depend>image_transport</depend>
2425
<depend>lidar_centerpoint</depend>
2526
<depend>message_filters</depend>

perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp

+8-5
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,9 @@ void PointPaintingFusionNode::fuseOnSingleImage(
307307
const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
308308
const auto p_step = painted_pointcloud_msg.point_step;
309309
// projection matrix
310+
image_geometry::PinholeCameraModel pinhole_camera_model;
311+
pinhole_camera_model.fromCameraInfo(camera_info);
312+
310313
Eigen::Matrix3f camera_projection; // use only x,y,z
311314
camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
312315
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
@@ -342,15 +345,15 @@ dc | dc dc dc dc ||zc|
342345
continue;
343346
}
344347
// project
345-
Eigen::Vector3f normalized_projected_point = camera_projection * Eigen::Vector3f(p_x, p_y, p_z);
348+
Eigen::Vector2d projected_point =
349+
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(p_x, p_y, p_z));
350+
346351
// iterate 2d bbox
347352
for (const auto & feature_object : objects) {
348353
sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
349354
// paint current point if it is inside bbox
350355
int label2d = feature_object.object.classification.front().label;
351-
if (
352-
!isUnknown(label2d) &&
353-
isInsideBbox(normalized_projected_point.x(), normalized_projected_point.y(), roi, p_z)) {
356+
if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, p_z)) {
354357
data = &painted_pointcloud_msg.data[0];
355358
auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
356359
for (const auto & cls : isClassTable_) {
@@ -361,7 +364,7 @@ dc | dc dc dc dc ||zc|
361364
#if 0
362365
// Parallelizing loop don't support push_back
363366
if (debugger_) {
364-
debug_image_points.push_back(normalized_projected_point);
367+
debug_image_points.push_back(projected_point);
365368
}
366369
#endif
367370
}

perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp

+14-20
Original file line numberDiff line numberDiff line change
@@ -83,10 +83,8 @@ void RoiClusterFusionNode::fuseOnSingleImage(
8383
const DetectedObjectsWithFeature & input_roi_msg,
8484
const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
8585
{
86-
Eigen::Matrix4d projection;
87-
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
88-
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
89-
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
86+
image_geometry::PinholeCameraModel pinhole_camera_model;
87+
pinhole_camera_model.fromCameraInfo(camera_info);
9088

9189
// get transform from cluster frame id to camera optical frame id
9290
geometry_msgs::msg::TransformStamped transform_stamped;
@@ -133,23 +131,19 @@ void RoiClusterFusionNode::fuseOnSingleImage(
133131
continue;
134132
}
135133

136-
Eigen::Vector4d projected_point =
137-
projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
138-
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
139-
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
134+
Eigen::Vector2d projected_point =
135+
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
140136
if (
141-
0 <= static_cast<int>(normalized_projected_point.x()) &&
142-
static_cast<int>(normalized_projected_point.x()) <=
143-
static_cast<int>(camera_info.width) - 1 &&
144-
0 <= static_cast<int>(normalized_projected_point.y()) &&
145-
static_cast<int>(normalized_projected_point.y()) <=
146-
static_cast<int>(camera_info.height) - 1) {
147-
min_x = std::min(static_cast<int>(normalized_projected_point.x()), min_x);
148-
min_y = std::min(static_cast<int>(normalized_projected_point.y()), min_y);
149-
max_x = std::max(static_cast<int>(normalized_projected_point.x()), max_x);
150-
max_y = std::max(static_cast<int>(normalized_projected_point.y()), max_y);
151-
projected_points.push_back(normalized_projected_point);
152-
if (debugger_) debugger_->obstacle_points_.push_back(normalized_projected_point);
137+
0 <= static_cast<int>(projected_point.x()) &&
138+
static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
139+
0 <= static_cast<int>(projected_point.y()) &&
140+
static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
141+
min_x = std::min(static_cast<int>(projected_point.x()), min_x);
142+
min_y = std::min(static_cast<int>(projected_point.y()), min_y);
143+
max_x = std::max(static_cast<int>(projected_point.x()), max_x);
144+
max_y = std::max(static_cast<int>(projected_point.y()), max_y);
145+
projected_points.push_back(projected_point);
146+
if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
153147
}
154148
}
155149
if (projected_points.empty()) {

perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp

+7-14
Original file line numberDiff line numberDiff line change
@@ -86,15 +86,12 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
8686
object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
8787
}
8888

89-
Eigen::Matrix4d camera_projection;
90-
camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
91-
camera_info.p.at(3), camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6),
92-
camera_info.p.at(7), camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10),
93-
camera_info.p.at(11);
89+
image_geometry::PinholeCameraModel pinhole_camera_model;
90+
pinhole_camera_model.fromCameraInfo(camera_info);
9491

9592
const auto object_roi_map = generateDetectedObjectRoIs(
9693
input_object_msg, static_cast<double>(camera_info.width),
97-
static_cast<double>(camera_info.height), object2camera_affine, camera_projection);
94+
static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
9895
fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
9996

10097
if (debugger_) {
@@ -109,7 +106,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
109106
std::map<std::size_t, DetectedObjectWithFeature>
110107
RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
111108
const DetectedObjects & input_object_msg, const double image_width, const double image_height,
112-
const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection)
109+
const Eigen::Affine3d & object2camera_affine,
110+
const image_geometry::PinholeCameraModel & pinhole_camera_model)
113111
{
114112
std::map<std::size_t, DetectedObjectWithFeature> object_roi_map;
115113
int64_t timestamp_nsec =
@@ -148,13 +146,8 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
148146
continue;
149147
}
150148

151-
Eigen::Vector2d proj_point;
152-
{
153-
Eigen::Vector4d proj_point_hom =
154-
camera_projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
155-
proj_point = Eigen::Vector2d(
156-
proj_point_hom.x() / (proj_point_hom.z()), proj_point_hom.y() / (proj_point_hom.z()));
157-
}
149+
Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
150+
pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()));
158151

159152
min_x = std::min(proj_point.x(), min_x);
160153
min_y = std::min(proj_point.y(), min_y);

perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp

+8-12
Original file line numberDiff line numberDiff line change
@@ -101,10 +101,9 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
101101
}
102102

103103
// transform pointcloud to camera optical frame id
104-
Eigen::Matrix4d projection;
105-
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
106-
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
107-
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
104+
image_geometry::PinholeCameraModel pinhole_camera_model;
105+
pinhole_camera_model.fromCameraInfo(camera_info);
106+
108107
geometry_msgs::msg::TransformStamped transform_stamped;
109108
{
110109
const auto transform_stamped_optional = getTransformStamped(
@@ -143,10 +142,8 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
143142
if (transformed_z <= 0.0) {
144143
continue;
145144
}
146-
Eigen::Vector4d projected_point =
147-
projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
148-
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
149-
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
145+
Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
146+
pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
150147
for (std::size_t i = 0; i < output_objs.size(); ++i) {
151148
auto & feature_obj = output_objs.at(i);
152149
const auto & check_roi = feature_obj.feature.roi;
@@ -156,10 +153,9 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
156153
continue;
157154
}
158155
if (
159-
check_roi.x_offset <= normalized_projected_point.x() &&
160-
check_roi.y_offset <= normalized_projected_point.y() &&
161-
check_roi.x_offset + check_roi.width >= normalized_projected_point.x() &&
162-
check_roi.y_offset + check_roi.height >= normalized_projected_point.y()) {
156+
check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
157+
check_roi.x_offset + check_roi.width >= projected_point.x() &&
158+
check_roi.y_offset + check_roi.height >= projected_point.y()) {
163159
std::memcpy(
164160
&cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
165161
clusters_data_size.at(i) += point_step;

perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp

+8-13
Original file line numberDiff line numberDiff line change
@@ -65,11 +65,9 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
6565
if (mask.cols == 0 || mask.rows == 0) {
6666
return;
6767
}
68-
Eigen::Matrix4d projection;
69-
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
70-
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
71-
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11), 0.0, 0.0,
72-
0.0, 1.0;
68+
image_geometry::PinholeCameraModel pinhole_camera_model;
69+
pinhole_camera_model.fromCameraInfo(camera_info);
70+
7371
geometry_msgs::msg::TransformStamped transform_stamped;
7472
// transform pointcloud from frame id to camera optical frame id
7573
{
@@ -99,22 +97,19 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
9997
continue;
10098
}
10199

102-
Eigen::Vector4d projected_point = projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
103-
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
104-
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
100+
Eigen::Vector2d projected_point =
101+
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
105102

106-
bool is_inside_image =
107-
normalized_projected_point.x() > 0 && normalized_projected_point.x() < camera_info.width &&
108-
normalized_projected_point.y() > 0 && normalized_projected_point.y() < camera_info.height;
103+
bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
104+
projected_point.y() > 0 && projected_point.y() < camera_info.height;
109105
if (!is_inside_image) {
110106
output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
111107
continue;
112108
}
113109

114110
// skip filtering pointcloud where semantic id out of the defined list
115111
uint8_t semantic_id = mask.at<uint8_t>(
116-
static_cast<uint16_t>(normalized_projected_point.y()),
117-
static_cast<uint16_t>(normalized_projected_point.x()));
112+
static_cast<uint16_t>(projected_point.y()), static_cast<uint16_t>(projected_point.x()));
118113
if (static_cast<size_t>(semantic_id) >= filter_semantic_label_target_.size()) {
119114
output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
120115
continue;

perception/image_projection_based_fusion/src/utils/utils.cpp

+9
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,17 @@
1313
// limitations under the License.
1414

1515
#include "image_projection_based_fusion/utils/utils.hpp"
16+
1617
namespace image_projection_based_fusion
1718
{
19+
/// \brief Project a 3D point (camera optical frame) onto the raw (distorted) image.
/// \param pinhole_camera_model Camera model previously initialized via fromCameraInfo().
/// \param point3d Point in the camera optical frame (z forward), in meters.
/// \return Pixel coordinates of the point on the raw (unrectified) image.
Eigen::Vector2d calcRawImageProjectedPoint(
  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d)
{
  // First project into the rectified image plane, then map the pixel back
  // through the distortion model so the result lands on the raw image.
  const cv::Point2d rectified_px = pinhole_camera_model.project3dToPixel(point3d);
  const cv::Point2d raw_px = pinhole_camera_model.unrectifyPoint(rectified_px);

  return Eigen::Vector2d(raw_px.x, raw_px.y);
}
1827

1928
std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
2029
const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,

0 commit comments

Comments
 (0)