Skip to content

Commit 5e8cabc

Browse files
yukkysaito and vividf
authored and committed
feat(image projection based fusion): revert autowarefoundation#6902 (unrecify 3d point for image projection based fusion) (autowarefoundation#6954)
Revert "feat(image projection based fusion): unrecify 3d point for image proj…" This reverts commit 2c9a936. Signed-off-by: vividf <yihsiang.fang@tier4.jp>
1 parent 3f84639 commit 5e8cabc

File tree

11 files changed

+65
-68
lines changed

11 files changed

+65
-68
lines changed

perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp

+1 −4
Original file line number | Diff line number | Diff line change
@@ -18,8 +18,6 @@
1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919
#include "tier4_autoware_utils/ros/debug_publisher.hpp"
2020

21-
#include <image_projection_based_fusion/utils/utils.hpp>
22-
2321
#include "autoware_auto_perception_msgs/msg/object_classification.hpp"
2422

2523
#include <map>
@@ -47,8 +45,7 @@ class RoiDetectedObjectFusionNode
4745

4846
std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
4947
const DetectedObjects & input_object_msg, const double image_width, const double image_height,
50-
const Eigen::Affine3d & object2camera_affine,
51-
const image_geometry::PinholeCameraModel & pinhole_camera_model);
48+
const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection);
5249

5350
void fuseObjectsOnImage(
5451
const DetectedObjects & input_object_msg,

perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp

-2
Original file line number | Diff line number | Diff line change
@@ -17,8 +17,6 @@
1717

1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919

20-
#include <image_projection_based_fusion/utils/utils.hpp>
21-
2220
#include <string>
2321
#include <vector>
2422
namespace image_projection_based_fusion

perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp

-2
Original file line number | Diff line number | Diff line change
@@ -17,8 +17,6 @@
1717

1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919

20-
#include <image_projection_based_fusion/utils/utils.hpp>
21-
2220
#include <string>
2321
#include <vector>
2422

perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp

-5
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,6 @@
4242
#include "autoware_auto_perception_msgs/msg/shape.hpp"
4343
#include "tier4_perception_msgs/msg/detected_object_with_feature.hpp"
4444

45-
#include <image_geometry/pinhole_camera_model.h>
4645
#include <pcl/point_cloud.h>
4746
#include <pcl/point_types.h>
4847
#include <pcl_conversions/pcl_conversions.h>
@@ -61,10 +60,6 @@ struct PointData
6160
float distance;
6261
size_t orig_index;
6362
};
64-
65-
Eigen::Vector2d calcRawImageProjectedPoint(
66-
const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d);
67-
6863
std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
6964
const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
7065
const std::string & source_frame_id, const rclcpp::Time & time);

perception/image_projection_based_fusion/package.xml

-1
Original file line number | Diff line number | Diff line change
@@ -20,7 +20,6 @@
2020
<depend>autoware_point_types</depend>
2121
<depend>cv_bridge</depend>
2222
<depend>euclidean_cluster</depend>
23-
<depend>image_geometry</depend>
2423
<depend>image_transport</depend>
2524
<depend>lidar_centerpoint</depend>
2625
<depend>message_filters</depend>

perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp

+5 −8
Original file line number | Diff line number | Diff line change
@@ -307,9 +307,6 @@ void PointPaintingFusionNode::fuseOnSingleImage(
307307
const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
308308
const auto p_step = painted_pointcloud_msg.point_step;
309309
// projection matrix
310-
image_geometry::PinholeCameraModel pinhole_camera_model;
311-
pinhole_camera_model.fromCameraInfo(camera_info);
312-
313310
Eigen::Matrix3f camera_projection; // use only x,y,z
314311
camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
315312
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
@@ -345,15 +342,15 @@ dc | dc dc dc dc ||zc|
345342
continue;
346343
}
347344
// project
348-
Eigen::Vector2d projected_point =
349-
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(p_x, p_y, p_z));
350-
345+
Eigen::Vector3f normalized_projected_point = camera_projection * Eigen::Vector3f(p_x, p_y, p_z);
351346
// iterate 2d bbox
352347
for (const auto & feature_object : objects) {
353348
sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
354349
// paint current point if it is inside bbox
355350
int label2d = feature_object.object.classification.front().label;
356-
if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, p_z)) {
351+
if (
352+
!isUnknown(label2d) &&
353+
isInsideBbox(normalized_projected_point.x(), normalized_projected_point.y(), roi, p_z)) {
357354
data = &painted_pointcloud_msg.data[0];
358355
auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
359356
for (const auto & cls : isClassTable_) {
@@ -364,7 +361,7 @@ dc | dc dc dc dc ||zc|
364361
#if 0
365362
// Parallelizing loop don't support push_back
366363
if (debugger_) {
367-
debug_image_points.push_back(projected_point);
364+
debug_image_points.push_back(normalized_projected_point);
368365
}
369366
#endif
370367
}

perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp

+20 −14
Original file line number | Diff line number | Diff line change
@@ -83,8 +83,10 @@ void RoiClusterFusionNode::fuseOnSingleImage(
8383
const DetectedObjectsWithFeature & input_roi_msg,
8484
const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
8585
{
86-
image_geometry::PinholeCameraModel pinhole_camera_model;
87-
pinhole_camera_model.fromCameraInfo(camera_info);
86+
Eigen::Matrix4d projection;
87+
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
88+
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
89+
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
8890

8991
// get transform from cluster frame id to camera optical frame id
9092
geometry_msgs::msg::TransformStamped transform_stamped;
@@ -131,19 +133,23 @@ void RoiClusterFusionNode::fuseOnSingleImage(
131133
continue;
132134
}
133135

134-
Eigen::Vector2d projected_point =
135-
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
136+
Eigen::Vector4d projected_point =
137+
projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
138+
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
139+
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
136140
if (
137-
0 <= static_cast<int>(projected_point.x()) &&
138-
static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
139-
0 <= static_cast<int>(projected_point.y()) &&
140-
static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
141-
min_x = std::min(static_cast<int>(projected_point.x()), min_x);
142-
min_y = std::min(static_cast<int>(projected_point.y()), min_y);
143-
max_x = std::max(static_cast<int>(projected_point.x()), max_x);
144-
max_y = std::max(static_cast<int>(projected_point.y()), max_y);
145-
projected_points.push_back(projected_point);
146-
if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
141+
0 <= static_cast<int>(normalized_projected_point.x()) &&
142+
static_cast<int>(normalized_projected_point.x()) <=
143+
static_cast<int>(camera_info.width) - 1 &&
144+
0 <= static_cast<int>(normalized_projected_point.y()) &&
145+
static_cast<int>(normalized_projected_point.y()) <=
146+
static_cast<int>(camera_info.height) - 1) {
147+
min_x = std::min(static_cast<int>(normalized_projected_point.x()), min_x);
148+
min_y = std::min(static_cast<int>(normalized_projected_point.y()), min_y);
149+
max_x = std::max(static_cast<int>(normalized_projected_point.x()), max_x);
150+
max_y = std::max(static_cast<int>(normalized_projected_point.y()), max_y);
151+
projected_points.push_back(normalized_projected_point);
152+
if (debugger_) debugger_->obstacle_points_.push_back(normalized_projected_point);
147153
}
148154
}
149155
if (projected_points.empty()) {

perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp

+14 −7
Original file line number | Diff line number | Diff line change
@@ -86,12 +86,15 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
8686
object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
8787
}
8888

89-
image_geometry::PinholeCameraModel pinhole_camera_model;
90-
pinhole_camera_model.fromCameraInfo(camera_info);
89+
Eigen::Matrix4d camera_projection;
90+
camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
91+
camera_info.p.at(3), camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6),
92+
camera_info.p.at(7), camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10),
93+
camera_info.p.at(11);
9194

9295
const auto object_roi_map = generateDetectedObjectRoIs(
9396
input_object_msg, static_cast<double>(camera_info.width),
94-
static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
97+
static_cast<double>(camera_info.height), object2camera_affine, camera_projection);
9598
fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
9699

97100
if (debugger_) {
@@ -106,8 +109,7 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
106109
std::map<std::size_t, DetectedObjectWithFeature>
107110
RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
108111
const DetectedObjects & input_object_msg, const double image_width, const double image_height,
109-
const Eigen::Affine3d & object2camera_affine,
110-
const image_geometry::PinholeCameraModel & pinhole_camera_model)
112+
const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection)
111113
{
112114
std::map<std::size_t, DetectedObjectWithFeature> object_roi_map;
113115
int64_t timestamp_nsec =
@@ -146,8 +148,13 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
146148
continue;
147149
}
148150

149-
Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
150-
pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()));
151+
Eigen::Vector2d proj_point;
152+
{
153+
Eigen::Vector4d proj_point_hom =
154+
camera_projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
155+
proj_point = Eigen::Vector2d(
156+
proj_point_hom.x() / (proj_point_hom.z()), proj_point_hom.y() / (proj_point_hom.z()));
157+
}
151158

152159
min_x = std::min(proj_point.x(), min_x);
153160
min_y = std::min(proj_point.y(), min_y);

perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp

+12 −8
Original file line number | Diff line number | Diff line change
@@ -101,9 +101,10 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
101101
}
102102

103103
// transform pointcloud to camera optical frame id
104-
image_geometry::PinholeCameraModel pinhole_camera_model;
105-
pinhole_camera_model.fromCameraInfo(camera_info);
106-
104+
Eigen::Matrix4d projection;
105+
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
106+
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
107+
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
107108
geometry_msgs::msg::TransformStamped transform_stamped;
108109
{
109110
const auto transform_stamped_optional = getTransformStamped(
@@ -142,8 +143,10 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
142143
if (transformed_z <= 0.0) {
143144
continue;
144145
}
145-
Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
146-
pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
146+
Eigen::Vector4d projected_point =
147+
projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
148+
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
149+
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
147150
for (std::size_t i = 0; i < output_objs.size(); ++i) {
148151
auto & feature_obj = output_objs.at(i);
149152
const auto & check_roi = feature_obj.feature.roi;
@@ -153,9 +156,10 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
153156
continue;
154157
}
155158
if (
156-
check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
157-
check_roi.x_offset + check_roi.width >= projected_point.x() &&
158-
check_roi.y_offset + check_roi.height >= projected_point.y()) {
159+
check_roi.x_offset <= normalized_projected_point.x() &&
160+
check_roi.y_offset <= normalized_projected_point.y() &&
161+
check_roi.x_offset + check_roi.width >= normalized_projected_point.x() &&
162+
check_roi.y_offset + check_roi.height >= normalized_projected_point.y()) {
159163
std::memcpy(
160164
&cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
161165
clusters_data_size.at(i) += point_step;

perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp

+13 −8
Original file line number | Diff line number | Diff line change
@@ -65,9 +65,11 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
6565
if (mask.cols == 0 || mask.rows == 0) {
6666
return;
6767
}
68-
image_geometry::PinholeCameraModel pinhole_camera_model;
69-
pinhole_camera_model.fromCameraInfo(camera_info);
70-
68+
Eigen::Matrix4d projection;
69+
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
70+
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
71+
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11), 0.0, 0.0,
72+
0.0, 1.0;
7173
geometry_msgs::msg::TransformStamped transform_stamped;
7274
// transform pointcloud from frame id to camera optical frame id
7375
{
@@ -97,19 +99,22 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
9799
continue;
98100
}
99101

100-
Eigen::Vector2d projected_point =
101-
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
102+
Eigen::Vector4d projected_point = projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
103+
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
104+
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
102105

103-
bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
104-
projected_point.y() > 0 && projected_point.y() < camera_info.height;
106+
bool is_inside_image =
107+
normalized_projected_point.x() > 0 && normalized_projected_point.x() < camera_info.width &&
108+
normalized_projected_point.y() > 0 && normalized_projected_point.y() < camera_info.height;
105109
if (!is_inside_image) {
106110
output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
107111
continue;
108112
}
109113

110114
// skip filtering pointcloud where semantic id out of the defined list
111115
uint8_t semantic_id = mask.at<uint8_t>(
112-
static_cast<uint16_t>(projected_point.y()), static_cast<uint16_t>(projected_point.x()));
116+
static_cast<uint16_t>(normalized_projected_point.y()),
117+
static_cast<uint16_t>(normalized_projected_point.x()));
113118
if (static_cast<size_t>(semantic_id) >= filter_semantic_label_target_.size()) {
114119
output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
115120
continue;

perception/image_projection_based_fusion/src/utils/utils.cpp

-9
Original file line number | Diff line number | Diff line change
@@ -13,17 +13,8 @@
1313
// limitations under the License.
1414

1515
#include "image_projection_based_fusion/utils/utils.hpp"
16-
1716
namespace image_projection_based_fusion
1817
{
19-
Eigen::Vector2d calcRawImageProjectedPoint(
20-
const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d)
21-
{
22-
cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d);
23-
24-
cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point);
25-
return Eigen::Vector2d(raw_image_point.x, raw_image_point.y);
26-
}
2718

2819
std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
2920
const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,

0 commit comments

Comments (0)