feat(image projection based fusion): revert #6902 (unrecify 3d point for image projection based fusion) #6954

Merged
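This PR reverts #6902, which had routed every 3D-to-2D projection through image_geometry::PinholeCameraModel (project3dToPixel into the rectified image, then unrectifyPoint back into the raw image). The revert restores direct multiplication by the projection matrix P carried in sensor_msgs/CameraInfo, followed by division by depth. Below is a minimal sketch of the restored math, assuming Eigen; the helper name projectToPixel is hypothetical and not part of this diff:

#include <Eigen/Dense>

// Sketch of the projection path this PR restores: multiply a camera-frame 3D
// point by a homogeneous projection matrix built from CameraInfo's P matrix,
// then divide by the homogeneous depth to obtain pixel coordinates (u, v).
inline Eigen::Vector2d projectToPixel(
  const Eigen::Matrix4d & projection, const Eigen::Vector3d & point)
{
  const Eigen::Vector4d hom = projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
  return {hom.x() / hom.z(), hom.y() / hom.z()};  // only meaningful for point.z() > 0
}

Each fusion node below builds its own projection matrix from camera_info.p and applies this multiply-then-normalize step; the PointPainting node appears to skip the normalization and scale the ROI by depth instead (see the sketch after that hunk).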
@@ -18,8 +18,6 @@
 #include "image_projection_based_fusion/fusion_node.hpp"
 #include "tier4_autoware_utils/ros/debug_publisher.hpp"
 
-#include <image_projection_based_fusion/utils/utils.hpp>
-
 #include "autoware_auto_perception_msgs/msg/object_classification.hpp"
 
 #include <map>
@@ -47,8 +45,7 @@ class RoiDetectedObjectFusionNode
 
   std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
     const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-    const Eigen::Affine3d & object2camera_affine,
-    const image_geometry::PinholeCameraModel & pinhole_camera_model);
+    const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection);
 
   void fuseObjectsOnImage(
     const DetectedObjects & input_object_msg,
@@ -17,8 +17,6 @@
 
 #include "image_projection_based_fusion/fusion_node.hpp"
 
-#include <image_projection_based_fusion/utils/utils.hpp>
-
 #include <string>
 #include <vector>
 namespace image_projection_based_fusion
@@ -17,8 +17,6 @@
 
 #include "image_projection_based_fusion/fusion_node.hpp"
 
-#include <image_projection_based_fusion/utils/utils.hpp>
-
 #include <string>
 #include <vector>
 
@@ -42,7 +42,6 @@
 #include "autoware_auto_perception_msgs/msg/shape.hpp"
 #include "tier4_perception_msgs/msg/detected_object_with_feature.hpp"
 
-#include <image_geometry/pinhole_camera_model.h>
 #include <pcl/point_cloud.h>
 #include <pcl/point_types.h>
 #include <pcl_conversions/pcl_conversions.h>
@@ -61,10 +60,6 @@ struct PointData
   float distance;
   size_t orig_index;
 };
-
-Eigen::Vector2d calcRawImageProjectedPoint(
-  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d);
-
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
   const std::string & source_frame_id, const rclcpp::Time & time);
1 change: 0 additions & 1 deletion perception/image_projection_based_fusion/package.xml
@@ -20,7 +20,6 @@
   <depend>autoware_point_types</depend>
   <depend>cv_bridge</depend>
   <depend>euclidean_cluster</depend>
-  <depend>image_geometry</depend>
   <depend>image_transport</depend>
   <depend>lidar_centerpoint</depend>
   <depend>message_filters</depend>
@@ -307,9 +307,6 @@ void PointPaintingFusionNode::fuseOnSingleImage(
   const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
   const auto p_step = painted_pointcloud_msg.point_step;
   // projection matrix
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
   Eigen::Matrix3f camera_projection;  // use only x,y,z
   camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
     camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
@@ -345,15 +342,15 @@ dc | dc dc dc dc ||zc|
         continue;
       }
       // project
-      Eigen::Vector2d projected_point =
-        calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(p_x, p_y, p_z));
-
+      Eigen::Vector3f normalized_projected_point = camera_projection * Eigen::Vector3f(p_x, p_y, p_z);
       // iterate 2d bbox
       for (const auto & feature_object : objects) {
         sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
         // paint current point if it is inside bbox
         int label2d = feature_object.object.classification.front().label;
-        if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, p_z)) {
+        if (
+          !isUnknown(label2d) &&
+          isInsideBbox(normalized_projected_point.x(), normalized_projected_point.y(), roi, p_z)) {
           data = &painted_pointcloud_msg.data[0];
           auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
           for (const auto & cls : isClassTable_) {
@@ -364,7 +361,7 @@ dc | dc dc dc dc ||zc|
 #if 0
       // Parallelizing loop don't support push_back
       if (debugger_) {
-        debug_image_points.push_back(projected_point);
+        debug_image_points.push_back(normalized_projected_point);
       }
 #endif
     }
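Note that despite its name, normalized_projected_point here is the unnormalized product camera_projection * (p_x, p_y, p_z), passed to isInsideBbox together with the depth p_z. That suggests the bounds test scales the ROI by depth rather than dividing the point. A sketch under that assumption (hypothetical helper; the real isInsideBbox is declared in the package's utils header):

#include <sensor_msgs/msg/region_of_interest.hpp>

// Sketch: test an unnormalized projected point (u*z, v*z) against an ROI whose
// bounds are scaled by the same depth z, avoiding a per-point division.
// Assumption: this mirrors how isInsideBbox consumes p_z at the call site above.
inline bool isInsideBboxSketch(
  const float proj_x, const float proj_y, const sensor_msgs::msg::RegionOfInterest & roi,
  const float z)
{
  return proj_x >= roi.x_offset * z && proj_x <= (roi.x_offset + roi.width) * z &&
         proj_y >= roi.y_offset * z && proj_y <= (roi.y_offset + roi.height) * z;
}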
perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp
@@ -83,67 +83,73 @@
   const DetectedObjectsWithFeature & input_roi_msg,
   const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
 {
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
+  Eigen::Matrix4d projection;
+  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
+    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
+    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
 
   // get transform from cluster frame id to camera optical frame id
   geometry_msgs::msg::TransformStamped transform_stamped;
   {
     const auto transform_stamped_optional = getTransformStamped(
       tf_buffer_, /*target*/ camera_info.header.frame_id,
       /*source*/ input_cluster_msg.header.frame_id, camera_info.header.stamp);
     if (!transform_stamped_optional) {
       RCLCPP_WARN_STREAM(
         get_logger(), "Failed to get transform from " << input_cluster_msg.header.frame_id << " to "
                                                       << camera_info.header.frame_id);
       return;
     }
     transform_stamped = transform_stamped_optional.value();
   }
 
   std::map<std::size_t, RegionOfInterest> m_cluster_roi;
   for (std::size_t i = 0; i < input_cluster_msg.feature_objects.size(); ++i) {
     if (input_cluster_msg.feature_objects.at(i).feature.cluster.data.empty()) {
       continue;
     }
 
     if (is_far_enough(input_cluster_msg.feature_objects.at(i), fusion_distance_)) {
       continue;
     }
 
     // filter point out of scope
     if (debugger_ && out_of_scope(input_cluster_msg.feature_objects.at(i))) {
       continue;
     }
 
     sensor_msgs::msg::PointCloud2 transformed_cluster;
     tf2::doTransform(
       input_cluster_msg.feature_objects.at(i).feature.cluster, transformed_cluster,
       transform_stamped);
 
     int min_x(camera_info.width), min_y(camera_info.height), max_x(0), max_y(0);
     std::vector<Eigen::Vector2d> projected_points;
     projected_points.reserve(transformed_cluster.data.size());
     for (sensor_msgs::PointCloud2ConstIterator<float> iter_x(transformed_cluster, "x"),
          iter_y(transformed_cluster, "y"), iter_z(transformed_cluster, "z");
          iter_x != iter_x.end(); ++iter_x, ++iter_y, ++iter_z) {
       if (*iter_z <= 0.0) {
         continue;
       }
 
-      Eigen::Vector2d projected_point =
-        calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
+      Eigen::Vector4d projected_point =
+        projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
+      Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
+        projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
       if (
-        0 <= static_cast<int>(projected_point.x()) &&
-        static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
-        0 <= static_cast<int>(projected_point.y()) &&
-        static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
-        min_x = std::min(static_cast<int>(projected_point.x()), min_x);
-        min_y = std::min(static_cast<int>(projected_point.y()), min_y);
-        max_x = std::max(static_cast<int>(projected_point.x()), max_x);
-        max_y = std::max(static_cast<int>(projected_point.y()), max_y);
-        projected_points.push_back(projected_point);
-        if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
+        0 <= static_cast<int>(normalized_projected_point.x()) &&
+        static_cast<int>(normalized_projected_point.x()) <=
+          static_cast<int>(camera_info.width) - 1 &&
+        0 <= static_cast<int>(normalized_projected_point.y()) &&
+        static_cast<int>(normalized_projected_point.y()) <=
+          static_cast<int>(camera_info.height) - 1) {
+        min_x = std::min(static_cast<int>(normalized_projected_point.x()), min_x);
+        min_y = std::min(static_cast<int>(normalized_projected_point.y()), min_y);
+        max_x = std::max(static_cast<int>(normalized_projected_point.x()), max_x);
+        max_y = std::max(static_cast<int>(normalized_projected_point.y()), max_y);
+        projected_points.push_back(normalized_projected_point);
+        if (debugger_) debugger_->obstacle_points_.push_back(normalized_projected_point);
       }
     }
     if (projected_points.empty()) {

CodeScene Delta Analysis (check warning on line 152 in perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp):
❌ Getting worse: Complex Method
RoiClusterFusionNode::fuseOnSingleImage already has high cyclomatic complexity, and now it increases in Lines of Code from 131 to 137. This function has many conditional statements (e.g. if, for, while), leading to lower code health. Avoid adding more conditionals and code to it without refactoring.
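In the hunk above, each visible cluster point is projected, normalized, checked against the image bounds, and folded into a running min/max that later becomes the cluster's ROI. The same accumulation as a standalone sketch (hypothetical function, not part of the diff):

#include <algorithm>
#include <vector>

#include <Eigen/Dense>

// Sketch: shrink-wrap an axis-aligned pixel box around the projected points
// that land inside a width x height image; returns false if none are visible.
inline bool computeRoiFromProjectedPoints(
  const std::vector<Eigen::Vector2d> & points, const int width, const int height,
  int & min_x, int & min_y, int & max_x, int & max_y)
{
  min_x = width;
  min_y = height;
  max_x = 0;
  max_y = 0;
  bool any_inside = false;
  for (const auto & p : points) {
    const int x = static_cast<int>(p.x());
    const int y = static_cast<int>(p.y());
    if (x < 0 || x > width - 1 || y < 0 || y > height - 1) {
      continue;  // projected point falls outside the image
    }
    min_x = std::min(x, min_x);
    min_y = std::min(y, min_y);
    max_x = std::max(x, max_x);
    max_y = std::max(y, max_y);
    any_inside = true;
  }
  return any_inside;
}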
perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp
@@ -86,12 +86,15 @@
     object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
   }
 
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
+  Eigen::Matrix4d camera_projection;
+  camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
+    camera_info.p.at(3), camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6),
+    camera_info.p.at(7), camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10),
+    camera_info.p.at(11);
 
   const auto object_roi_map = generateDetectedObjectRoIs(
     input_object_msg, static_cast<double>(camera_info.width),
-    static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
+    static_cast<double>(camera_info.height), object2camera_affine, camera_projection);
   fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
 
   if (debugger_) {
@@ -106,48 +109,52 @@
 std::map<std::size_t, DetectedObjectWithFeature>
 RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
   const DetectedObjects & input_object_msg, const double image_width, const double image_height,
-  const Eigen::Affine3d & object2camera_affine,
-  const image_geometry::PinholeCameraModel & pinhole_camera_model)
+  const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection)
 {
   std::map<std::size_t, DetectedObjectWithFeature> object_roi_map;
   int64_t timestamp_nsec =
     input_object_msg.header.stamp.sec * (int64_t)1e9 + input_object_msg.header.stamp.nanosec;
   if (passthrough_object_flags_map_.size() == 0) {
     return object_roi_map;
   }
   if (passthrough_object_flags_map_.count(timestamp_nsec) == 0) {
     return object_roi_map;
   }
   const auto & passthrough_object_flags = passthrough_object_flags_map_.at(timestamp_nsec);
   for (std::size_t obj_i = 0; obj_i < input_object_msg.objects.size(); ++obj_i) {
     std::vector<Eigen::Vector3d> vertices_camera_coord;
     const auto & object = input_object_msg.objects.at(obj_i);
 
     if (passthrough_object_flags.at(obj_i)) {
       continue;
     }
 
     // filter point out of scope
     if (debugger_ && out_of_scope(object)) {
       continue;
     }
 
     {
       std::vector<Eigen::Vector3d> vertices;
       objectToVertices(object.kinematics.pose_with_covariance.pose, object.shape, vertices);
       transformPoints(vertices, object2camera_affine, vertices_camera_coord);
     }
 
     double min_x(std::numeric_limits<double>::max()), min_y(std::numeric_limits<double>::max()),
       max_x(std::numeric_limits<double>::min()), max_y(std::numeric_limits<double>::min());
     std::size_t point_on_image_cnt = 0;
     for (const auto & point : vertices_camera_coord) {
       if (point.z() <= 0.0) {
         continue;
       }
 
-      Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
-        pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()));
+      Eigen::Vector2d proj_point;
+      {
+        Eigen::Vector4d proj_point_hom =
+          camera_projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
+        proj_point = Eigen::Vector2d(
+          proj_point_hom.x() / (proj_point_hom.z()), proj_point_hom.y() / (proj_point_hom.z()));
+      }
 
       min_x = std::min(proj_point.x(), min_x);
       min_y = std::min(proj_point.y(), min_y);

CodeScene Delta Analysis (check warning on line 157 in perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp):
❌ Getting worse: Complex Method
RoiDetectedObjectFusionNode::generateDetectedObjectRoIs already has high cyclomatic complexity, and now it increases in Lines of Code from 74 to 78. This function has many conditional statements (e.g. if, for, while), leading to lower code health. Avoid adding more conditionals and code to it without refactoring.
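generateDetectedObjectRoIs turns each detected object's shape into vertices (objectToVertices), transforms them into the camera frame, and projects the ones in front of the camera. A sketch of the vertex enumeration it relies on, assuming a plain bounding box (hypothetical function; the real objectToVertices presumably also handles other shape types):

#include <vector>

#include <Eigen/Dense>

// Sketch: the eight corners of an oriented box with dimensions (dx, dy, dz),
// centered on `pose`, expressed in the pose's target frame.
inline std::vector<Eigen::Vector3d> boxVertices(
  const Eigen::Affine3d & pose, const double dx, const double dy, const double dz)
{
  std::vector<Eigen::Vector3d> vertices;
  vertices.reserve(8);
  for (const double sx : {-0.5, 0.5}) {
    for (const double sy : {-0.5, 0.5}) {
      for (const double sz : {-0.5, 0.5}) {
        vertices.push_back(pose * Eigen::Vector3d(sx * dx, sy * dy, sz * dz));
      }
    }
  }
  return vertices;
}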
@@ -101,9 +101,10 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
   }
 
   // transform pointcloud to camera optical frame id
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
+  Eigen::Matrix4d projection;
+  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
+    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
+    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
   geometry_msgs::msg::TransformStamped transform_stamped;
   {
     const auto transform_stamped_optional = getTransformStamped(
@@ -142,8 +143,10 @@
     if (transformed_z <= 0.0) {
       continue;
     }
-    Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
-      pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
+    Eigen::Vector4d projected_point =
+      projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
+    Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
+      projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
     for (std::size_t i = 0; i < output_objs.size(); ++i) {
       auto & feature_obj = output_objs.at(i);
       const auto & check_roi = feature_obj.feature.roi;
@@ -153,9 +156,10 @@
         continue;
       }
       if (
-        check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
-        check_roi.x_offset + check_roi.width >= projected_point.x() &&
-        check_roi.y_offset + check_roi.height >= projected_point.y()) {
+        check_roi.x_offset <= normalized_projected_point.x() &&
+        check_roi.y_offset <= normalized_projected_point.y() &&
+        check_roi.x_offset + check_roi.width >= normalized_projected_point.x() &&
+        check_roi.y_offset + check_roi.height >= normalized_projected_point.y()) {
         std::memcpy(
           &cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
         clusters_data_size.at(i) += point_step;
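Unlike the PointPainting node, this one divides by depth first and tests the normalized pixel against the ROI directly. The inlined condition above, restated as a hypothetical helper for readability (assumption: inclusive bounds exactly as written in the diff):

#include <sensor_msgs/msg/region_of_interest.hpp>

// Sketch of the inlined containment test: keep a normalized pixel (u, v) when
// it lies inside [x_offset, x_offset + width] x [y_offset, y_offset + height].
inline bool roiContains(
  const sensor_msgs::msg::RegionOfInterest & roi, const double u, const double v)
{
  return roi.x_offset <= u && roi.y_offset <= v && roi.x_offset + roi.width >= u &&
         roi.y_offset + roi.height >= v;
}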
@@ -65,9 +65,11 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
   if (mask.cols == 0 || mask.rows == 0) {
     return;
   }
-  image_geometry::PinholeCameraModel pinhole_camera_model;
-  pinhole_camera_model.fromCameraInfo(camera_info);
-
+  Eigen::Matrix4d projection;
+  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
+    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
+    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11), 0.0, 0.0,
+    0.0, 1.0;
   geometry_msgs::msg::TransformStamped transform_stamped;
   // transform pointcloud from frame id to camera optical frame id
   {
@@ -97,19 +99,22 @@
       continue;
     }
 
-    Eigen::Vector2d projected_point =
-      calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
+    Eigen::Vector4d projected_point = projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
+    Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
+      projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
 
-    bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
-                           projected_point.y() > 0 && projected_point.y() < camera_info.height;
+    bool is_inside_image =
+      normalized_projected_point.x() > 0 && normalized_projected_point.x() < camera_info.width &&
+      normalized_projected_point.y() > 0 && normalized_projected_point.y() < camera_info.height;
     if (!is_inside_image) {
       output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
       continue;
     }
 
     // skip filtering pointcloud where semantic id out of the defined list
     uint8_t semantic_id = mask.at<uint8_t>(
-      static_cast<uint16_t>(projected_point.y()), static_cast<uint16_t>(projected_point.x()));
+      static_cast<uint16_t>(normalized_projected_point.y()),
+      static_cast<uint16_t>(normalized_projected_point.x()));
     if (static_cast<size_t>(semantic_id) >= filter_semantic_label_target_.size()) {
       output_cloud.push_back(pcl::PointXYZ(*iter_orig_x, *iter_orig_y, *iter_orig_z));
       continue;
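Of the hunks in this PR, only this one pads the comma-initialized matrix with an explicit 0, 0, 0, 1 bottom row; the other files feed Eigen::Matrix4d just the twelve P coefficients, which Eigen's comma initializer rejects with an assertion in debug builds. The padded construction, factored into a sketch (hypothetical helper, same coefficients as above):

#include <Eigen/Dense>

#include <sensor_msgs/msg/camera_info.hpp>

// Sketch: assemble a fully populated homogeneous 4x4 projection from the
// row-major 3x4 P matrix carried in sensor_msgs/CameraInfo (p[0..11]).
inline Eigen::Matrix4d makeProjectionMatrix(const sensor_msgs::msg::CameraInfo & camera_info)
{
  Eigen::Matrix4d projection;
  projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
    camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
    camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11),
    0.0, 0.0, 0.0, 1.0;
  return projection;
}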
9 changes: 0 additions & 9 deletions perception/image_projection_based_fusion/src/utils/utils.cpp
@@ -1,4 +1,4 @@
 // Copyright 2022 TIER IV, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

CodeScene Delta Analysis (check warning on line 1 in perception/image_projection_based_fusion/src/utils/utils.cpp):
❌ New issue: Overall Code Complexity
This module has a mean cyclomatic complexity of 4.38 across 8 functions. The mean complexity threshold is 4. This file has many conditional statements (e.g. if, for, while) across its implementation, leading to lower code health. Avoid adding more conditionals.

@@ -13,17 +13,8 @@
 // limitations under the License.
 
 #include "image_projection_based_fusion/utils/utils.hpp"
 
 namespace image_projection_based_fusion
 {
-Eigen::Vector2d calcRawImageProjectedPoint(
-  const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d)
-{
-  cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d);
-
-  cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point);
-  return Eigen::Vector2d(raw_image_point.x, raw_image_point.y);
-}
-
 std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
   const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,