Skip to content

Commit 934093f

Browse files
committed
Merge remote-tracking branch 'origin/awf-latest' into refactor/control_launch_xml
2 parents 798c219 + cfcf1e7 commit 934093f

File tree

91 files changed

+618
-647
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

91 files changed

+618
-647
lines changed

localization/autoware_landmark_based_localizer/autoware_ar_tag_based_localizer/README.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ ros2 launch autoware_launch ... \
4646

4747
### Rosbag
4848

49-
#### [Sample rosbag and map (AWSIM data)](https://drive.google.com/file/d/1uMVwQQFcfs8JOqfoA1FqfH_fLPwQ71jK/view)
49+
#### [Sample rosbag and map (AWSIM data)](https://drive.google.com/file/d/1ZPsfDvOXFrMxtx7fb1W5sOXdAK1e71hY/view)
5050

5151
This data is simulated data created by [AWSIM](https://tier4.github.io/AWSIM/).
5252
Essentially, AR tag-based self-localization is not intended for such public road driving, but for driving in a smaller area, so the max driving speed is set at 15 km/h.
@@ -55,7 +55,7 @@ It is a known problem that the timing of when each AR tag begins to be detected
5555

5656
![sample_result_in_awsim](./doc_image/sample_result_in_awsim.png)
5757

58-
#### [Sample rosbag and map (Real world data)](https://drive.google.com/file/d/1wiCQjyjRnYbb0dg8G6mRecdSGh8tv3zR/view)
58+
#### [Sample rosbag and map (Real world data)](https://drive.google.com/file/d/1VQCQ_qiEZpCMI3-z6SNs__zJ-4HJFQjx/view)
5959

6060
Please remap the topic names and play it.
6161

localization/pose_estimator_arbiter/README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ The following video demonstrates the switching of four different pose estimators
4545

4646
Users can reproduce the demonstration using the following data and launch command:
4747

48-
[sample data (rosbag & map)](https://drive.google.com/file/d/1MxLo1Sw6PdvfkyOYf_9A5dZ9uli1vPvS/view)
48+
[sample data (rosbag & map)](https://drive.google.com/file/d/1ZNlkyCtwe04iKFREdeZ5xuMU_jWpwM3W/view)
4949
The rosbag is simulated data created by [AWSIM](https://tier4.github.io/AWSIM/).
5050
The map is an edited version of the [original map data](https://github.com/tier4/AWSIM/releases/download/v1.1.0/nishishinjuku_autoware_map.zip) published on the AWSIM documentation page to make it suitable for multiple pose_estimators.
5151

perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_detected_object_fusion/node.hpp

+4-1
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@
1818
#include "autoware/universe_utils/ros/debug_publisher.hpp"
1919
#include "image_projection_based_fusion/fusion_node.hpp"
2020

21+
#include <image_projection_based_fusion/utils/utils.hpp>
22+
2123
#include "autoware_perception_msgs/msg/object_classification.hpp"
2224

2325
#include <map>
@@ -45,7 +47,8 @@ class RoiDetectedObjectFusionNode
4547

4648
std::map<std::size_t, DetectedObjectWithFeature> generateDetectedObjectRoIs(
4749
const DetectedObjects & input_object_msg, const double image_width, const double image_height,
48-
const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection);
50+
const Eigen::Affine3d & object2camera_affine,
51+
const image_geometry::PinholeCameraModel & pinhole_camera_model);
4952

5053
void fuseObjectsOnImage(
5154
const DetectedObjects & input_object_msg,

perception/image_projection_based_fusion/include/image_projection_based_fusion/roi_pointcloud_fusion/node.hpp

+2
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717

1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919

20+
#include <image_projection_based_fusion/utils/utils.hpp>
21+
2022
#include <string>
2123
#include <vector>
2224
namespace image_projection_based_fusion

perception/image_projection_based_fusion/include/image_projection_based_fusion/segmentation_pointcloud_fusion/node.hpp

+2
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717

1818
#include "image_projection_based_fusion/fusion_node.hpp"
1919

20+
#include <image_projection_based_fusion/utils/utils.hpp>
21+
2022
#include <string>
2123
#include <vector>
2224

perception/image_projection_based_fusion/include/image_projection_based_fusion/utils/utils.hpp

+5
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242
#include "autoware_perception_msgs/msg/shape.hpp"
4343
#include "tier4_perception_msgs/msg/detected_object_with_feature.hpp"
4444

45+
#include <image_geometry/pinhole_camera_model.h>
4546
#include <pcl/point_cloud.h>
4647
#include <pcl/point_types.h>
4748
#include <pcl_conversions/pcl_conversions.h>
@@ -60,6 +61,10 @@ struct PointData
6061
float distance;
6162
size_t orig_index;
6263
};
64+
65+
Eigen::Vector2d calcRawImageProjectedPoint(
66+
const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d);
67+
6368
std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
6469
const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,
6570
const std::string & source_frame_id, const rclcpp::Time & time);

perception/image_projection_based_fusion/package.xml

+1
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
<depend>autoware_universe_utils</depend>
2222
<depend>cv_bridge</depend>
2323
<depend>euclidean_cluster</depend>
24+
<depend>image_geometry</depend>
2425
<depend>image_transport</depend>
2526
<depend>lidar_centerpoint</depend>
2627
<depend>message_filters</depend>

perception/image_projection_based_fusion/src/pointpainting_fusion/node.cpp

+8-8
Original file line numberDiff line numberDiff line change
@@ -307,9 +307,9 @@ void PointPaintingFusionNode::fuseOnSingleImage(
307307
const auto class_offset = painted_pointcloud_msg.fields.at(4).offset;
308308
const auto p_step = painted_pointcloud_msg.point_step;
309309
// projection matrix
310-
Eigen::Matrix3f camera_projection; // use only x,y,z
311-
camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
312-
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6);
310+
image_geometry::PinholeCameraModel pinhole_camera_model;
311+
pinhole_camera_model.fromCameraInfo(camera_info);
312+
313313
Eigen::Vector3f point_lidar, point_camera;
314314
/** dc : don't care
315315
@@ -342,15 +342,15 @@ dc | dc dc dc dc ||zc|
342342
continue;
343343
}
344344
// project
345-
Eigen::Vector3f normalized_projected_point = camera_projection * Eigen::Vector3f(p_x, p_y, p_z);
345+
Eigen::Vector2d projected_point =
346+
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(p_x, p_y, p_z));
347+
346348
// iterate 2d bbox
347349
for (const auto & feature_object : objects) {
348350
sensor_msgs::msg::RegionOfInterest roi = feature_object.feature.roi;
349351
// paint current point if it is inside bbox
350352
int label2d = feature_object.object.classification.front().label;
351-
if (
352-
!isUnknown(label2d) &&
353-
isInsideBbox(normalized_projected_point.x(), normalized_projected_point.y(), roi, p_z)) {
353+
if (!isUnknown(label2d) && isInsideBbox(projected_point.x(), projected_point.y(), roi, p_z)) {
354354
data = &painted_pointcloud_msg.data[0];
355355
auto p_class = reinterpret_cast<float *>(&output[stride + class_offset]);
356356
for (const auto & cls : isClassTable_) {
@@ -361,7 +361,7 @@ dc | dc dc dc dc ||zc|
361361
#if 0
362362
// Parallelizing loop don't support push_back
363363
if (debugger_) {
364-
debug_image_points.push_back(normalized_projected_point);
364+
debug_image_points.push_back(projected_point);
365365
}
366366
#endif
367367
}

perception/image_projection_based_fusion/src/roi_cluster_fusion/node.cpp

+14-20
Original file line numberDiff line numberDiff line change
@@ -83,10 +83,8 @@ void RoiClusterFusionNode::fuseOnSingleImage(
8383
const DetectedObjectsWithFeature & input_roi_msg,
8484
const sensor_msgs::msg::CameraInfo & camera_info, DetectedObjectsWithFeature & output_cluster_msg)
8585
{
86-
Eigen::Matrix4d projection;
87-
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
88-
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
89-
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
86+
image_geometry::PinholeCameraModel pinhole_camera_model;
87+
pinhole_camera_model.fromCameraInfo(camera_info);
9088

9189
// get transform from cluster frame id to camera optical frame id
9290
geometry_msgs::msg::TransformStamped transform_stamped;
@@ -133,23 +131,19 @@ void RoiClusterFusionNode::fuseOnSingleImage(
133131
continue;
134132
}
135133

136-
Eigen::Vector4d projected_point =
137-
projection * Eigen::Vector4d(*iter_x, *iter_y, *iter_z, 1.0);
138-
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
139-
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
134+
Eigen::Vector2d projected_point =
135+
calcRawImageProjectedPoint(pinhole_camera_model, cv::Point3d(*iter_x, *iter_y, *iter_z));
140136
if (
141-
0 <= static_cast<int>(normalized_projected_point.x()) &&
142-
static_cast<int>(normalized_projected_point.x()) <=
143-
static_cast<int>(camera_info.width) - 1 &&
144-
0 <= static_cast<int>(normalized_projected_point.y()) &&
145-
static_cast<int>(normalized_projected_point.y()) <=
146-
static_cast<int>(camera_info.height) - 1) {
147-
min_x = std::min(static_cast<int>(normalized_projected_point.x()), min_x);
148-
min_y = std::min(static_cast<int>(normalized_projected_point.y()), min_y);
149-
max_x = std::max(static_cast<int>(normalized_projected_point.x()), max_x);
150-
max_y = std::max(static_cast<int>(normalized_projected_point.y()), max_y);
151-
projected_points.push_back(normalized_projected_point);
152-
if (debugger_) debugger_->obstacle_points_.push_back(normalized_projected_point);
137+
0 <= static_cast<int>(projected_point.x()) &&
138+
static_cast<int>(projected_point.x()) <= static_cast<int>(camera_info.width) - 1 &&
139+
0 <= static_cast<int>(projected_point.y()) &&
140+
static_cast<int>(projected_point.y()) <= static_cast<int>(camera_info.height) - 1) {
141+
min_x = std::min(static_cast<int>(projected_point.x()), min_x);
142+
min_y = std::min(static_cast<int>(projected_point.y()), min_y);
143+
max_x = std::max(static_cast<int>(projected_point.x()), max_x);
144+
max_y = std::max(static_cast<int>(projected_point.y()), max_y);
145+
projected_points.push_back(projected_point);
146+
if (debugger_) debugger_->obstacle_points_.push_back(projected_point);
153147
}
154148
}
155149
if (projected_points.empty()) {

perception/image_projection_based_fusion/src/roi_detected_object_fusion/node.cpp

+7-14
Original file line numberDiff line numberDiff line change
@@ -86,15 +86,12 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
8686
object2camera_affine = transformToEigen(transform_stamped_optional.value().transform);
8787
}
8888

89-
Eigen::Matrix4d camera_projection;
90-
camera_projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2),
91-
camera_info.p.at(3), camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6),
92-
camera_info.p.at(7), camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10),
93-
camera_info.p.at(11);
89+
image_geometry::PinholeCameraModel pinhole_camera_model;
90+
pinhole_camera_model.fromCameraInfo(camera_info);
9491

9592
const auto object_roi_map = generateDetectedObjectRoIs(
9693
input_object_msg, static_cast<double>(camera_info.width),
97-
static_cast<double>(camera_info.height), object2camera_affine, camera_projection);
94+
static_cast<double>(camera_info.height), object2camera_affine, pinhole_camera_model);
9895
fuseObjectsOnImage(input_object_msg, input_roi_msg.feature_objects, object_roi_map);
9996

10097
if (debugger_) {
@@ -109,7 +106,8 @@ void RoiDetectedObjectFusionNode::fuseOnSingleImage(
109106
std::map<std::size_t, DetectedObjectWithFeature>
110107
RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
111108
const DetectedObjects & input_object_msg, const double image_width, const double image_height,
112-
const Eigen::Affine3d & object2camera_affine, const Eigen::Matrix4d & camera_projection)
109+
const Eigen::Affine3d & object2camera_affine,
110+
const image_geometry::PinholeCameraModel & pinhole_camera_model)
113111
{
114112
std::map<std::size_t, DetectedObjectWithFeature> object_roi_map;
115113
int64_t timestamp_nsec =
@@ -148,13 +146,8 @@ RoiDetectedObjectFusionNode::generateDetectedObjectRoIs(
148146
continue;
149147
}
150148

151-
Eigen::Vector2d proj_point;
152-
{
153-
Eigen::Vector4d proj_point_hom =
154-
camera_projection * Eigen::Vector4d(point.x(), point.y(), point.z(), 1.0);
155-
proj_point = Eigen::Vector2d(
156-
proj_point_hom.x() / (proj_point_hom.z()), proj_point_hom.y() / (proj_point_hom.z()));
157-
}
149+
Eigen::Vector2d proj_point = calcRawImageProjectedPoint(
150+
pinhole_camera_model, cv::Point3d(point.x(), point.y(), point.z()));
158151

159152
min_x = std::min(proj_point.x(), min_x);
160153
min_y = std::min(proj_point.y(), min_y);

perception/image_projection_based_fusion/src/roi_pointcloud_fusion/node.cpp

+8-12
Original file line numberDiff line numberDiff line change
@@ -101,10 +101,9 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
101101
}
102102

103103
// transform pointcloud to camera optical frame id
104-
Eigen::Matrix4d projection;
105-
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
106-
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
107-
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11);
104+
image_geometry::PinholeCameraModel pinhole_camera_model;
105+
pinhole_camera_model.fromCameraInfo(camera_info);
106+
108107
geometry_msgs::msg::TransformStamped transform_stamped;
109108
{
110109
const auto transform_stamped_optional = getTransformStamped(
@@ -143,10 +142,8 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
143142
if (transformed_z <= 0.0) {
144143
continue;
145144
}
146-
Eigen::Vector4d projected_point =
147-
projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
148-
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
149-
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
145+
Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
146+
pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
150147
for (std::size_t i = 0; i < output_objs.size(); ++i) {
151148
auto & feature_obj = output_objs.at(i);
152149
const auto & check_roi = feature_obj.feature.roi;
@@ -156,10 +153,9 @@ void RoiPointCloudFusionNode::fuseOnSingleImage(
156153
continue;
157154
}
158155
if (
159-
check_roi.x_offset <= normalized_projected_point.x() &&
160-
check_roi.y_offset <= normalized_projected_point.y() &&
161-
check_roi.x_offset + check_roi.width >= normalized_projected_point.x() &&
162-
check_roi.y_offset + check_roi.height >= normalized_projected_point.y()) {
156+
check_roi.x_offset <= projected_point.x() && check_roi.y_offset <= projected_point.y() &&
157+
check_roi.x_offset + check_roi.width >= projected_point.x() &&
158+
check_roi.y_offset + check_roi.height >= projected_point.y()) {
163159
std::memcpy(
164160
&cluster.data[clusters_data_size.at(i)], &input_pointcloud_msg.data[offset], point_step);
165161
clusters_data_size.at(i) += point_step;

perception/image_projection_based_fusion/src/segmentation_pointcloud_fusion/node.cpp

+8-14
Original file line numberDiff line numberDiff line change
@@ -65,11 +65,9 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
6565
if (mask.cols == 0 || mask.rows == 0) {
6666
return;
6767
}
68-
Eigen::Matrix4d projection;
69-
projection << camera_info.p.at(0), camera_info.p.at(1), camera_info.p.at(2), camera_info.p.at(3),
70-
camera_info.p.at(4), camera_info.p.at(5), camera_info.p.at(6), camera_info.p.at(7),
71-
camera_info.p.at(8), camera_info.p.at(9), camera_info.p.at(10), camera_info.p.at(11), 0.0, 0.0,
72-
0.0, 1.0;
68+
image_geometry::PinholeCameraModel pinhole_camera_model;
69+
pinhole_camera_model.fromCameraInfo(camera_info);
70+
7371
geometry_msgs::msg::TransformStamped transform_stamped;
7472
// transform pointcloud from frame id to camera optical frame id
7573
{
@@ -113,14 +111,11 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
113111
continue;
114112
}
115113

116-
Eigen::Vector4d projected_point =
117-
projection * Eigen::Vector4d(transformed_x, transformed_y, transformed_z, 1.0);
118-
Eigen::Vector2d normalized_projected_point = Eigen::Vector2d(
119-
projected_point.x() / projected_point.z(), projected_point.y() / projected_point.z());
114+
Eigen::Vector2d projected_point = calcRawImageProjectedPoint(
115+
pinhole_camera_model, cv::Point3d(transformed_x, transformed_y, transformed_z));
120116

121-
bool is_inside_image =
122-
normalized_projected_point.x() > 0 && normalized_projected_point.x() < camera_info.width &&
123-
normalized_projected_point.y() > 0 && normalized_projected_point.y() < camera_info.height;
117+
bool is_inside_image = projected_point.x() > 0 && projected_point.x() < camera_info.width &&
118+
projected_point.y() > 0 && projected_point.y() < camera_info.height;
124119
if (!is_inside_image) {
125120
copyPointCloud(
126121
input_pointcloud_msg, point_step, global_offset, output_cloud, output_pointcloud_size);
@@ -129,8 +124,7 @@ void SegmentPointCloudFusionNode::fuseOnSingleImage(
129124

130125
// skip filtering pointcloud where semantic id out of the defined list
131126
uint8_t semantic_id = mask.at<uint8_t>(
132-
static_cast<uint16_t>(normalized_projected_point.y()),
133-
static_cast<uint16_t>(normalized_projected_point.x()));
127+
static_cast<uint16_t>(projected_point.y()), static_cast<uint16_t>(projected_point.x()));
134128
if (static_cast<size_t>(semantic_id) >= filter_semantic_label_target_.size()) {
135129
copyPointCloud(
136130
input_pointcloud_msg, point_step, global_offset, output_cloud, output_pointcloud_size);

perception/image_projection_based_fusion/src/utils/utils.cpp

+10
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,18 @@
1313
// limitations under the License.
1414

1515
#include "image_projection_based_fusion/utils/utils.hpp"
16+
1617
namespace image_projection_based_fusion
1718
{
19+
Eigen::Vector2d calcRawImageProjectedPoint(
20+
const image_geometry::PinholeCameraModel & pinhole_camera_model, const cv::Point3d & point3d)
21+
{
22+
const cv::Point2d rectified_image_point = pinhole_camera_model.project3dToPixel(point3d);
23+
24+
const cv::Point2d raw_image_point = pinhole_camera_model.unrectifyPoint(rectified_image_point);
25+
26+
return Eigen::Vector2d(raw_image_point.x, raw_image_point.y);
27+
}
1828

1929
std::optional<geometry_msgs::msg::TransformStamped> getTransformStamped(
2030
const tf2_ros::Buffer & tf_buffer, const std::string & target_frame_id,

perception/object_range_splitter/CMakeLists.txt

+4-4
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,12 @@ project(object_range_splitter)
44
find_package(autoware_cmake REQUIRED)
55
autoware_package()
66

7-
ament_auto_add_library(object_range_splitter SHARED
8-
src/node.cpp
7+
ament_auto_add_library(${PROJECT_NAME} SHARED
8+
src/object_range_splitter_node.cpp
99
)
1010

11-
rclcpp_components_register_node(object_range_splitter
12-
PLUGIN "object_range_splitter::ObjectRangeSplitterNode"
11+
rclcpp_components_register_node(${PROJECT_NAME}
12+
PLUGIN "autoware::object_range_splitter::ObjectRangeSplitterNode"
1313
EXECUTABLE object_range_splitter_node
1414
)
1515

perception/object_range_splitter/package.xml

-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@
1414
<depend>autoware_perception_msgs</depend>
1515
<depend>rclcpp</depend>
1616
<depend>rclcpp_components</depend>
17-
<depend>sensor_msgs</depend>
1817

1918
<test_depend>ament_lint_auto</test_depend>
2019
<test_depend>autoware_lint_common</test_depend>

perception/object_range_splitter/src/node.cpp perception/object_range_splitter/src/object_range_splitter_node.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -12,9 +12,9 @@
1212
// See the License for the specific language governing permissions and
1313
// limitations under the License.
1414

15-
#include "object_range_splitter/node.hpp"
15+
#include "object_range_splitter_node.hpp"
1616

17-
namespace object_range_splitter
17+
namespace autoware::object_range_splitter
1818
{
1919
ObjectRangeSplitterNode::ObjectRangeSplitterNode(const rclcpp::NodeOptions & node_options)
2020
: Node("object_range_splitter_node", node_options)
@@ -59,7 +59,7 @@ void ObjectRangeSplitterNode::objectCallback(
5959
long_range_object_pub_->publish(output_long_range_object_msg);
6060
short_range_object_pub_->publish(output_short_range_object_msg);
6161
}
62-
} // namespace object_range_splitter
62+
} // namespace autoware::object_range_splitter
6363

6464
#include <rclcpp_components/register_node_macro.hpp>
65-
RCLCPP_COMPONENTS_REGISTER_NODE(object_range_splitter::ObjectRangeSplitterNode)
65+
RCLCPP_COMPONENTS_REGISTER_NODE(autoware::object_range_splitter::ObjectRangeSplitterNode)

0 commit comments

Comments
 (0)