Skip to content

Commit c8d17ae

Browse files
authored
Merge branch 'main' into fix/lidar_transfusion_param
2 parents 9c595c7 + 0c20459 commit c8d17ae

File tree

144 files changed

+1516
-1656
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

144 files changed

+1516
-1656
lines changed

.github/CODEOWNERS

+8-8
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ common/autoware_motion_utils/** fumiya.watanabe@tier4.jp kosuke.takeuchi@tier4.j
66
common/autoware_overlay_rviz_plugin/autoware_mission_details_overlay_rviz_plugin/** ahmed.ebrahim@leodrive.ai
77
common/autoware_overlay_rviz_plugin/autoware_overlay_rviz_plugin/** khalil@leodrive.ai
88
common/autoware_perception_rviz_plugin/** opensource@apex.ai satoshi.tanaka@tier4.jp shunsuke.miura@tier4.jp taiki.tanaka@tier4.jp takeshi.miura@tier4.jp yoshi.ri@tier4.jp
9-
common/autoware_point_types/** taichi.higashide@tier4.jp
9+
common/autoware_point_types/** david.wong@tier4.jp max.schmeller@tier4.jp
1010
common/autoware_test_utils/** kyoichi.sugahara@tier4.jp mamoru.sobue@tier4.jp takamasa.horibe@tier4.jp zulfaqar.azmi@tier4.jp
1111
common/autoware_testing/** adam.dabrowski@robotec.ai satoshi.ota@tier4.jp shumpei.wakabayashi@tier4.jp tomoya.kimura@tier4.jp
1212
common/autoware_universe_utils/** mamoru.sobue@tier4.jp takamasa.horibe@tier4.jp takayuki.murooka@tier4.jp
@@ -115,9 +115,9 @@ perception/elevation_map_loader/** kosuke.takeuchi@tier4.jp shintaro.tomie@tier4
115115
perception/euclidean_cluster/** dai.nguyen@tier4.jp yukihiro.saito@tier4.jp
116116
perception/ground_segmentation/** abrahammonrroy@yahoo.com dai.nguyen@tier4.jp shunsuke.miura@tier4.jp yoshi.ri@tier4.jp yukihiro.saito@tier4.jp
117117
perception/image_projection_based_fusion/** dai.nguyen@tier4.jp koji.minoda@tier4.jp kotaro.uetake@tier4.jp shunsuke.miura@tier4.jp tao.zhong@tier4.jp yoshi.ri@tier4.jp yukihiro.saito@tier4.jp
118-
perception/lidar_apollo_instance_segmentation/** yukihiro.saito@tier4.jp
119-
perception/lidar_apollo_segmentation_tvm/** ambroise.vincent@arm.com xinyu.wang@tier4.jp
120-
perception/lidar_apollo_segmentation_tvm_nodes/** ambroise.vincent@arm.com xinyu.wang@tier4.jp
118+
perception/lidar_apollo_instance_segmentation/** yoshi.ri@tier4.jp yukihiro.saito@tier4.jp
119+
perception/lidar_apollo_segmentation_tvm/** ambroise.vincent@arm.com yoshi.ri@tier4.jp
120+
perception/lidar_apollo_segmentation_tvm_nodes/** ambroise.vincent@arm.com yoshi.ri@tier4.jp
121121
perception/lidar_centerpoint/** kenzo.lobos@tier4.jp koji.minoda@tier4.jp
122122
perception/lidar_transfusion/** amadeusz.szymko.2@tier4.jp kenzo.lobos@tier4.jp satoshi.tanaka@tier4.jp
123123
perception/multi_object_tracker/** taekjin.lee@tier4.jp yoshi.ri@tier4.jp yukihiro.saito@tier4.jp
@@ -192,10 +192,10 @@ planning/behavior_velocity_planner/autoware_behavior_velocity_traffic_light_modu
192192
planning/behavior_velocity_planner/autoware_behavior_velocity_virtual_traffic_light_module/** kosuke.takeuchi@tier4.jp shumpei.wakabayashi@tier4.jp tomoya.kimura@tier4.jp
193193
planning/behavior_velocity_planner/autoware_behavior_velocity_walkway_module/** satoshi.ota@tier4.jp shumpei.wakabayashi@tier4.jp takayuki.murooka@tier4.jp tomoya.kimura@tier4.jp
194194
planning/motion_velocity_planner/autoware_motion_velocity_dynamic_obstacle_stop_module/** mamoru.sobue@tier4.jp maxime.clement@tier4.jp
195-
planning/motion_velocity_planner/autoware_motion_velocity_obstacle_velocity_limiter_module/** maxime.clement@tier4.jp daniel.sanchez@tier4.jp
195+
planning/motion_velocity_planner/autoware_motion_velocity_obstacle_velocity_limiter_module/** maxime.clement@tier4.jp
196196
planning/motion_velocity_planner/autoware_motion_velocity_out_of_lane_module/** maxime.clement@tier4.jp shumpei.wakabayashi@tier4.jp takayuki.murooka@tier4.jp tomoya.kimura@tier4.jp
197-
planning/motion_velocity_planner/autoware_motion_velocity_planner_common/** maxime.clement@tier4.jp daniel.sanchez@tier4.jp alqudah.mohammad@tier4.jp
198-
planning/motion_velocity_planner/autoware_motion_velocity_planner_node/** maxime.clement@tier4.jp daniel.sanchez@tier4.jp alqudah.mohammad@tier4.jp
197+
planning/motion_velocity_planner/autoware_motion_velocity_planner_common/** maxime.clement@tier4.jp
198+
planning/motion_velocity_planner/autoware_motion_velocity_planner_node/** maxime.clement@tier4.jp
199199
planning/sampling_based_planner/autoware_bezier_sampler/** maxime.clement@tier4.jp
200200
planning/sampling_based_planner/autoware_frenet_planner/** maxime.clement@tier4.jp
201201
planning/sampling_based_planner/autoware_path_sampler/** maxime.clement@tier4.jp
@@ -204,7 +204,7 @@ sensing/gnss_poser/** anh.nguyen.2@tier4.jp kento.yabuuchi.2@tier4.jp koji.minod
204204
sensing/image_diagnostics/** dai.nguyen@tier4.jp yoshi.ri@tier4.jp
205205
sensing/image_transport_decompressor/** kenzo.lobos@tier4.jp yukihiro.saito@tier4.jp
206206
sensing/imu_corrector/** koji.minoda@tier4.jp taiki.yamada@tier4.jp yamato.ando@tier4.jp
207-
sensing/livox/livox_tag_filter/** kenzo.lobos@tier4.jp ryohsuke.mitsudome@tier4.jp
207+
sensing/livox/autoware_livox_tag_filter/** kenzo.lobos@tier4.jp ryohsuke.mitsudome@tier4.jp
208208
sensing/pointcloud_preprocessor/** abrahammonrroy@yahoo.com dai.nguyen@tier4.jp david.wong@tier4.jp kenzo.lobos@tier4.jp kyoichi.sugahara@tier4.jp melike@leodrive.ai shunsuke.miura@tier4.jp yihsiang.fang@tier4.jp yoshi.ri@tier4.jp yukihiro.saito@tier4.jp
209209
sensing/radar_scan_to_pointcloud2/** satoshi.tanaka@tier4.jp shunsuke.miura@tier4.jp taekjin.lee@tier4.jp yoshi.ri@tier4.jp
210210
sensing/radar_static_pointcloud_filter/** satoshi.tanaka@tier4.jp shunsuke.miura@tier4.jp taekjin.lee@tier4.jp yoshi.ri@tier4.jp

launch/tier4_perception_launch/launch/object_recognition/detection/detection.launch.xml

+2
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
<arg name="use_validator" description="use obstacle_pointcloud based validator"/>
1414
<arg name="objects_validation_method" description="options: `obstacle_pointcloud` or `occupancy_grid`"/>
1515
<arg name="use_low_intensity_cluster_filter"/>
16+
<arg name="use_image_segmentation_based_filter"/>
1617
<arg name="use_multi_channel_tracker_merger"/>
1718

1819
<!-- External interfaces -->
@@ -163,6 +164,7 @@
163164
<arg name="use_roi_based_cluster" value="$(var use_roi_based_cluster)"/>
164165
<arg name="detection_by_tracker_param_path" value="$(var detection_by_tracker_param_path)"/>
165166
<arg name="use_low_intensity_cluster_filter" value="$(var use_low_intensity_cluster_filter)"/>
167+
<arg name="use_image_segmentation_based_filter" value="$(var use_image_segmentation_based_filter)"/>
166168
</include>
167169
</group>
168170
<group if="$(var switch/detector/lidar_dnn)">

launch/tier4_perception_launch/launch/object_recognition/detection/detector/camera_lidar_detector.launch.xml

+12-1
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
<arg name="lidar_detection_model" description="options: `centerpoint`, `apollo`, `pointpainting`, `clustering`"/>
55
<arg name="use_roi_based_cluster"/>
66
<arg name="use_low_intensity_cluster_filter"/>
7+
<arg name="use_image_segmentation_based_filter"/>
78

89
<!-- External interfaces -->
910
<arg name="number_of_cameras"/>
@@ -95,14 +96,24 @@
9596
</include>
9697
</group>
9798

99+
<!-- Image_segmentation based filter, apply for camera0 only-->
100+
<group>
101+
<include file="$(find-pkg-share image_projection_based_fusion)/launch/segmentation_pointcloud_fusion.launch.xml" if="$(var use_image_segmentation_based_filter)">
102+
<arg name="input/pointcloud" value="$(var input/pointcloud_map/pointcloud)"/>
103+
<arg name="output/pointcloud" value="/perception/object_recognition/detection/segmentation_based_filtered/pointcloud"/>
104+
</include>
105+
</group>
106+
98107
<!-- Clustering -->
99108
<group>
100109
<push-ros-namespace namespace="clustering"/>
101110
<group>
111+
<let name="euclidean_cluster_input" value="$(var input/pointcloud_map/pointcloud)" unless="$(var use_image_segmentation_based_filter)"/>
112+
<let name="euclidean_cluster_input" value="/perception/object_recognition/detection/segmentation_based_filtered/pointcloud" if="$(var use_image_segmentation_based_filter)"/>
102113
<let name="euclidean_cluster_output" value="euclidean_cluster/clusters" if="$(var use_roi_based_cluster)"/>
103114
<let name="euclidean_cluster_output" value="clusters" unless="$(var use_roi_based_cluster)"/>
104115
<include file="$(find-pkg-share euclidean_cluster)/launch/voxel_grid_based_euclidean_cluster.launch.xml">
105-
<arg name="input_pointcloud" value="$(var input/pointcloud_map/pointcloud)"/>
116+
<arg name="input_pointcloud" value="$(var euclidean_cluster_input)"/>
106117
<arg name="output_clusters" value="$(var euclidean_cluster_output)"/>
107118
<arg name="use_low_height_cropbox" value="$(var use_low_height_cropbox)"/>
108119
</include>

launch/tier4_perception_launch/launch/perception.launch.xml

+2
Original file line numberDiff line numberDiff line change
@@ -80,6 +80,7 @@
8080
<arg name="objects_filter_method" default="lanelet_filter" description="options: `lanelet_filter` or `position_filter`"/>
8181
<arg name="use_roi_based_cluster" default="true" description="use roi_based_cluster in clustering"/>
8282
<arg name="use_low_intensity_cluster_filter" default="true" description="use low_intensity_cluster_filter in clustering"/>
83+
<arg name="use_image_segmentation_based_filter" default="false" description="use image_segmentation_based_filter in clustering"/>
8384
<arg
8485
name="use_empty_dynamic_object_publisher"
8586
default="false"
@@ -240,6 +241,7 @@
240241
<arg name="use_pointcloud_map" value="$(var use_pointcloud_map)"/>
241242
<arg name="use_low_height_cropbox" value="$(var use_low_height_cropbox)"/>
242243
<arg name="use_low_intensity_cluster_filter" value="$(var use_low_intensity_cluster_filter)"/>
244+
<arg name="use_image_segmentation_based_filter" value="$(var use_image_segmentation_based_filter)"/>
243245
<arg name="use_object_filter" value="$(var use_object_filter)"/>
244246
<arg name="objects_filter_method" value="$(var objects_filter_method)"/>
245247
<arg name="use_roi_based_cluster" value="$(var use_roi_based_cluster)"/>

localization/ndt_scan_matcher/README.md

+10-2
Original file line numberDiff line numberDiff line change
@@ -241,18 +241,26 @@ This is a function that uses no ground LiDAR scan to estimate the scan matching
241241

242242
### Abstract
243243

244-
Calculate 2D covariance (xx, xy, yx, yy) in real time using the NDT convergence from multiple initial poses.
245-
The arrangement of multiple initial poses is efficiently limited by the Hessian matrix of the NDT score function.
244+
Initially, the covariance of NDT scan matching is a fixed value (FIXED_VALUE mode).
245+
So, three modes are provided for 2D covariance (xx, xy, yx, yy) estimation in real time: LAPLACE_APPROXIMATION, MULTI_NDT, and MULTI_NDT_SCORE.
246+
LAPLACE_APPROXIMATION calculates the inverse matrix of the XY (2x2) part of the Hessian obtained by NDT scan matching and uses it as the covariance matrix.
247+
On the other hand, MULTI_NDT and MULTI_NDT_SCORE use NDT convergence from multiple initial poses to obtain 2D covariance.
248+
Ideally, the arrangement of multiple initial poses is efficiently limited by the Hessian matrix of the NDT score function.
246249
In this implementation, the number of initial positions is fixed to simplify the code.
250+
To obtain the covariance, MULTI_NDT computes until convergence at each initial position, while MULTI_NDT_SCORE uses the nearest voxel transformation likelihood.
247251
The covariance can be seen as error ellipse from ndt_pose_with_covariance setting on rviz2.
248252
[original paper](https://www.fujipress.jp/jrm/rb/robot003500020435/).
249253

254+
<img src="./media/calculation_of_ndt_covariance.png" alt="drawing" width="600"/>
255+
250256
Note that this function may spoil healthy system behavior if it consumes much calculation resources.
251257

252258
### Parameters
253259

260+
There are three methods for calculating the 2D covariance in real time. You can select the method by changing covariance_estimation_type.
254261
initial_pose_offset_model is rotated around (x,y) = (0,0) in the direction of the first principal component of the Hessian matrix.
255262
initial_pose_offset_model_x & initial_pose_offset_model_y must have the same number of elements.
263+
In MULTI_NDT_SCORE mode, the scale of the output 2D covariance can be adjusted according to the temperature.
256264

257265
{{ json_to_markdown("localization/ndt_scan_matcher/schema/sub/covariance_covariance_estimation.json") }}
258266

localization/ndt_scan_matcher/config/ndt_scan_matcher.param.yaml

+6-1
Original file line numberDiff line numberDiff line change
@@ -107,13 +107,18 @@
107107

108108
# 2D Real-time covariance estimation with multiple searches (output_pose_covariance is the minimum value)
109109
covariance_estimation:
110-
enable: false
110+
# Covariance estimation type
111+
# 0=FIXED_VALUE, 1=LAPLACE_APPROXIMATION, 2=MULTI_NDT, 3=MULTI_NDT_SCORE
112+
covariance_estimation_type: 0
111113

112114
# Offset arrangement in covariance estimation [m]
113115
# initial_pose_offset_model_x & initial_pose_offset_model_y must have the same number of elements.
114116
initial_pose_offset_model_x: [0.0, 0.0, 0.5, -0.5, 1.0, -1.0]
115117
initial_pose_offset_model_y: [0.5, -0.5, 0.0, 0.0, 0.0, 0.0]
116118

119+
# In MULTI_NDT_SCORE, the parameter that adjusts the estimated 2D covariance
120+
temperature: 0.1
121+
117122

118123
dynamic_map_loading:
119124
# Dynamic map loading distance

localization/ndt_scan_matcher/include/ndt_scan_matcher/hyper_parameters.hpp

+31-28
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,13 @@ enum class ConvergedParamType {
2929
NEAREST_VOXEL_TRANSFORMATION_LIKELIHOOD = 1
3030
};
3131

32+
enum class CovarianceEstimationType {
33+
FIXED_VALUE = 0,
34+
LAPLACE_APPROXIMATION = 1,
35+
MULTI_NDT = 2,
36+
MULTI_NDT_SCORE = 3,
37+
};
38+
3239
struct HyperParameters
3340
{
3441
struct Frame
@@ -80,8 +87,10 @@ struct HyperParameters
8087

8188
struct CovarianceEstimation
8289
{
83-
bool enable{};
84-
std::vector<Eigen::Vector2d> initial_pose_offset_model{};
90+
CovarianceEstimationType covariance_estimation_type{};
91+
std::vector<double> initial_pose_offset_model_x{};
92+
std::vector<double> initial_pose_offset_model_y{};
93+
double temperature{};
8594
} covariance_estimation{};
8695
} covariance{};
8796

@@ -148,33 +157,27 @@ struct HyperParameters
148157
for (std::size_t i = 0; i < output_pose_covariance.size(); ++i) {
149158
covariance.output_pose_covariance[i] = output_pose_covariance[i];
150159
}
151-
covariance.covariance_estimation.enable =
152-
node->declare_parameter<bool>("covariance.covariance_estimation.enable");
153-
if (covariance.covariance_estimation.enable) {
154-
std::vector<double> initial_pose_offset_model_x =
155-
node->declare_parameter<std::vector<double>>(
156-
"covariance.covariance_estimation.initial_pose_offset_model_x");
157-
std::vector<double> initial_pose_offset_model_y =
158-
node->declare_parameter<std::vector<double>>(
159-
"covariance.covariance_estimation.initial_pose_offset_model_y");
160-
161-
if (initial_pose_offset_model_x.size() == initial_pose_offset_model_y.size()) {
162-
const size_t size = initial_pose_offset_model_x.size();
163-
covariance.covariance_estimation.initial_pose_offset_model.resize(size);
164-
for (size_t i = 0; i < size; i++) {
165-
covariance.covariance_estimation.initial_pose_offset_model[i].x() =
166-
initial_pose_offset_model_x[i];
167-
covariance.covariance_estimation.initial_pose_offset_model[i].y() =
168-
initial_pose_offset_model_y[i];
169-
}
170-
} else {
171-
std::stringstream message;
172-
message << "Invalid initial pose offset model parameters."
173-
<< "Please make sure that the number of elements in "
174-
<< "initial_pose_offset_model_x and initial_pose_offset_model_y are the same.";
175-
throw std::runtime_error(message.str());
176-
}
160+
const int64_t covariance_estimation_type_tmp = node->declare_parameter<int64_t>(
161+
"covariance.covariance_estimation.covariance_estimation_type");
162+
covariance.covariance_estimation.covariance_estimation_type =
163+
static_cast<CovarianceEstimationType>(covariance_estimation_type_tmp);
164+
covariance.covariance_estimation.initial_pose_offset_model_x =
165+
node->declare_parameter<std::vector<double>>(
166+
"covariance.covariance_estimation.initial_pose_offset_model_x");
167+
covariance.covariance_estimation.initial_pose_offset_model_y =
168+
node->declare_parameter<std::vector<double>>(
169+
"covariance.covariance_estimation.initial_pose_offset_model_y");
170+
if (
171+
covariance.covariance_estimation.initial_pose_offset_model_x.size() !=
172+
covariance.covariance_estimation.initial_pose_offset_model_y.size()) {
173+
std::stringstream message;
174+
message << "Invalid initial pose offset model parameters."
175+
<< "Please make sure that the number of elements in "
176+
<< "initial_pose_offset_model_x and initial_pose_offset_model_y are the same.";
177+
throw std::runtime_error(message.str());
177178
}
179+
covariance.covariance_estimation.temperature =
180+
node->declare_parameter<double>("covariance.covariance_estimation.temperature");
178181

179182
dynamic_map_loading.update_distance =
180183
node->declare_parameter<double>("dynamic_map_loading.update_distance");

localization/ndt_scan_matcher/include/ndt_scan_matcher/ndt_scan_matcher_core.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -131,7 +131,7 @@ class NDTScanMatcher : public rclcpp::Node
131131

132132
static int count_oscillation(const std::vector<geometry_msgs::msg::Pose> & result_pose_msg_array);
133133

134-
std::array<double, 36> estimate_covariance(
134+
Eigen::Matrix2d estimate_covariance(
135135
const pclomp::NdtResult & ndt_result, const Eigen::Matrix4f & initial_pose_matrix,
136136
const rclcpp::Time & sensor_ros_time);
137137

Loading

localization/ndt_scan_matcher/schema/sub/covariance_covariance_estimation.json

+18-5
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,12 @@
55
"covariance_estimation": {
66
"type": "object",
77
"properties": {
8-
"enable": {
9-
"type": "boolean",
10-
"description": "2D Real-time covariance estimation with multiple searches (output_pose_covariance is the minimum value).",
11-
"default": false
8+
"covariance_estimation_type": {
9+
"type": "number",
10+
"description": "2D Real-time Converged estimation type. 0=FIXED_VALUE, 1=LAPLACE_APPROXIMATION, 2=MULTI_NDT, 3=MULTI_NDT_SCORE (FIXED_VALUE mode does not perform 2D Real-time Converged estimation)",
11+
"default": 0,
12+
"minimum": 0,
13+
"maximum": 3
1214
},
1315
"initial_pose_offset_model_x": {
1416
"type": "array",
@@ -19,9 +21,20 @@
1921
"type": "array",
2022
"description": "Offset arrangement in covariance estimation [m]. initial_pose_offset_model_x & initial_pose_offset_model_y must have the same number of elements.",
2123
"default": [0.5, -0.5, 0.0, 0.0, 0.0, 0.0]
24+
},
25+
"temperature": {
26+
"type": "number",
27+
"description": "In MULTI_NDT_SCORE, the parameter that adjusts the estimated 2D covariance",
28+
"default": 0.1,
29+
"exclusiveMinimum": 0
2230
}
2331
},
24-
"required": ["enable", "initial_pose_offset_model_x", "initial_pose_offset_model_y"],
32+
33+
"required": [
34+
"covariance_estimation_type",
35+
"initial_pose_offset_model_x",
36+
"initial_pose_offset_model_y"
37+
],
2538
"additionalProperties": false
2639
}
2740
}

0 commit comments

Comments
 (0)