
Commit 5e86df6

tzhong518, pre-commit-ci[bot], and kminoda authored and committed
chore(tensorrt_yolox): rework parameters (#6239)
* chore: use config
* style(pre-commit): autofix
* fix: rename
* fix: add json schema
* style(pre-commit): autofix
* fix: add comment to param.yaml

Signed-off-by: tzhong518 <sworgun@gmail.com>
Signed-off-by: Kotaro Yoshimoto <pythagora.yoshimoto@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: kminoda <44218668+kminoda@users.noreply.github.com>
1 parent 539461f commit 5e86df6

7 files changed: +249, -74 lines

perception/tensorrt_yolox/CMakeLists.txt (+1)

@@ -114,4 +114,5 @@ endif()
 
 ament_auto_package(INSTALL_TO_SHARE
   launch
+  config
 )
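
The one-line CMake change above installs the new config directory into the package share, which is what lets $(find-pkg-share tensorrt_yolox)/config/... in the launch files below resolve after a build. As a quick way to confirm that, here is a minimal Python sketch (not part of the commit) that performs the same lookup through the ament index, assuming the workspace has been built and sourced:

# Hedged sketch: locate the installed parameter file via the ament index,
# i.e. the same mechanism behind $(find-pkg-share tensorrt_yolox).
from pathlib import Path

from ament_index_python.packages import get_package_share_directory

share_dir = Path(get_package_share_directory("tensorrt_yolox"))
param_file = share_dir / "config" / "yolox_s_plus_opt.param.yaml"
print(param_file, "exists:", param_file.exists())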
perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml (new file, +15)

@@ -0,0 +1,15 @@
+/**:
+  ros__parameters:
+    model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
+    label_path: "$(var data_path)/tensorrt_yolox/label.txt"
+    score_threshold: 0.35
+    nms_threshold: 0.7
+    precision: "int8" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
+    calibration_algorithm: "Entropy" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
+    dla_core_id: -1 # If positive ID value is specified, the node assign inference task to the DLA core.
+    quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
+    quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
+    profile_per_layer: false # If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose.
+    clip_value: 6.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
+    preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
+    calibration_image_list_path: "" # Path to a file which contains path to images. Those images will be used for int8 quantization.
perception/tensorrt_yolox/config/yolox_tiny.param.yaml (new file, +15)

@@ -0,0 +1,15 @@
+/**:
+  ros__parameters:
+    model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
+    label_path: "$(var data_path)/tensorrt_yolox/label.txt"
+    score_threshold: 0.35
+    nms_threshold: 0.7
+    precision: "fp16" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
+    calibration_algorithm: "MinMax" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
+    dla_core_id: -1 # If positive ID value is specified, the node assign inference task to the DLA core.
+    quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
+    quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
+    profile_per_layer: false # If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose.
+    clip_value: 0.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
+    preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
+    calibration_image_list_path: "" # Path to a file which contains path to images. Those images will be used for int8 quantization.

perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml (+2, -37)

@@ -5,30 +5,7 @@
   <arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
   <arg name="model_name" default="yolox-sPlus-T4-960x960-pseudo-finetune"/>
   <arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
-  <arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
-  <arg name="score_threshold" default="0.35"/>
-  <arg name="nms_threshold" default="0.7"/>
-  <arg name="precision" default="int8" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
-  <arg
-    name="calibration_algorithm"
-    default="Entropy"
-    description="Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]"
-  />
-  <arg name="dla_core_id" default="-1" description="If positive ID value is specified, the node assign inference task to the DLA core"/>
-  <arg name="quantize_first_layer" default="false" description="If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg name="quantize_last_layer" default="false" description="If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg
-    name="profile_per_layer"
-    default="false"
-    description="If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
-  />
-  <arg
-    name="clip_value"
-    default="6.0"
-    description="If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
-  />
-  <arg name="preprocess_on_gpu" default="true" description="If true, pre-processing is performed on GPU"/>
-  <arg name="calibration_image_list_path" default="" description="Path to a file which contains path to images. Those images will be used for int8 quantization."/>
+  <arg name="yolox_param_path" default="$(find-pkg-share tensorrt_yolox)/config/yolox_s_plus_opt.param.yaml"/>
   <arg name="use_decompress" default="true" description="use image decompress"/>
   <arg name="build_only" default="false" description="exit after trt engine is built"/>
 

@@ -40,19 +17,7 @@
   <node pkg="tensorrt_yolox" exec="tensorrt_yolox_node_exe" name="tensorrt_yolox" output="screen">
     <remap from="~/in/image" to="$(var input/image)"/>
     <remap from="~/out/objects" to="$(var output/objects)"/>
-    <param name="score_threshold" value="$(var score_threshold)"/>
-    <param name="nms_threshold" value="$(var nms_threshold)"/>
-    <param name="model_path" value="$(var model_path)/$(var model_name).onnx"/>
-    <param name="label_path" value="$(var model_path)/label.txt"/>
-    <param name="precision" value="$(var precision)"/>
-    <param name="calibration_algorithm" value="$(var calibration_algorithm)"/>
-    <param name="dla_core_id" value="$(var dla_core_id)"/>
-    <param name="quantize_first_layer" value="$(var quantize_first_layer)"/>
-    <param name="quantize_last_layer" value="$(var quantize_last_layer)"/>
-    <param name="profile_per_layer" value="$(var profile_per_layer)"/>
-    <param name="clip_value" value="$(var clip_value)"/>
-    <param name="preprocess_on_gpu" value="$(var preprocess_on_gpu)"/>
-    <param name="calibration_image_list_path" value="$(var calibration_image_list_path)"/>
+    <param from="$(var yolox_param_path)" allow_substs="true"/>
     <param name="build_only" value="$(var build_only)"/>
   </node>
 </launch>
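
With this rework the node no longer receives each setting as an individual launch argument: every parameter comes from the package's param.yaml, loaded through the single <param from="$(var yolox_param_path)" allow_substs="true"/> line, and allow_substs is what expands the $(var data_path) / $(var model_name) substitutions inside that YAML at load time. To run with different values, either edit the config file or point yolox_param_path at your own copy, e.g. ros2 launch tensorrt_yolox yolox_s_plus_opt.launch.xml yolox_param_path:=/path/to/my.param.yaml (path illustrative); the yolox_tiny launch file below works the same way. For reference, a minimal Python-launch sketch of the equivalent wiring, assuming the standard launch / launch_ros APIs (remappings and the build_only handling are omitted, and this file is not part of the commit):

# Hedged sketch: Python-launch equivalent of the rewritten XML above. Assumes the
# standard launch / launch_ros APIs; remappings and build_only are left out.
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.substitutions import EnvironmentVariable, LaunchConfiguration, PathJoinSubstitution
from launch_ros.actions import Node
from launch_ros.parameter_descriptions import ParameterFile
from launch_ros.substitutions import FindPackageShare


def generate_launch_description():
    return LaunchDescription([
        # Same defaults as yolox_s_plus_opt.launch.xml.
        DeclareLaunchArgument("model_name", default_value="yolox-sPlus-T4-960x960-pseudo-finetune"),
        DeclareLaunchArgument(
            "data_path",
            default_value=PathJoinSubstitution([EnvironmentVariable("HOME"), "autoware_data"]),
        ),
        DeclareLaunchArgument(
            "yolox_param_path",
            default_value=PathJoinSubstitution(
                [FindPackageShare("tensorrt_yolox"), "config", "yolox_s_plus_opt.param.yaml"]
            ),
        ),
        Node(
            package="tensorrt_yolox",
            executable="tensorrt_yolox_node_exe",
            name="tensorrt_yolox",
            output="screen",
            # allow_substs=True expands $(var data_path) / $(var model_name) inside
            # the YAML, mirroring allow_substs="true" in the XML frontend.
            parameters=[ParameterFile(LaunchConfiguration("yolox_param_path"), allow_substs=True)],
        ),
    ])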

perception/tensorrt_yolox/launch/yolox_tiny.launch.xml (+2, -37)

@@ -4,30 +4,7 @@
   <arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
   <arg name="model_name" default="yolox-tiny"/>
   <arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
-  <arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
-  <arg name="score_threshold" default="0.35"/>
-  <arg name="nms_threshold" default="0.7"/>
-  <arg name="precision" default="fp16" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
-  <arg
-    name="calibration_algorithm"
-    default="MinMax"
-    description="Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]"
-  />
-  <arg name="dla_core_id" default="-1" description="If positive ID value is specified, the node assign inference task to the DLA core"/>
-  <arg name="quantize_first_layer" default="false" description="If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg name="quantize_last_layer" default="false" description="If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg
-    name="profile_per_layer"
-    default="false"
-    description="If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
-  />
-  <arg
-    name="clip_value"
-    default="0.0"
-    description="If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
-  />
-  <arg name="preprocess_on_gpu" default="true" description="If true, pre-processing is performed on GPU"/>
-  <arg name="calibration_image_list_path" default="" description="Path to a file which contains path to images. Those images will be used for int8 quantization."/>
+  <arg name="yolox_param_path" default="$(find-pkg-share tensorrt_yolox)/config/yolox_tiny.param.yaml"/>
   <arg name="use_decompress" default="true" description="use image decompress"/>
   <arg name="build_only" default="false" description="exit after trt engine is built"/>
 

@@ -39,19 +16,7 @@
   <node pkg="tensorrt_yolox" exec="tensorrt_yolox_node_exe" name="tensorrt_yolox" output="screen">
     <remap from="~/in/image" to="$(var input/image)"/>
     <remap from="~/out/objects" to="$(var output/objects)"/>
-    <param name="score_threshold" value="$(var score_threshold)"/>
-    <param name="nms_threshold" value="$(var nms_threshold)"/>
-    <param name="model_path" value="$(var model_path)/$(var model_name).onnx"/>
-    <param name="label_path" value="$(var model_path)/label.txt"/>
-    <param name="precision" value="$(var precision)"/>
-    <param name="calibration_algorithm" value="$(var calibration_algorithm)"/>
-    <param name="dla_core_id" value="$(var dla_core_id)"/>
-    <param name="quantize_first_layer" value="$(var quantize_first_layer)"/>
-    <param name="quantize_last_layer" value="$(var quantize_last_layer)"/>
-    <param name="profile_per_layer" value="$(var profile_per_layer)"/>
-    <param name="clip_value" value="$(var clip_value)"/>
-    <param name="preprocess_on_gpu" value="$(var preprocess_on_gpu)"/>
-    <param name="calibration_image_list_path" value="$(var calibration_image_list_path)"/>
+    <param from="$(var yolox_param_path)" allow_substs="true"/>
     <param name="build_only" value="$(var build_only)"/>
   </node>
 </launch>
perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json (new file, +107)

@@ -0,0 +1,107 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for tensorrt_yolox_s_plus_opt Nodes",
+  "type": "object",
+  "definitions": {
+    "yolox_s_plus_opt": {
+      "type": "object",
+      "properties": {
+        "model_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+          "description": "Path to onnx model."
+        },
+        "label_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/label.txt",
+          "description": "Path to label file."
+        },
+        "score_threshold": {
+          "type": "number",
+          "default": 0.35,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+        },
+        "nms_threshold": {
+          "type": "number",
+          "default": 0.7,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of NMS."
+        },
+        "precision": {
+          "type": "string",
+          "default": "int8",
+          "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+        },
+        "calibration_algorithm": {
+          "type": "string",
+          "default": "Entropy",
+          "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+        },
+        "dla_core_id": {
+          "type": "number",
+          "default": -1,
+          "description": "If positive ID value is specified, the node assign inference task to the DLA core."
+        },
+        "quantize_first_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "quantize_last_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "profile_per_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
+        },
+        "clip_value": {
+          "type": "number",
+          "default": 6.0,
+          "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+        },
+        "preprocess_on_gpu": {
+          "type": "boolean",
+          "default": true,
+          "description": "If true, pre-processing is performed on GPU."
+        },
+        "calibration_image_list_path": {
+          "type": "string",
+          "default": "",
+          "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
+        }
+      },
+      "required": [
+        "model_path",
+        "label_path",
+        "score_threshold",
+        "nms_threshold",
+        "precision",
+        "calibration_algorithm",
+        "dla_core_id",
+        "quantize_first_layer",
+        "quantize_last_layer",
+        "profile_per_layer",
+        "clip_value",
+        "preprocess_on_gpu"
+      ]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/yolox_s_plus_opt"
+        }
+      },
+      "required": ["ros__parameters"]
+    }
+  },
+  "required": ["/**"]
+}
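
The schema mirrors the standard ROS 2 parameter-file layout: a top-level "/**" wildcard entry whose ros__parameters block is validated against the yolox_s_plus_opt definition above (the yolox_tiny schema below is structured the same way). A minimal sketch of checking the shipped config against this schema offline, assuming PyYAML and the jsonschema package are available; the relative paths are placeholders for wherever the two files sit in your checkout:

# Hedged sketch: offline check that the shipped param.yaml matches this schema.
import json

import yaml
from jsonschema import validate

with open("perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json") as f:
    schema = json.load(f)
with open("perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml") as f:
    params = yaml.safe_load(f)

# The YAML parses to {"/**": {"ros__parameters": {...}}}; the $(var ...) values are
# plain strings at this stage, which still satisfy the "string" type checks.
validate(instance=params, schema=schema)
print("yolox_s_plus_opt.param.yaml conforms to the schema")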
perception/tensorrt_yolox/schema/yolox_tiny.schema.json (new file, +107)

@@ -0,0 +1,107 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for tensorrt_yolox_tiny Nodes",
+  "type": "object",
+  "definitions": {
+    "yolox_tiny": {
+      "type": "object",
+      "properties": {
+        "model_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+          "description": "Path to onnx model."
+        },
+        "label_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/label.txt",
+          "description": "Path to label file."
+        },
+        "score_threshold": {
+          "type": "number",
+          "default": 0.35,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+        },
+        "nms_threshold": {
+          "type": "number",
+          "default": 0.7,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of NMS."
+        },
+        "precision": {
+          "type": "string",
+          "default": "fp16",
+          "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+        },
+        "calibration_algorithm": {
+          "type": "string",
+          "default": "MinMax",
+          "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+        },
+        "dla_core_id": {
+          "type": "number",
+          "default": -1,
+          "description": "If positive ID value is specified, the node assign inference task to the DLA core."
+        },
+        "quantize_first_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "quantize_last_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "profile_per_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
+        },
+        "clip_value": {
+          "type": "number",
+          "default": 0.0,
+          "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+        },
+        "preprocess_on_gpu": {
+          "type": "boolean",
+          "default": true,
+          "description": "If true, pre-processing is performed on GPU."
+        },
+        "calibration_image_list_path": {
+          "type": "string",
+          "default": "",
+          "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
+        }
+      },
+      "required": [
+        "model_path",
+        "label_path",
+        "score_threshold",
+        "nms_threshold",
+        "precision",
+        "calibration_algorithm",
+        "dla_core_id",
+        "quantize_first_layer",
+        "quantize_last_layer",
+        "profile_per_layer",
+        "clip_value",
+        "preprocess_on_gpu"
+      ]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/yolox_tiny"
+        }
+      },
+      "required": ["ros__parameters"]
+    }
+  },
+  "required": ["/**"]
+}
