From 51e55aa6607892b5e5860d94daa6bad33c924c9c Mon Sep 17 00:00:00 2001
From: tzhong518 <sworgun@gmail.com>
Date: Tue, 30 Jan 2024 22:31:01 +0900
Subject: [PATCH 1/6] chore(tensorrt_yolox): move node parameters from launch files to config files

Signed-off-by: tzhong518 <sworgun@gmail.com>
---
 perception/tensorrt_yolox/CMakeLists.txt      |  1 +
 .../config/yolox_s_plus_opt.yaml              | 15 +++++++
 .../config/yolox_tiny.param.yaml              | 15 +++++++
 .../launch/yolox_s_plus_opt.launch.xml        | 39 +------------------
 .../launch/yolox_tiny.launch.xml              | 39 +------------------
 5 files changed, 35 insertions(+), 74 deletions(-)
 create mode 100644 perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml
 create mode 100644 perception/tensorrt_yolox/config/yolox_tiny.param.yaml

diff --git a/perception/tensorrt_yolox/CMakeLists.txt b/perception/tensorrt_yolox/CMakeLists.txt
index 21860854ccd5f..a5498a845e62e 100644
--- a/perception/tensorrt_yolox/CMakeLists.txt
+++ b/perception/tensorrt_yolox/CMakeLists.txt
@@ -114,4 +114,5 @@ endif()
 
 ament_auto_package(INSTALL_TO_SHARE
   launch
+  config
 )
diff --git a/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml b/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml
new file mode 100644
index 0000000000000..d88eadd9a6c98
--- /dev/null
+++ b/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml
@@ -0,0 +1,15 @@
+/**:
+  ros__parameters:
+    model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
+    label_path: "$(var data_path)/tensorrt_yolox/label.txt"
+    score_threshold: 0.35
+    nms_threshold: 0.7
+    precision: "int8"
+    calibration_algorithm: "Entropy"
+    dla_core_id: -1
+    quantize_first_layer: false
+    quantize_last_layer: false
+    profile_per_layer: false
+    clip_value: 6.0
+    preprocess_on_gpu: true
+    calibration_image_list_path: ""
\ No newline at end of file
diff --git a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
new file mode 100644
index 0000000000000..ea9faa5575d34
--- /dev/null
+++ b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
@@ -0,0 +1,15 @@
+/**:
+  ros__parameters:
+    model_path: "$(var data_path)/tensorrt_yolox/$(var model_name).onnx"
+    label_path: "$(var data_path)/tensorrt_yolox/label.txt"
+    score_threshold: 0.35
+    nms_threshold: 0.7
+    precision: "fp16"
+    calibration_algorithm: "MinMax"
+    dla_core_id: -1
+    quantize_first_layer: false
+    quantize_last_layer: false
+    profile_per_layer: false
+    clip_value: 0.0
+    preprocess_on_gpu: true
+    calibration_image_list_path: ""
\ No newline at end of file
diff --git a/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml b/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
index 3f8d7897ab5d3..dd15eda2913ce 100644
--- a/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
+++ b/perception/tensorrt_yolox/launch/yolox_s_plus_opt.launch.xml
@@ -5,30 +5,7 @@
   <arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
   <arg name="model_name" default="yolox-sPlus-T4-960x960-pseudo-finetune"/>
   <arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
-  <arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
-  <arg name="score_threshold" default="0.35"/>
-  <arg name="nms_threshold" default="0.7"/>
-  <arg name="precision" default="int8" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
-  <arg
-    name="calibration_algorithm"
-    default="Entropy"
-    description="Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]"
-  />
-  <arg name="dla_core_id" default="-1" description="If positive ID value is specified, the node assign inference task to the DLA core"/>
-  <arg name="quantize_first_layer" default="false" description="If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg name="quantize_last_layer" default="false" description="If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg
-    name="profile_per_layer"
-    default="false"
-    description="If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
-  />
-  <arg
-    name="clip_value"
-    default="6.0"
-    description="If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
-  />
-  <arg name="preprocess_on_gpu" default="true" description="If true, pre-processing is performed on GPU"/>
-  <arg name="calibration_image_list_path" default="" description="Path to a file which contains path to images. Those images will be used for int8 quantization."/>
+  <arg name="yolox_param_path" default="$(find-pkg-share tensorrt_yolox)/config/yolox_s_plus_opt.param.yaml"/>
   <arg name="use_decompress" default="true" description="use image decompress"/>
   <arg name="build_only" default="false" description="exit after trt engine is built"/>
 
@@ -40,19 +17,7 @@
   <node pkg="tensorrt_yolox" exec="tensorrt_yolox_node_exe" name="tensorrt_yolox" output="screen">
     <remap from="~/in/image" to="$(var input/image)"/>
     <remap from="~/out/objects" to="$(var output/objects)"/>
-    <param name="score_threshold" value="$(var score_threshold)"/>
-    <param name="nms_threshold" value="$(var nms_threshold)"/>
-    <param name="model_path" value="$(var model_path)/$(var model_name).onnx"/>
-    <param name="label_path" value="$(var model_path)/label.txt"/>
-    <param name="precision" value="$(var precision)"/>
-    <param name="calibration_algorithm" value="$(var calibration_algorithm)"/>
-    <param name="dla_core_id" value="$(var dla_core_id)"/>
-    <param name="quantize_first_layer" value="$(var quantize_first_layer)"/>
-    <param name="quantize_last_layer" value="$(var quantize_last_layer)"/>
-    <param name="profile_per_layer" value="$(var profile_per_layer)"/>
-    <param name="clip_value" value="$(var clip_value)"/>
-    <param name="preprocess_on_gpu" value="$(var preprocess_on_gpu)"/>
-    <param name="calibration_image_list_path" value="$(var calibration_image_list_path)"/>
+    <param from="$(var yolox_param_path)" allow_substs="true"/>
     <param name="build_only" value="$(var build_only)"/>
   </node>
 </launch>
diff --git a/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml b/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
index 2f08031ea159f..9e5d1c371b13b 100644
--- a/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
+++ b/perception/tensorrt_yolox/launch/yolox_tiny.launch.xml
@@ -4,30 +4,7 @@
   <arg name="output/objects" default="/perception/object_recognition/detection/rois0"/>
   <arg name="model_name" default="yolox-tiny"/>
   <arg name="data_path" default="$(env HOME)/autoware_data" description="packages data and artifacts directory path"/>
-  <arg name="model_path" default="$(var data_path)/tensorrt_yolox"/>
-  <arg name="score_threshold" default="0.35"/>
-  <arg name="nms_threshold" default="0.7"/>
-  <arg name="precision" default="fp16" description="operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]"/>
-  <arg
-    name="calibration_algorithm"
-    default="MinMax"
-    description="Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]"
-  />
-  <arg name="dla_core_id" default="-1" description="If positive ID value is specified, the node assign inference task to the DLA core"/>
-  <arg name="quantize_first_layer" default="false" description="If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg name="quantize_last_layer" default="false" description="If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8"/>
-  <arg
-    name="profile_per_layer"
-    default="false"
-    description="If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
-  />
-  <arg
-    name="clip_value"
-    default="0.0"
-    description="If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
-  />
-  <arg name="preprocess_on_gpu" default="true" description="If true, pre-processing is performed on GPU"/>
-  <arg name="calibration_image_list_path" default="" description="Path to a file which contains path to images. Those images will be used for int8 quantization."/>
+  <arg name="yolox_param_path" default="$(find-pkg-share tensorrt_yolox)/config/yolox_tiny.param.yaml"/>
   <arg name="use_decompress" default="true" description="use image decompress"/>
   <arg name="build_only" default="false" description="exit after trt engine is built"/>
 
@@ -39,19 +16,7 @@
   <node pkg="tensorrt_yolox" exec="tensorrt_yolox_node_exe" name="tensorrt_yolox" output="screen">
     <remap from="~/in/image" to="$(var input/image)"/>
     <remap from="~/out/objects" to="$(var output/objects)"/>
-    <param name="score_threshold" value="$(var score_threshold)"/>
-    <param name="nms_threshold" value="$(var nms_threshold)"/>
-    <param name="model_path" value="$(var model_path)/$(var model_name).onnx"/>
-    <param name="label_path" value="$(var model_path)/label.txt"/>
-    <param name="precision" value="$(var precision)"/>
-    <param name="calibration_algorithm" value="$(var calibration_algorithm)"/>
-    <param name="dla_core_id" value="$(var dla_core_id)"/>
-    <param name="quantize_first_layer" value="$(var quantize_first_layer)"/>
-    <param name="quantize_last_layer" value="$(var quantize_last_layer)"/>
-    <param name="profile_per_layer" value="$(var profile_per_layer)"/>
-    <param name="clip_value" value="$(var clip_value)"/>
-    <param name="preprocess_on_gpu" value="$(var preprocess_on_gpu)"/>
-    <param name="calibration_image_list_path" value="$(var calibration_image_list_path)"/>
+    <param from="$(var yolox_param_path)" allow_substs="true"/>
     <param name="build_only" value="$(var build_only)"/>
   </node>
 </launch>

From 5446bbe597fcf91af0dc127249a8f4b77656cb4d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 30 Jan 2024 13:35:33 +0000
Subject: [PATCH 2/6] style(pre-commit): autofix

---
 perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml | 2 +-
 perception/tensorrt_yolox/config/yolox_tiny.param.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml b/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml
index d88eadd9a6c98..76666e5106118 100644
--- a/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml
+++ b/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml
@@ -12,4 +12,4 @@
     profile_per_layer: false
     clip_value: 6.0
     preprocess_on_gpu: true
-    calibration_image_list_path: ""
\ No newline at end of file
+    calibration_image_list_path: ""
diff --git a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
index ea9faa5575d34..e8832952a1af5 100644
--- a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
+++ b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
@@ -12,4 +12,4 @@
     profile_per_layer: false
     clip_value: 0.0
     preprocess_on_gpu: true
-    calibration_image_list_path: ""
\ No newline at end of file
+    calibration_image_list_path: ""

From d88251c03f73f4c597850b6225d5e0efc23db8a3 Mon Sep 17 00:00:00 2001
From: tzhong518 <sworgun@gmail.com>
Date: Fri, 2 Feb 2024 09:21:08 +0900
Subject: [PATCH 3/6] fix(tensorrt_yolox): rename yolox_s_plus_opt.yaml to yolox_s_plus_opt.param.yaml to match the path referenced by the launch file

Signed-off-by: tzhong518 <sworgun@gmail.com>
---
 .../config/{yolox_s_plus_opt.yaml => yolox_s_plus_opt.param.yaml} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename perception/tensorrt_yolox/config/{yolox_s_plus_opt.yaml => yolox_s_plus_opt.param.yaml} (100%)

diff --git a/perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml
similarity index 100%
rename from perception/tensorrt_yolox/config/yolox_s_plus_opt.yaml
rename to perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml

From f6cae459f85abafb96401ec40975c5b7b13f0719 Mon Sep 17 00:00:00 2001
From: tzhong518 <sworgun@gmail.com>
Date: Mon, 19 Feb 2024 13:16:03 +0900
Subject: [PATCH 4/6] fix: add json schema

Signed-off-by: tzhong518 <sworgun@gmail.com>
---
 .../schema/yolox_s_plus_opt.schema.json       | 94 +++++++++++++++++++
 .../schema/yolox_tiny.schema.json             | 94 +++++++++++++++++++
 2 files changed, 188 insertions(+)
 create mode 100644 perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
 create mode 100644 perception/tensorrt_yolox/schema/yolox_tiny.schema.json

diff --git a/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json b/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
new file mode 100644
index 0000000000000..88932769ac874
--- /dev/null
+++ b/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
@@ -0,0 +1,94 @@
+{
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "Parameters for tensorrt_yolox_s_plus_opt Nodes",
+    "type": "object",
+    "definitions": {
+      "yolox_s_plus_opt": {
+        "type": "object",
+        "properties": {
+          "model_path": {
+            "type": "string",
+            "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+            "description": "Path to onnx model."
+          },
+          "label_path": {
+            "type": "string",
+            "default": "$(var data_path)/tensorrt_yolox/label.txt",
+            "description": "Path to label file."
+          },
+          "score_threshold": {
+            "type": "number",
+            "default": 0.35,
+            "minimum": 0.0,
+            "maximum": 1.0,
+            "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+          },
+          "nms_threshold": {
+            "type": "number",
+            "default": 0.7,
+            "minimum": 0.0,
+            "maximum": 1.0,
+            "description": "A threshold value of NMS."
+          },
+          "precision": {
+            "type": "string",
+            "default": "int8",
+            "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+          },
+          "calibration_algorithm": {
+            "type": "string",
+            "default": "Entropy",
+            "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+          },
+          "dla_core_id": {
+            "type": "number",
+            "default": -1,
+            "description": "If positive ID value is specified, the node assign inference task to the DLA core."
+          },
+          "quantize_first_layer": {
+            "type": "boolean",
+            "default": false,
+            "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+          },
+          "quantize_last_layer": {
+            "type": "boolean",
+            "default": false,
+            "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+          },
+          "profile_per_layer": {
+            "type": "boolean",
+            "default": false,
+            "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
+          },
+          "clip_value": {
+            "type": "number",
+            "default": 6.0,
+            "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+          },
+          "preprocess_on_gpu": {
+            "type": "boolean",
+            "default": true,
+            "description": "If true, pre-processing is performed on GPU."
+          },
+          "calibration_image_list_path": {
+            "type": "string",
+            "default": "",
+            "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
+          }
+        },
+        "required": ["model_path", "label_path", "score_threshold", "nms_threshold", "precision", "calibration_algorithm", "dla_core_id", "quantize_first_layer", "quantize_last_layer", "profile_per_layer", "clip_value", "preprocess_on_gpu"]
+      }
+    },
+    "properties": {
+      "/**": {
+        "type": "object",
+        "properties": {
+          "ros__parameters": {
+            "$ref": "#/definitions/yolox_s_plus_opt"
+          }
+        },
+        "required": ["ros__parameters"]
+      }
+    },
+    "required": ["/**"]
+  }
\ No newline at end of file
diff --git a/perception/tensorrt_yolox/schema/yolox_tiny.schema.json b/perception/tensorrt_yolox/schema/yolox_tiny.schema.json
new file mode 100644
index 0000000000000..5f1803300344d
--- /dev/null
+++ b/perception/tensorrt_yolox/schema/yolox_tiny.schema.json
@@ -0,0 +1,94 @@
+{
+    "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "Parameters for tensorrt_yolox_tiny Nodes",
+    "type": "object",
+    "definitions": {
+      "yolox_tiny": {
+        "type": "object",
+        "properties": {
+          "model_path": {
+            "type": "string",
+            "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+            "description": "Path to onnx model."
+          },
+          "label_path": {
+            "type": "string",
+            "default": "$(var data_path)/tensorrt_yolox/label.txt",
+            "description": "Path to label file."
+          },
+          "score_threshold": {
+            "type": "number",
+            "default": 0.35,
+            "minimum": 0.0,
+            "maximum": 1.0,
+            "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+          },
+          "nms_threshold": {
+            "type": "number",
+            "default": 0.7,
+            "minimum": 0.0,
+            "maximum": 1.0,
+            "description": "A threshold value of NMS."
+          },
+          "precision": {
+            "type": "string",
+            "default": "fp16",
+            "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+          },
+          "calibration_algorithm": {
+            "type": "string",
+            "default": "MinMax",
+            "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+          },
+          "dla_core_id": {
+            "type": "number",
+            "default": -1,
+            "description": "If positive ID value is specified, the node assign inference task to the DLA core."
+          },
+          "quantize_first_layer": {
+            "type": "boolean",
+            "default": false,
+            "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+          },
+          "quantize_last_layer": {
+            "type": "boolean",
+            "default": false,
+            "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+          },
+          "profile_per_layer": {
+            "type": "boolean",
+            "default": false,
+            "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
+          },
+          "clip_value": {
+            "type": "number",
+            "default": 0.0,
+            "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+          },
+          "preprocess_on_gpu": {
+            "type": "boolean",
+            "default": true,
+            "description": "If true, pre-processing is performed on GPU."
+          },
+          "calibration_image_list_path": {
+            "type": "string",
+            "default": "",
+            "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
+          }
+        },
+        "required": ["model_path", "label_path", "score_threshold", "nms_threshold", "precision", "calibration_algorithm", "dla_core_id", "quantize_first_layer", "quantize_last_layer", "profile_per_layer", "clip_value", "preprocess_on_gpu"]
+      }
+    },
+    "properties": {
+      "/**": {
+        "type": "object",
+        "properties": {
+          "ros__parameters": {
+            "$ref": "#/definitions/yolox_tiny"
+          }
+        },
+        "required": ["ros__parameters"]
+      }
+    },
+    "required": ["/**"]
+  }
\ No newline at end of file

From 38232f1a0d9c441217246c9b453c694bbbc1650b Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 19 Feb 2024 04:18:04 +0000
Subject: [PATCH 5/6] style(pre-commit): autofix

---
 .../schema/yolox_s_plus_opt.schema.json       | 195 ++++++++++--------
 .../schema/yolox_tiny.schema.json             | 195 ++++++++++--------
 2 files changed, 208 insertions(+), 182 deletions(-)

diff --git a/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json b/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
index 88932769ac874..ce1ad6c2d0caf 100644
--- a/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
+++ b/perception/tensorrt_yolox/schema/yolox_s_plus_opt.schema.json
@@ -1,94 +1,107 @@
 {
-    "$schema": "http://json-schema.org/draft-07/schema#",
-    "title": "Parameters for tensorrt_yolox_s_plus_opt Nodes",
-    "type": "object",
-    "definitions": {
-      "yolox_s_plus_opt": {
-        "type": "object",
-        "properties": {
-          "model_path": {
-            "type": "string",
-            "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
-            "description": "Path to onnx model."
-          },
-          "label_path": {
-            "type": "string",
-            "default": "$(var data_path)/tensorrt_yolox/label.txt",
-            "description": "Path to label file."
-          },
-          "score_threshold": {
-            "type": "number",
-            "default": 0.35,
-            "minimum": 0.0,
-            "maximum": 1.0,
-            "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
-          },
-          "nms_threshold": {
-            "type": "number",
-            "default": 0.7,
-            "minimum": 0.0,
-            "maximum": 1.0,
-            "description": "A threshold value of NMS."
-          },
-          "precision": {
-            "type": "string",
-            "default": "int8",
-            "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
-          },
-          "calibration_algorithm": {
-            "type": "string",
-            "default": "Entropy",
-            "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
-          },
-          "dla_core_id": {
-            "type": "number",
-            "default": -1,
-            "description": "If positive ID value is specified, the node assign inference task to the DLA core."
-          },
-          "quantize_first_layer": {
-            "type": "boolean",
-            "default": false,
-            "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
-          },
-          "quantize_last_layer": {
-            "type": "boolean",
-            "default": false,
-            "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
-          },
-          "profile_per_layer": {
-            "type": "boolean",
-            "default": false,
-            "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
-          },
-          "clip_value": {
-            "type": "number",
-            "default": 6.0,
-            "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
-          },
-          "preprocess_on_gpu": {
-            "type": "boolean",
-            "default": true,
-            "description": "If true, pre-processing is performed on GPU."
-          },
-          "calibration_image_list_path": {
-            "type": "string",
-            "default": "",
-            "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
-          }
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for tensorrt_yolox_s_plus_opt Nodes",
+  "type": "object",
+  "definitions": {
+    "yolox_s_plus_opt": {
+      "type": "object",
+      "properties": {
+        "model_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+          "description": "Path to onnx model."
         },
-        "required": ["model_path", "label_path", "score_threshold", "nms_threshold", "precision", "calibration_algorithm", "dla_core_id", "quantize_first_layer", "quantize_last_layer", "profile_per_layer", "clip_value", "preprocess_on_gpu"]
-      }
-    },
-    "properties": {
-      "/**": {
-        "type": "object",
-        "properties": {
-          "ros__parameters": {
-            "$ref": "#/definitions/yolox_s_plus_opt"
-          }
+        "label_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/label.txt",
+          "description": "Path to label file."
         },
-        "required": ["ros__parameters"]
-      }
-    },
-    "required": ["/**"]
-  }
\ No newline at end of file
+        "score_threshold": {
+          "type": "number",
+          "default": 0.35,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+        },
+        "nms_threshold": {
+          "type": "number",
+          "default": 0.7,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of NMS."
+        },
+        "precision": {
+          "type": "string",
+          "default": "int8",
+          "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+        },
+        "calibration_algorithm": {
+          "type": "string",
+          "default": "Entropy",
+          "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+        },
+        "dla_core_id": {
+          "type": "number",
+          "default": -1,
+          "description": "If positive ID value is specified, the node assign inference task to the DLA core."
+        },
+        "quantize_first_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "quantize_last_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "profile_per_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
+        },
+        "clip_value": {
+          "type": "number",
+          "default": 6.0,
+          "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+        },
+        "preprocess_on_gpu": {
+          "type": "boolean",
+          "default": true,
+          "description": "If true, pre-processing is performed on GPU."
+        },
+        "calibration_image_list_path": {
+          "type": "string",
+          "default": "",
+          "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
+        }
+      },
+      "required": [
+        "model_path",
+        "label_path",
+        "score_threshold",
+        "nms_threshold",
+        "precision",
+        "calibration_algorithm",
+        "dla_core_id",
+        "quantize_first_layer",
+        "quantize_last_layer",
+        "profile_per_layer",
+        "clip_value",
+        "preprocess_on_gpu"
+      ]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/yolox_s_plus_opt"
+        }
+      },
+      "required": ["ros__parameters"]
+    }
+  },
+  "required": ["/**"]
+}
diff --git a/perception/tensorrt_yolox/schema/yolox_tiny.schema.json b/perception/tensorrt_yolox/schema/yolox_tiny.schema.json
index 5f1803300344d..f47b28e47a3f8 100644
--- a/perception/tensorrt_yolox/schema/yolox_tiny.schema.json
+++ b/perception/tensorrt_yolox/schema/yolox_tiny.schema.json
@@ -1,94 +1,107 @@
 {
-    "$schema": "http://json-schema.org/draft-07/schema#",
-    "title": "Parameters for tensorrt_yolox_tiny Nodes",
-    "type": "object",
-    "definitions": {
-      "yolox_tiny": {
-        "type": "object",
-        "properties": {
-          "model_path": {
-            "type": "string",
-            "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
-            "description": "Path to onnx model."
-          },
-          "label_path": {
-            "type": "string",
-            "default": "$(var data_path)/tensorrt_yolox/label.txt",
-            "description": "Path to label file."
-          },
-          "score_threshold": {
-            "type": "number",
-            "default": 0.35,
-            "minimum": 0.0,
-            "maximum": 1.0,
-            "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
-          },
-          "nms_threshold": {
-            "type": "number",
-            "default": 0.7,
-            "minimum": 0.0,
-            "maximum": 1.0,
-            "description": "A threshold value of NMS."
-          },
-          "precision": {
-            "type": "string",
-            "default": "fp16",
-            "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
-          },
-          "calibration_algorithm": {
-            "type": "string",
-            "default": "MinMax",
-            "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
-          },
-          "dla_core_id": {
-            "type": "number",
-            "default": -1,
-            "description": "If positive ID value is specified, the node assign inference task to the DLA core."
-          },
-          "quantize_first_layer": {
-            "type": "boolean",
-            "default": false,
-            "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
-          },
-          "quantize_last_layer": {
-            "type": "boolean",
-            "default": false,
-            "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
-          },
-          "profile_per_layer": {
-            "type": "boolean",
-            "default": false,
-            "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
-          },
-          "clip_value": {
-            "type": "number",
-            "default": 0.0,
-            "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
-          },
-          "preprocess_on_gpu": {
-            "type": "boolean",
-            "default": true,
-            "description": "If true, pre-processing is performed on GPU."
-          },
-          "calibration_image_list_path": {
-            "type": "string",
-            "default": "",
-            "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
-          }
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "Parameters for tensorrt_yolox_tiny Nodes",
+  "type": "object",
+  "definitions": {
+    "yolox_tiny": {
+      "type": "object",
+      "properties": {
+        "model_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
+          "description": "Path to onnx model."
         },
-        "required": ["model_path", "label_path", "score_threshold", "nms_threshold", "precision", "calibration_algorithm", "dla_core_id", "quantize_first_layer", "quantize_last_layer", "profile_per_layer", "clip_value", "preprocess_on_gpu"]
-      }
-    },
-    "properties": {
-      "/**": {
-        "type": "object",
-        "properties": {
-          "ros__parameters": {
-            "$ref": "#/definitions/yolox_tiny"
-          }
+        "label_path": {
+          "type": "string",
+          "default": "$(var data_path)/tensorrt_yolox/label.txt",
+          "description": "Path to label file."
         },
-        "required": ["ros__parameters"]
-      }
-    },
-    "required": ["/**"]
-  }
\ No newline at end of file
+        "score_threshold": {
+          "type": "number",
+          "default": 0.35,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
+        },
+        "nms_threshold": {
+          "type": "number",
+          "default": 0.7,
+          "minimum": 0.0,
+          "maximum": 1.0,
+          "description": "A threshold value of NMS."
+        },
+        "precision": {
+          "type": "string",
+          "default": "fp16",
+          "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
+        },
+        "calibration_algorithm": {
+          "type": "string",
+          "default": "MinMax",
+          "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
+        },
+        "dla_core_id": {
+          "type": "number",
+          "default": -1,
+          "description": "If a positive ID value is specified, the node assigns the inference task to the DLA core."
+        },
+        "quantize_first_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "quantize_last_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
+        },
+        "profile_per_layer": {
+          "type": "boolean",
+          "default": false,
+          "description": "If true, the profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes."
+        },
+        "clip_value": {
+          "type": "number",
+          "default": 0.0,
+          "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
+        },
+        "preprocess_on_gpu": {
+          "type": "boolean",
+          "default": true,
+          "description": "If true, pre-processing is performed on GPU."
+        },
+        "calibration_image_list_path": {
+          "type": "string",
+          "default": "",
+          "description": "Path to a file which contains paths to images. Those images will be used for int8 quantization."
+        }
+      },
+      "required": [
+        "model_path",
+        "label_path",
+        "score_threshold",
+        "nms_threshold",
+        "precision",
+        "calibration_algorithm",
+        "dla_core_id",
+        "quantize_first_layer",
+        "quantize_last_layer",
+        "profile_per_layer",
+        "clip_value",
+        "preprocess_on_gpu"
+      ]
+    }
+  },
+  "properties": {
+    "/**": {
+      "type": "object",
+      "properties": {
+        "ros__parameters": {
+          "$ref": "#/definitions/yolox_tiny"
+        }
+      },
+      "required": ["ros__parameters"]
+    }
+  },
+  "required": ["/**"]
+}

From 148ae19673b566f8de7d0bd6407d0d35ebec52ef Mon Sep 17 00:00:00 2001
From: tzhong518 <sworgun@gmail.com>
Date: Mon, 26 Feb 2024 09:53:13 +0900
Subject: [PATCH 6/6] fix: add comment to param.yaml

Signed-off-by: tzhong518 <sworgun@gmail.com>
---
 .../config/yolox_s_plus_opt.param.yaml         | 18 +++++++++---------
 .../config/yolox_tiny.param.yaml               | 18 +++++++++---------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml
index 76666e5106118..bc67173442094 100644
--- a/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml
+++ b/perception/tensorrt_yolox/config/yolox_s_plus_opt.param.yaml
@@ -4,12 +4,12 @@
     label_path: "$(var data_path)/tensorrt_yolox/label.txt"
     score_threshold: 0.35
     nms_threshold: 0.7
-    precision: "int8"
-    calibration_algorithm: "Entropy"
-    dla_core_id: -1
-    quantize_first_layer: false
-    quantize_last_layer: false
-    profile_per_layer: false
-    clip_value: 6.0
-    preprocess_on_gpu: true
-    calibration_image_list_path: ""
+    precision: "int8" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
+    calibration_algorithm: "Entropy" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
+    dla_core_id: -1 # If a positive ID value is specified, the node assigns the inference task to the DLA core.
+    quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
+    quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
+    profile_per_layer: false # If true, the profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes.
+    clip_value: 6.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
+    preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
+    calibration_image_list_path: "" # Path to a file which contains paths to images. Those images will be used for int8 quantization.
diff --git a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
index e8832952a1af5..e45742a7afb95 100644
--- a/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
+++ b/perception/tensorrt_yolox/config/yolox_tiny.param.yaml
@@ -4,12 +4,12 @@
     label_path: "$(var data_path)/tensorrt_yolox/label.txt"
     score_threshold: 0.35
     nms_threshold: 0.7
-    precision: "fp16"
-    calibration_algorithm: "MinMax"
-    dla_core_id: -1
-    quantize_first_layer: false
-    quantize_last_layer: false
-    profile_per_layer: false
-    clip_value: 0.0
-    preprocess_on_gpu: true
-    calibration_image_list_path: ""
+    precision: "fp16" # Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8].
+    calibration_algorithm: "MinMax" # Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax].
+    dla_core_id: -1 # If a positive ID value is specified, the node assigns the inference task to the DLA core.
+    quantize_first_layer: false # If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8.
+    quantize_last_layer: false # If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8.
+    profile_per_layer: false # If true, the profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purposes.
+    clip_value: 0.0 # If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration.
+    preprocess_on_gpu: true # If true, pre-processing is performed on GPU.
+    calibration_image_list_path: "" # Path to a file which contains paths to images. Those images will be used for int8 quantization.