
Commit 38232f1

style(pre-commit): autofix
Parent: f6cae45

2 files changed: +208 -182 lines

File 1 of 2: parameter schema for tensorrt_yolox_s_plus_opt nodes

@@ -1,94 +1,107 @@

The formatter's only visible change to the content is that the "required" array, previously written on a single line with the same twelve entries, is now wrapped to one entry per line; the remaining removed and added lines are otherwise identical, so the rest of the diff appears to be whitespace-only reformatting. The file after this commit:

{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Parameters for tensorrt_yolox_s_plus_opt Nodes",
  "type": "object",
  "definitions": {
    "yolox_s_plus_opt": {
      "type": "object",
      "properties": {
        "model_path": {
          "type": "string",
          "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
          "description": "Path to onnx model."
        },
        "label_path": {
          "type": "string",
          "default": "$(var data_path)/tensorrt_yolox/label.txt",
          "description": "Path to label file."
        },
        "score_threshold": {
          "type": "number",
          "default": 0.35,
          "minimum": 0.0,
          "maximum": 1.0,
          "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
        },
        "nms_threshold": {
          "type": "number",
          "default": 0.7,
          "minimum": 0.0,
          "maximum": 1.0,
          "description": "A threshold value of NMS."
        },
        "precision": {
          "type": "string",
          "default": "int8",
          "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
        },
        "calibration_algorithm": {
          "type": "string",
          "default": "Entropy",
          "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
        },
        "dla_core_id": {
          "type": "number",
          "default": -1,
          "description": "If positive ID value is specified, the node assign inference task to the DLA core."
        },
        "quantize_first_layer": {
          "type": "boolean",
          "default": false,
          "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
        },
        "quantize_last_layer": {
          "type": "boolean",
          "default": false,
          "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
        },
        "profile_per_layer": {
          "type": "boolean",
          "default": false,
          "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
        },
        "clip_value": {
          "type": "number",
          "default": 6.0,
          "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
        },
        "preprocess_on_gpu": {
          "type": "boolean",
          "default": true,
          "description": "If true, pre-processing is performed on GPU."
        },
        "calibration_image_list_path": {
          "type": "string",
          "default": "",
          "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
        }
      },
      "required": [
        "model_path",
        "label_path",
        "score_threshold",
        "nms_threshold",
        "precision",
        "calibration_algorithm",
        "dla_core_id",
        "quantize_first_layer",
        "quantize_last_layer",
        "profile_per_layer",
        "clip_value",
        "preprocess_on_gpu"
      ]
    }
  },
  "properties": {
    "/**": {
      "type": "object",
      "properties": {
        "ros__parameters": {
          "$ref": "#/definitions/yolox_s_plus_opt"
        }
      },
      "required": ["ros__parameters"]
    }
  },
  "required": ["/**"]
}
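
A quick way to sanity-check a ROS 2 parameter structure against this schema is the Python jsonschema package. The sketch below is a minimal illustration only: the schema file name, the placeholder paths, and the concrete parameter values are assumptions for the example, not part of this commit.

# Minimal validation sketch (illustrative only): check a ROS parameter dict
# against the yolox_s_plus_opt schema using the "jsonschema" package.
# The schema file name and all parameter values here are assumptions.
import json

import jsonschema

with open("yolox_s_plus_opt.schema.json") as f:  # hypothetical file name
    schema = json.load(f)

params = {
    "/**": {
        "ros__parameters": {
            "model_path": "/data/tensorrt_yolox/model.onnx",  # placeholder path
            "label_path": "/data/tensorrt_yolox/label.txt",   # placeholder path
            "score_threshold": 0.35,
            "nms_threshold": 0.7,
            "precision": "int8",
            "calibration_algorithm": "Entropy",
            "dla_core_id": -1,
            "quantize_first_layer": False,
            "quantize_last_layer": False,
            "profile_per_layer": False,
            "clip_value": 6.0,
            "preprocess_on_gpu": True,
            "calibration_image_list_path": "",
        }
    }
}

# Raises jsonschema.exceptions.ValidationError if a required key is missing,
# a value has the wrong type, or a threshold falls outside [0.0, 1.0].
jsonschema.validate(instance=params, schema=schema)
print("parameter structure conforms to the schema")

Because the definition bounds both thresholds to [0.0, 1.0], an out-of-range value such as a score_threshold of 1.5 would be rejected by this check.
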
File 2 of 2: parameter schema for tensorrt_yolox_tiny nodes

@@ -1,94 +1,107 @@

The change is the same as in the first file: the "required" array, previously on a single line, is wrapped to one entry per line, and the remaining removed and added lines are otherwise identical, so the rest of the diff appears to be whitespace-only reformatting. The file after this commit:

{
  "$schema": "http://json-schema.org/draft-07/schema#",
  "title": "Parameters for tensorrt_yolox_tiny Nodes",
  "type": "object",
  "definitions": {
    "yolox_tiny": {
      "type": "object",
      "properties": {
        "model_path": {
          "type": "string",
          "default": "$(var data_path)/tensorrt_yolox/$(var model_name).onnx",
          "description": "Path to onnx model."
        },
        "label_path": {
          "type": "string",
          "default": "$(var data_path)/tensorrt_yolox/label.txt",
          "description": "Path to label file."
        },
        "score_threshold": {
          "type": "number",
          "default": 0.35,
          "minimum": 0.0,
          "maximum": 1.0,
          "description": "A threshold value of existence probability score, all of objects with score less than this threshold are ignored."
        },
        "nms_threshold": {
          "type": "number",
          "default": 0.7,
          "minimum": 0.0,
          "maximum": 1.0,
          "description": "A threshold value of NMS."
        },
        "precision": {
          "type": "string",
          "default": "fp16",
          "description": "Operation precision to be used on inference. Valid value is one of: [fp32, fp16, int8]."
        },
        "calibration_algorithm": {
          "type": "string",
          "default": "MinMax",
          "description": "Calibration algorithm to be used for quantization when precision==int8. Valid value is one of: [Entropy, (Legacy | Percentile), MinMax]."
        },
        "dla_core_id": {
          "type": "number",
          "default": -1,
          "description": "If positive ID value is specified, the node assign inference task to the DLA core."
        },
        "quantize_first_layer": {
          "type": "boolean",
          "default": false,
          "description": "If true, set the operating precision for the first (input) layer to be fp16. This option is valid only when precision==int8."
        },
        "quantize_last_layer": {
          "type": "boolean",
          "default": false,
          "description": "If true, set the operating precision for the last (output) layer to be fp16. This option is valid only when precision==int8."
        },
        "profile_per_layer": {
          "type": "boolean",
          "default": false,
          "description": "If true, profiler function will be enabled. Since the profile function may affect execution speed, it is recommended to set this flag true only for development purpose."
        },
        "clip_value": {
          "type": "number",
          "default": 0.0,
          "description": "If positive value is specified, the value of each layer output will be clipped between [0.0, clip_value]. This option is valid only when precision==int8 and used to manually specify the dynamic range instead of using any calibration."
        },
        "preprocess_on_gpu": {
          "type": "boolean",
          "default": true,
          "description": "If true, pre-processing is performed on GPU."
        },
        "calibration_image_list_path": {
          "type": "string",
          "default": "",
          "description": "Path to a file which contains path to images. Those images will be used for int8 quantization."
        }
      },
      "required": [
        "model_path",
        "label_path",
        "score_threshold",
        "nms_threshold",
        "precision",
        "calibration_algorithm",
        "dla_core_id",
        "quantize_first_layer",
        "quantize_last_layer",
        "profile_per_layer",
        "clip_value",
        "preprocess_on_gpu"
      ]
    }
  },
  "properties": {
    "/**": {
      "type": "object",
      "properties": {
        "ros__parameters": {
          "$ref": "#/definitions/yolox_tiny"
        }
      },
      "required": ["ros__parameters"]
    }
  },
  "required": ["/**"]
}
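
The yolox_tiny schema differs from the s_plus_opt variant only in its defaults (precision "fp16", calibration_algorithm "MinMax", clip_value 0.0) and in the definition name it references. Those defaults can be read straight out of the schema; the sketch below follows the same pattern as the previous example and again uses a hypothetical file name.

# Minimal sketch (illustrative only): collect the declared defaults from the
# yolox_tiny schema, e.g. to compare them with the s_plus_opt variant.
import json

with open("yolox_tiny.schema.json") as f:  # hypothetical file name
    schema = json.load(f)

props = schema["definitions"]["yolox_tiny"]["properties"]
defaults = {name: spec["default"] for name, spec in props.items() if "default" in spec}

# Expected output includes "precision": "fp16", "calibration_algorithm": "MinMax",
# and "clip_value": 0.0, matching the defaults declared above.
print(json.dumps(defaults, indent=2, sort_keys=True))
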
