 * @brief Main with support Unicode paths, wide strings
 */
int tmain(int argc, tchar* argv[]) {
-    try {
+    {
        // -------- Get OpenVINO runtime version --------
        slog::info << ov::get_openvino_version() << slog::endl;

        // -------- Parsing and validation of input arguments --------
-        if (argc != 4) {
-            slog::info << "Usage : " << argv[0] << " <path_to_model> <path_to_image> <device_name>" << slog::endl;
-            return EXIT_FAILURE;
+        if (argc != 3) {
+            slog::info << "Usage : " << argv[0] << " <path_to_model> <device_name>" << slog::endl;
+            return EXIT_FAILURE;
        }
-
-        const std::string args = TSTRING2STRING(argv[0]);
+        //
+        // const std::string args = TSTRING2STRING(argv[0]);
        const std::string model_path = TSTRING2STRING(argv[1]);
-        const std::string image_path = TSTRING2STRING(argv[2]);
-        const std::string device_name = TSTRING2STRING(argv[3]);
+        // const std::string image_path = TSTRING2STRING(argv[2]);
+        const std::string device_name = TSTRING2STRING(argv[2]);

        // -------- Step 1. Initialize OpenVINO Runtime Core --------
        ov::Core core;

+        // std::vector<std::string> availableDevices = core.get_available_devices();
+        // for (auto&& device : availableDevices) {
+        //     slog::info << device << slog::endl;
+
+        //     // Query supported properties and print all of them
+        //     slog::info << "\tSUPPORTED_PROPERTIES: " << slog::endl;
+        //     auto supported_properties = core.get_property(device, ov::supported_properties);
+        //     for (auto&& property : supported_properties) {
+        //         if (property != ov::supported_properties.name()) {
+        //             slog::info << "\t\t" << (property.is_mutable() ? "Mutable: " : "Immutable: ") << property << " : "
+        //                        << slog::flush;
+
+        //         }
+        //     }
+
+        //     slog::info << slog::endl;
+        // }
+        // core.set_property(ov::cache_dir("C:\\Intel\\XeSDK"));
        // -------- Step 2. Read a model --------
-        slog::info << "Loading model files: " << model_path << slog::endl;
+        // slog::info << "Loading model files: " << model_path << slog::endl;
        std::shared_ptr<ov::Model> model = core.read_model(model_path);
-        printInputAndOutputsInfo(*model);
+        // printInputAndOutputsInfo(*model);

        OPENVINO_ASSERT(model->inputs().size() == 1, "Sample supports models with 1 input only");
-        OPENVINO_ASSERT(model->outputs().size() == 1, "Sample supports models with 1 output only");
+        // OPENVINO_ASSERT(model->outputs().size() == 1, "Sample supports models with 1 output only");

        // -------- Step 3. Set up input

        // Read input image to a tensor and set it to an infer request
        // without resize and layout conversions
-        FormatReader::ReaderPtr reader(image_path.c_str());
-        if (reader.get() == nullptr) {
-            std::stringstream ss;
-            ss << "Image " + image_path + " cannot be read!";
-            throw std::logic_error(ss.str());
-        }
-
-        ov::element::Type input_type = ov::element::u8;
-        ov::Shape input_shape = {1, reader->height(), reader->width(), 3};
-        std::shared_ptr<unsigned char> input_data = reader->getData();
-
-        // just wrap image data by ov::Tensor without allocating of new memory
-        ov::Tensor input_tensor = ov::Tensor(input_type, input_shape, input_data.get());
-
-        const ov::Layout tensor_layout{"NHWC"};
-
-        // -------- Step 4. Configure preprocessing --------
-
-        ov::preprocess::PrePostProcessor ppp(model);
-
-        // 1) Set input tensor information:
-        // - input() provides information about a single model input
-        // - reuse precision and shape from already available `input_tensor`
-        // - layout of data is 'NHWC'
-        ppp.input().tensor().set_shape(input_shape).set_element_type(input_type).set_layout(tensor_layout);
-        // 2) Adding explicit preprocessing steps:
-        // - convert layout to 'NCHW' (from 'NHWC' specified above at tensor layout)
-        // - apply linear resize from tensor spatial dims to model spatial dims
-        ppp.input().preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
-        // 4) Here we suppose model has 'NCHW' layout for input
-        ppp.input().model().set_layout("NCHW");
-        // 5) Set output tensor information:
-        // - precision of tensor is supposed to be 'f32'
-        ppp.output().tensor().set_element_type(ov::element::f32);
-
-        // 6) Apply preprocessing modifying the original 'model'
-        model = ppp.build();
+        // FormatReader::ReaderPtr reader(image_path.c_str());
+        // if (reader.get() == nullptr) {
+        //     std::stringstream ss;
+        //     ss << "Image " + image_path + " cannot be read!";
+        //     throw std::logic_error(ss.str());
+        // }
+
+        // ov::element::Type input_type = ov::element::u8;
+        // ov::Shape input_shape = {1, reader->height(), reader->width(), 3};
+        // std::shared_ptr<unsigned char> input_data = reader->getData();
+        //
+        // // just wrap image data by ov::Tensor without allocating of new memory
+        // ov::Tensor input_tensor = ov::Tensor(input_type, input_shape, input_data.get());
+        //
+        // const ov::Layout tensor_layout{"NHWC"};
+        //
+        // // -------- Step 4. Configure preprocessing --------
+        //
+        // ov::preprocess::PrePostProcessor ppp(model);
+        //
+        // // 1) Set input tensor information:
+        // // - input() provides information about a single model input
+        // // - reuse precision and shape from already available `input_tensor`
+        // // - layout of data is 'NHWC'
+        // ppp.input().tensor().set_shape(input_shape).set_element_type(input_type).set_layout(tensor_layout);
+        // // 2) Adding explicit preprocessing steps:
+        // // - convert layout to 'NCHW' (from 'NHWC' specified above at tensor layout)
+        // // - apply linear resize from tensor spatial dims to model spatial dims
+        // ppp.input().preprocess().resize(ov::preprocess::ResizeAlgorithm::RESIZE_LINEAR);
+        // // 4) Here we suppose model has 'NCHW' layout for input
+        // ppp.input().model().set_layout("NCHW");
+        // // 5) Set output tensor information:
+        // // - precision of tensor is supposed to be 'f32'
+        // ppp.output().tensor().set_element_type(ov::element::f32);
+        //
+        // // 6) Apply preprocessing modifying the original 'model'
+        // model = ppp.build();

        // -------- Step 5. Loading a model to the device --------
        ov::CompiledModel compiled_model = core.compile_model(model, device_name);

        // -------- Step 6. Create an infer request --------
        ov::InferRequest infer_request = compiled_model.create_infer_request();
+
        // -----------------------------------------------------------------------------------------------------

        // -------- Step 7. Prepare input --------
-        infer_request.set_input_tensor(input_tensor);
-
-        // -------- Step 8. Do inference synchronously --------
-        infer_request.infer();
-
-        // -------- Step 9. Process output
-        const ov::Tensor& output_tensor = infer_request.get_output_tensor();
-
-        // Print classification results
-        ClassificationResult classification_result(output_tensor, {image_path});
-        classification_result.show();
+        // infer_request.set_input_tensor(input_tensor);
+        //
+        // // -------- Step 8. Do inference synchronously --------
+        // infer_request.infer();
+        //
+        // // -------- Step 9. Process output
+        // const ov::Tensor& output_tensor = infer_request.get_output_tensor();
+        //
+        // // Print classification results
+        // ClassificationResult classification_result(output_tensor, {image_path});
+        // classification_result.show();
        // -----------------------------------------------------------------------------------------------------
-    } catch (const std::exception& ex) {
-        std::cerr << ex.what() << std::endl;
-        return EXIT_FAILURE;
+    // } catch (const std::exception& ex) {
+    //     std::cerr << ex.what() << std::endl;
+    //     return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
-}
+}
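
For reference, below is a minimal standalone sketch of the flow this diff leaves in place: read a model from <path_to_model>, compile it for <device_name>, and create an infer request, with no image input or preprocessing. Assumptions not taken from the diff: a plain char-based main replaces the sample's Unicode tmain/TSTRING2STRING wrappers, std::cout/std::cerr replace the slog helpers, and the try/catch that the diff comments out is restored so the sketch handles errors on its own.

#include <openvino/openvino.hpp>

#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>

int main(int argc, char* argv[]) {
    try {
        if (argc != 3) {
            std::cout << "Usage : " << argv[0] << " <path_to_model> <device_name>" << std::endl;
            return EXIT_FAILURE;
        }
        const std::string model_path = argv[1];
        const std::string device_name = argv[2];

        ov::Core core;

        // Optional, mirroring the commented-out block above: list every
        // visible device and the names of its supported properties.
        for (const auto& device : core.get_available_devices()) {
            std::cout << device << std::endl;
            for (const auto& property : core.get_property(device, ov::supported_properties)) {
                if (property != ov::supported_properties.name()) {
                    std::cout << '\t' << (property.is_mutable() ? "Mutable:   " : "Immutable: ") << property
                              << std::endl;
                }
            }
        }

        // Read the model, compile it for the chosen device, and create an
        // infer request; input setup and inference stay out of scope here.
        std::shared_ptr<ov::Model> model = core.read_model(model_path);
        OPENVINO_ASSERT(model->inputs().size() == 1, "Sample supports models with 1 input only");

        ov::CompiledModel compiled_model = core.compile_model(model, device_name);
        ov::InferRequest infer_request = compiled_model.create_infer_request();
        (void)infer_request;  // ready for set_input_tensor()/infer() later
    } catch (const std::exception& ex) {
        std::cerr << ex.what() << std::endl;
        return EXIT_FAILURE;
    }
    return EXIT_SUCCESS;
}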