Skip to content

Commit 3f06d87

Browse files
Haiqi Pan, wgzintel, and ilyachur
authored
Remove catch(...) code style (openvinotoolkit#14433)
* remove ... in /src/bindings/c and resolve format * remove ... in auto and resolve format * remove ... in auto batch and resolve format * remove ... in hetero and resolve format * IE exception * IE exception * const * const * resolve format and ov::Exception * ov::Exception * reset IE::Exception * remove ov::Exception * InferenceEngine->IE in auto_schedule.cpp * fix coredump while running test case smoke_AutoMultiMock_NumStreamsAndDefaultPerfHintToHWTest * fix coredump issue of running LoadNetworkWithSecondaryConfigsMockTest * fix compile fail in CI * resolve core dump of running exec_network_get_metrics.cpp and disable some test cases for KEEMBAY and MYRIAD in exec_network_get_metrics.cpp * remove test cases of MYRIAD and resolve core dump of running KEEMBAY in exec_network_get_metrics.cpp * resolve conflict of common.h * resolve conflict of common.h * resolve common.h compile fail --------- Co-authored-by: guozhong <guozhong.wang@intel.com> Co-authored-by: Ilya Churaev <ilya.churaev@intel.com>
1 parent 0a224a8 commit 3f06d87

12 files changed

+24
-16
lines changed

src/bindings/c/src/ie_c_api.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ std::map<IE::ColorFormat, colorformat_e> colorformat_map = {{IE::ColorFormat::RA
196196
CATCH_IE_EXCEPTION(INFER_NOT_STARTED, InferNotStarted) \
197197
CATCH_IE_EXCEPTION(NETWORK_NOT_READ, NetworkNotRead) \
198198
CATCH_IE_EXCEPTION(INFER_CANCELLED, InferCancelled) \
199-
catch (...) { \
199+
catch (const std::exception&) { \
200200
return IEStatusCode::UNEXPECTED; \
201201
}
202202

src/plugins/auto/auto_executable_network.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ IE::Parameter AutoExecutableNetwork::GetMetric(const std::string& name) const {
9292
bThroughputEnabledInPlugin =
9393
_autoSContext->_core->GetConfig(deviceInfo.deviceName,
9494
CONFIG_KEY(PERFORMANCE_HINT)).as<std::string>() == CONFIG_VALUE(THROUGHPUT);
95-
} catch (...) {
95+
} catch (const IE::Exception&) {
9696
LOG_DEBUG_TAG("GetMetric:%s for %s", "PERF_HINT config not supported",
9797
deviceInfo.deviceName.c_str());
9898
}
@@ -117,7 +117,7 @@ IE::Parameter AutoExecutableNetwork::GetMetric(const std::string& name) const {
117117
METRIC_KEY(OPTIMAL_BATCH_SIZE), options).as<unsigned int>();
118118
LOG_DEBUG_TAG("BATCHING:%s:%ld", "optimal batch size",
119119
optimalBatchSize);
120-
} catch (...) {
120+
} catch (const IE::Exception&) {
121121
LOG_DEBUG_TAG("BATCHING:%s", "metric OPTIMAL_BATCH_SIZE not supported");
122122
}
123123
}

src/plugins/auto/auto_schedule.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -212,7 +212,7 @@ void AutoSchedule::init(const ScheduleContext::Ptr& sContext) {
212212
deviceName.c_str(),
213213
cfg.c_str(),
214214
contextPtr->executableNetwork->GetConfig(cfg).as<std::string>().c_str());
215-
} catch (...) {
215+
} catch (const IE::Exception&) {
216216
}
217217
}
218218
});
@@ -323,7 +323,7 @@ void AutoSchedule::TryToLoadNetWork(AutoLoadContext& context, const std::string&
323323
int maxNumThreads = 0;
324324
try {
325325
maxNumThreads = _autoSContext->_core->GetConfig(device, GPU_CONFIG_KEY(MAX_NUM_THREADS)).as<int>();
326-
} catch (...) {
326+
} catch (const IE::Exception&) {
327327
LOG_DEBUG_TAG("cannot get MAX_NUM_THREADS from GPU");
328328
}
329329
if (maxNumThreads == static_cast<int>(std::thread::hardware_concurrency())) {
@@ -537,7 +537,7 @@ IInferPtr AutoSchedule::CreateInferRequest() {
537537
try {
538538
perfmode = _passthroughExeNet->GetConfig(
539539
CONFIG_KEY(PERFORMANCE_HINT)).as<std::string>();
540-
} catch(...) {
540+
} catch (const IE::Exception&) {
541541
LOG_INFO("query perf hint from passthrough network failed");
542542
}
543543
if (_autoSContext->_batchingDisabled || perfmode != CONFIG_VALUE(THROUGHPUT)) {

src/plugins/auto/multi_schedule.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -308,7 +308,7 @@ IInferPtr MultiSchedule::CreateInferRequest() {
308308
try {
309309
perfmode = _passthroughExeNet->GetConfig(
310310
CONFIG_KEY(PERFORMANCE_HINT)).as<std::string>();
311-
} catch(...) {
311+
} catch (const IE::Exception&) {
312312
LOG_INFO("query perf hint from passthrough network failed");
313313
}
314314
if (_multiSContext->_batchingDisabled || perfmode != CONFIG_VALUE(THROUGHPUT)) {

src/plugins/auto/plugin.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -533,7 +533,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
533533
deviceName.c_str(),
534534
sThreadNums.c_str());
535535
}
536-
} catch (...) {
536+
} catch (const IE::Exception&) {
537537
LOG_DEBUG_TAG("deviceName:%s cannot get streamNums and threadNums from exec_net", deviceName.c_str());
538538
}
539539
std::unique_lock<std::mutex> lock{load_mutex};
@@ -592,7 +592,7 @@ IExecutableNetworkInternal::Ptr MultiDeviceInferencePlugin::LoadNetworkImpl(cons
592592
num_plugins_supporting_perf_counters +=
593593
n.second->GetConfig(PluginConfigParams::KEY_PERF_COUNT).as<std::string>() ==
594594
PluginConfigParams::YES;
595-
} catch (...) {
595+
} catch (const IE::Exception&) {
596596
}
597597
}
598598
// MULTI can enable the perf counters only if all devices support/enable that

src/plugins/auto/utils/config.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ struct PluginConfig {
148148
<< " for key: " << kvp.first;
149149
}
150150
_batchTimeout = kvp.second;
151-
} catch (...) {
151+
} catch (const std::exception&) {
152152
IE_THROW() << "Unsupported config value: " << kvp.second
153153
<< " for key: " << kvp.first;
154154
}

src/plugins/auto/utils/log.hpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ inline int parseInteger(const char* str) {
5252
std::string var(str ? str : "");
5353
try {
5454
return std::stoi(var);
55-
} catch (...) {
55+
} catch (const std::exception&) {
5656
return INT32_MAX;
5757
}
5858
}

src/plugins/auto_batch/auto_batch.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -902,7 +902,7 @@ InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadN
902902
if (!batched_inputs.size() || !batched_outputs.size())
903903
IE_THROW(NotImplemented)
904904
<< "Auto-batching supports only networks with inputs/outputs featuring batched dim!";
905-
} catch (...) {
905+
} catch (const InferenceEngine::Exception&) {
906906
metaDevice.batchForDevice = 1;
907907
}
908908

@@ -969,7 +969,7 @@ InferenceEngine::IExecutableNetworkInternal::Ptr AutoBatchInferencePlugin::LoadN
969969
reshaped.reshape(shapes);
970970
executableNetworkWithBatch = ctx ? core->LoadNetwork(reshaped, ctx, deviceConfigNoAutoBatch)
971971
: core->LoadNetwork(reshaped, deviceName, deviceConfigNoAutoBatch);
972-
} catch (...) {
972+
} catch (const InferenceEngine::Exception&) {
973973
metaDevice.batchForDevice = 1;
974974
}
975975
}

src/plugins/hetero/async_infer_request.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ StatusCode HeteroAsyncInferRequest::Wait(int64_t millis_timeout) {
4848
auto waitStatus = StatusCode::OK;
4949
try {
5050
waitStatus = AsyncInferRequestThreadSafeDefault::Wait(millis_timeout);
51-
} catch (...) {
51+
} catch (const InferenceEngine::Exception&) {
5252
for (auto&& requestDesc : _heteroInferRequest->_inferRequests) {
5353
requestDesc._request->Wait(InferRequest::RESULT_READY);
5454
}

src/tests/unit/auto/auto_default_perf_hint_test.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -295,6 +295,8 @@ class AutoDefaultPerfHintTest : public ::testing::TestWithParam<ConfigParams> {
295295
std::vector<std::string> configKeys = {"SUPPORTED_CONFIG_KEYS", "NUM_STREAMS"};
296296
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _)).WillByDefault(Return(configKeys));
297297

298+
ON_CALL(*core, GetConfig(_, StrEq(GPU_CONFIG_KEY(MAX_NUM_THREADS)))).WillByDefault(Return(12));
299+
298300
ON_CALL(*plugin, ParseMetaDevices)
299301
.WillByDefault(
300302
[this](const std::string& priorityDevices, const std::map<std::string, std::string>& config) {

src/tests/unit/auto/auto_load_network_properties_test.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -174,6 +174,8 @@ class LoadNetworkWithSecondaryConfigsMockTest : public ::testing::TestWithParam<
174174
std::vector<std::string> configKeys = {"SUPPORTED_CONFIG_KEYS", "NUM_STREAMS"};
175175
ON_CALL(*core, GetMetric(_, StrEq(METRIC_KEY(SUPPORTED_CONFIG_KEYS)), _)).WillByDefault(Return(configKeys));
176176

177+
ON_CALL(*core, GetConfig(_, StrEq(GPU_CONFIG_KEY(MAX_NUM_THREADS)))).WillByDefault(Return(12));
178+
177179
ON_CALL(*plugin, ParseMetaDevices)
178180
.WillByDefault(
179181
[this](const std::string& priorityDevices, const std::map<std::string, std::string>& config) {

src/tests/unit/auto/exec_network_get_metrics.cpp

+6-2
Original file line numberDiff line numberDiff line change
@@ -194,10 +194,13 @@ TEST_P(ExecNetworkGetMetric, OPTIMAL_NUMBER_OF_INFER_REQUESTS) {
194194
metaDevices.push_back({actualDeviceName, {{CONFIG_KEY(PERFORMANCE_HINT),
195195
InferenceEngine::PluginConfigParams::THROUGHPUT}}, actualCustomerNum, ""});
196196
// enable autoBatch
197-
IE_SET_METRIC(OPTIMAL_BATCH_SIZE, optimalBatchNum, 8);
197+
IE_SET_METRIC(OPTIMAL_BATCH_SIZE, gpuOptimalBatchNum, 8);
198+
IE_SET_METRIC(OPTIMAL_BATCH_SIZE, keembayOptimalBatchNum, 1);
198199
IE_SET_METRIC(RANGE_FOR_STREAMS, rangeOfStreams, std::make_tuple<unsigned int, unsigned int>(1, 3));
199200
ON_CALL(*core.get(), GetMetric(StrEq(CommonTestUtils::DEVICE_GPU), StrEq(METRIC_KEY(OPTIMAL_BATCH_SIZE)), _))
200-
.WillByDefault(RETURN_MOCK_VALUE(optimalBatchNum));
201+
.WillByDefault(RETURN_MOCK_VALUE(gpuOptimalBatchNum));
202+
ON_CALL(*core.get(), GetMetric(StrEq(CommonTestUtils::DEVICE_KEEMBAY), StrEq(METRIC_KEY(OPTIMAL_BATCH_SIZE)), _))
203+
.WillByDefault(RETURN_MOCK_VALUE(keembayOptimalBatchNum));
201204
ON_CALL(*core.get(), GetMetric(_, StrEq(METRIC_KEY(RANGE_FOR_STREAMS)), _))
202205
.WillByDefault(RETURN_MOCK_VALUE(rangeOfStreams));
203206
ON_CALL(*core.get(), GetConfig(_, StrEq(CONFIG_KEY(PERFORMANCE_HINT))))
@@ -212,6 +215,7 @@ TEST_P(ExecNetworkGetMetric, OPTIMAL_NUMBER_OF_INFER_REQUESTS) {
212215
} else {
213216
metaDevices.push_back({CommonTestUtils::DEVICE_CPU, {}, cpuCustomerNum, ""});
214217
metaDevices.push_back({actualDeviceName, {}, actualCustomerNum, ""});
218+
ON_CALL(*core, GetConfig(_, StrEq(GPU_CONFIG_KEY(MAX_NUM_THREADS)))).WillByDefault(Return(8));
215219
}
216220
ON_CALL(*plugin, SelectDevice(_, _, _)).WillByDefault(Return(metaDevices[1]));
217221
ON_CALL(*plugin, ParseMetaDevices(_, _)).WillByDefault(Return(metaDevices));

0 commit comments

Comments
 (0)