Skip to content

Commit 848a7c3

Browse files
authored
[CPU] Enable and address google-* clang-tidy remarks (openvinotoolkit#28537)
### Details:
- Enable `google-*` checks in clang-tidy for the CPU plugin implementation.
- Exclude from the `google-*` scope: `google-explicit-constructor`, because it is error prone; `google-default-arguments`, to be able to keep default function arguments in virtual functions; and `google-readability-casting`, which may potentially be enabled later.

### Tickets:
- N/A
1 parent 5981aab commit 848a7c3

File tree

284 files changed

+6121
-3254
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

284 files changed

+6121
-3254
lines changed

src/common/snippets/src/utils/debug_caps_config.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,11 @@ namespace snippets {
1111
void DebugCapsConfig::readProperties() {
1212
auto readEnv = [](const char* envVar) {
1313
const char* env = std::getenv(envVar);
14-
if (env && *env)
14+
if (env && *env) {
1515
return env;
16+
}
1617

17-
return (const char*)nullptr;
18+
return static_cast<const char*>(nullptr);
1819
};
1920

2021
const char* envVarValue = nullptr;

src/plugins/intel_cpu/src/.clang-tidy

+7-3
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
### Scopes to be enabled:
77
#
88
# cppcoreguidelines-*,
9-
# google-*,
109
# readability-*,
1110
# modernize-*,
1211
# bugprone-*,
@@ -26,7 +25,9 @@
2625
# -bugprone-fold-init-type
2726
# -bugprone-implicit-widening-of-multiplication-result
2827
# -cppcoreguidelines-narrowing-conversions
29-
# -google-readability-braces-around-statements
28+
# -google-default-arguments,
29+
# -google-explicit-constructor,
30+
# -google-readability-casting,
3031
# -readability-implicit-bool-conversion,
3132
# -readability-magic-numbers, cppcoreguidelines-avoid-magic-numbers
3233
# -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions
@@ -35,6 +36,7 @@
3536
Checks: >
3637
-*,
3738
performance-*,
39+
google-*,
3840
modernize-pass-by-value,
3941
cppcoreguidelines-prefer-member-initializer,
4042
-bugprone-easily-swappable-parameters,
@@ -44,9 +46,11 @@ Checks: >
4446
-cppcoreguidelines-narrowing-conversions,
4547
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
4648
-google-build-using-namespace,
49+
-google-default-arguments,
50+
-google-explicit-constructor,
51+
-google-readability-casting,
4752
-google-readability-todo,
4853
-readability-braces-around-statements,
49-
-google-readability-braces-around-statements,
5054
-modernize-use-trailing-return-type,
5155
-readability-identifier-length,
5256
-readability-implicit-bool-conversion,

src/plugins/intel_cpu/src/compiled_model.cpp

+26-20
Original file line numberDiff line numberDiff line change
@@ -57,8 +57,9 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
5757
m_sub_memory_manager(std::move(sub_memory_manager)) {
5858
m_mutex = std::make_shared<std::mutex>();
5959
const auto& core = m_plugin->get_core();
60-
if (!core)
60+
if (!core) {
6161
OPENVINO_THROW("Unable to get API version. Core is unavailable");
62+
}
6263

6364
IStreamsExecutor::Config executor_config;
6465
if (m_cfg.exclusiveAsyncRequests) {
@@ -81,10 +82,12 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
8182
m_callback_executor = m_task_executor;
8283
}
8384

84-
if (m_task_executor)
85+
if (m_task_executor) {
8586
set_task_executor(m_task_executor);
86-
if (m_callback_executor)
87+
}
88+
if (m_callback_executor) {
8789
set_callback_executor(m_callback_executor);
90+
}
8891

8992
int streams = std::max(1, executor_config.get_streams());
9093
std::vector<Task> tasks;
@@ -208,15 +211,17 @@ std::shared_ptr<ov::IAsyncInferRequest> CompiledModel::create_infer_request() co
208211
}
209212

210213
std::shared_ptr<const ov::Model> CompiledModel::get_runtime_model() const {
211-
if (m_graphs.empty())
214+
if (m_graphs.empty()) {
212215
OPENVINO_THROW("No graph was found");
216+
}
213217

214218
return get_graph()._graph.dump();
215219
}
216220

217221
ov::Any CompiledModel::get_property(const std::string& name) const {
218-
if (m_graphs.empty())
222+
if (m_graphs.empty()) {
219223
OPENVINO_THROW("No graph was found");
224+
}
220225

221226
if (name == ov::loaded_from_cache) {
222227
return m_loaded_from_cache;
@@ -275,30 +280,30 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
275280
return decltype(ov::model_name)::value_type(modelName);
276281
} else if (name == ov::optimal_number_of_infer_requests) {
277282
const auto streams = config.streamExecutorConfig.get_streams();
278-
return decltype(ov::optimal_number_of_infer_requests)::value_type(
283+
return static_cast<decltype(ov::optimal_number_of_infer_requests)::value_type>(
279284
streams > 0 ? streams : 1); // ov::optimal_number_of_infer_requests has no negative values
280285
} else if (name == ov::num_streams) {
281286
const auto streams = config.streamExecutorConfig.get_streams();
282287
return decltype(ov::num_streams)::value_type(
283288
streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
284289
} else if (name == ov::inference_num_threads) {
285290
const auto num_threads = config.streamExecutorConfig.get_threads();
286-
return decltype(ov::inference_num_threads)::value_type(num_threads);
291+
return static_cast<decltype(ov::inference_num_threads)::value_type>(num_threads);
287292
} else if (name == ov::enable_profiling.name()) {
288293
const bool perfCount = config.collectPerfCounters;
289-
return decltype(ov::enable_profiling)::value_type(perfCount);
294+
return static_cast<decltype(ov::enable_profiling)::value_type>(perfCount);
290295
} else if (name == ov::hint::inference_precision) {
291296
return decltype(ov::hint::inference_precision)::value_type(config.inferencePrecision);
292297
} else if (name == ov::hint::performance_mode) {
293-
return decltype(ov::hint::performance_mode)::value_type(config.hintPerfMode);
298+
return static_cast<decltype(ov::hint::performance_mode)::value_type>(config.hintPerfMode);
294299
} else if (name == ov::log::level) {
295-
return decltype(ov::log::level)::value_type(config.logLevel);
300+
return static_cast<decltype(ov::log::level)::value_type>(config.logLevel);
296301
} else if (name == ov::hint::enable_cpu_pinning.name()) {
297302
const bool use_pin = config.enableCpuPinning;
298-
return decltype(ov::hint::enable_cpu_pinning)::value_type(use_pin);
303+
return static_cast<decltype(ov::hint::enable_cpu_pinning)::value_type>(use_pin);
299304
} else if (name == ov::hint::enable_cpu_reservation.name()) {
300305
const bool use_reserve = config.enableCpuReservation;
301-
return decltype(ov::hint::enable_cpu_reservation)::value_type(use_reserve);
306+
return static_cast<decltype(ov::hint::enable_cpu_reservation)::value_type>(use_reserve);
302307
} else if (name == ov::hint::scheduling_core_type) {
303308
const auto stream_mode = config.schedulingCoreType;
304309
return stream_mode;
@@ -307,31 +312,32 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
307312
return distribution_policy;
308313
} else if (name == ov::hint::enable_hyper_threading.name()) {
309314
const bool use_ht = config.enableHyperThreading;
310-
return decltype(ov::hint::enable_hyper_threading)::value_type(use_ht);
315+
return static_cast<decltype(ov::hint::enable_hyper_threading)::value_type>(use_ht);
311316
} else if (name == ov::hint::execution_mode) {
312317
return config.executionMode;
313318
} else if (name == ov::hint::num_requests) {
314-
return decltype(ov::hint::num_requests)::value_type(config.hintNumRequests);
319+
return static_cast<decltype(ov::hint::num_requests)::value_type>(config.hintNumRequests);
315320
} else if (name == ov::execution_devices) {
316321
return decltype(ov::execution_devices)::value_type{m_plugin->get_device_name()};
317322
} else if (name == ov::intel_cpu::denormals_optimization) {
318-
return decltype(ov::intel_cpu::denormals_optimization)::value_type(config.denormalsOptMode ==
319-
Config::DenormalsOptMode::DO_On);
323+
return static_cast<decltype(ov::intel_cpu::denormals_optimization)::value_type>(
324+
config.denormalsOptMode == Config::DenormalsOptMode::DO_On);
320325
} else if (name == ov::intel_cpu::sparse_weights_decompression_rate) {
321-
return decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type(
326+
return static_cast<decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type>(
322327
config.fcSparseWeiDecompressionRate);
323328
} else if (name == ov::hint::dynamic_quantization_group_size) {
324-
return decltype(ov::hint::dynamic_quantization_group_size)::value_type(config.fcDynamicQuantizationGroupSize);
329+
return static_cast<decltype(ov::hint::dynamic_quantization_group_size)::value_type>(
330+
config.fcDynamicQuantizationGroupSize);
325331
} else if (name == ov::hint::kv_cache_precision) {
326332
return decltype(ov::hint::kv_cache_precision)::value_type(config.kvCachePrecision);
327333
} else if (name == ov::key_cache_precision) {
328334
return decltype(ov::key_cache_precision)::value_type(config.keyCachePrecision);
329335
} else if (name == ov::value_cache_precision) {
330336
return decltype(ov::value_cache_precision)::value_type(config.valueCachePrecision);
331337
} else if (name == ov::key_cache_group_size) {
332-
return decltype(ov::key_cache_group_size)::value_type(config.keyCacheGroupSize);
338+
return static_cast<decltype(ov::key_cache_group_size)::value_type>(config.keyCacheGroupSize);
333339
} else if (name == ov::value_cache_group_size) {
334-
return decltype(ov::value_cache_group_size)::value_type(config.valueCacheGroupSize);
340+
return static_cast<decltype(ov::value_cache_group_size)::value_type>(config.valueCacheGroupSize);
335341
}
336342
OPENVINO_THROW("Unsupported property: ", name);
337343
}

src/plugins/intel_cpu/src/config.cpp

+19-12
Original file line numberDiff line numberDiff line change
@@ -94,8 +94,9 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
9494
try {
9595
ov::Any value = val.as<std::string>();
9696
int val_i = value.as<int>();
97-
if (val_i < 0)
97+
if (val_i < 0) {
9898
OPENVINO_THROW("invalid value.");
99+
}
99100
hintNumRequests = static_cast<uint32_t>(val_i);
100101
} catch (const ov::Exception&) {
101102
OPENVINO_THROW("Wrong value ",
@@ -278,14 +279,15 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
278279
} else if (key == ov::intel_cpu::snippets_mode.name()) {
279280
try {
280281
auto const mode = val.as<ov::intel_cpu::SnippetsMode>();
281-
if (mode == ov::intel_cpu::SnippetsMode::ENABLE)
282+
if (mode == ov::intel_cpu::SnippetsMode::ENABLE) {
282283
snippetsMode = SnippetsMode::Enable;
283-
else if (mode == ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK)
284+
} else if (mode == ov::intel_cpu::SnippetsMode::IGNORE_CALLBACK) {
284285
snippetsMode = SnippetsMode::IgnoreCallback;
285-
else if (mode == ov::intel_cpu::SnippetsMode::DISABLE)
286+
} else if (mode == ov::intel_cpu::SnippetsMode::DISABLE) {
286287
snippetsMode = SnippetsMode::Disable;
287-
else
288+
} else {
288289
OPENVINO_THROW("invalid value");
290+
}
289291
} catch (ov::Exception&) {
290292
OPENVINO_THROW("Wrong value ",
291293
val.as<std::string>(),
@@ -396,8 +398,9 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
396398
inferencePrecision = ov::element::f16;
397399
}
398400
#endif
399-
if (mayiuse(avx512_core_bf16))
401+
if (mayiuse(avx512_core_bf16)) {
400402
inferencePrecision = ov::element::bf16;
403+
}
401404
} else {
402405
inferencePrecision = ov::element::undefined;
403406
}
@@ -431,8 +434,9 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
431434
}
432435
}
433436

434-
if (!prop.empty())
437+
if (!prop.empty()) {
435438
_config.clear();
439+
}
436440

437441
if (exclusiveAsyncRequests) { // Exclusive request feature disables the streams
438442
streams = 1;
@@ -453,17 +457,20 @@ void Config::readProperties(const ov::AnyMap& prop, const ModelType modelType) {
453457
}
454458

455459
void Config::updateProperties() {
456-
if (!_config.empty())
460+
if (!_config.empty()) {
457461
return;
462+
}
458463

459-
if (collectPerfCounters == true)
464+
if (collectPerfCounters == true) {
460465
_config.insert({ov::enable_profiling.name(), "YES"});
461-
else
466+
} else {
462467
_config.insert({ov::enable_profiling.name(), "NO"});
463-
if (exclusiveAsyncRequests == true)
468+
}
469+
if (exclusiveAsyncRequests == true) {
464470
_config.insert({ov::internal::exclusive_async_requests.name(), "YES"});
465-
else
471+
} else {
466472
_config.insert({ov::internal::exclusive_async_requests.name(), "NO"});
473+
}
467474

468475
_config.insert({ov::device::id.name(), device_id});
469476

src/plugins/intel_cpu/src/cpu_memory.cpp

+16-12
Original file line numberDiff line numberDiff line change
@@ -135,8 +135,9 @@ void Memory::load(const IMemory& src, bool ftz) const {
135135

136136
void Memory::nullify() {
137137
void* dataPtr = getData();
138-
if (dataPtr != nullptr)
138+
if (dataPtr != nullptr) {
139139
memset(dataPtr, 0, getDesc().getCurrentMemSize());
140+
}
140141
}
141142

142143
void Memory::redefineDesc(MemoryDescPtr desc) {
@@ -194,8 +195,9 @@ dnnl::memory Memory::DnnlMemPrimHandle::getPrim() const {
194195

195196
void* Memory::getData() const {
196197
void* data = getDataNoThrow();
197-
if (data == nullptr && m_pMemDesc->getShape().isStatic() && m_pMemDesc->getShape().getElementsCount() != 0)
198+
if (data == nullptr && m_pMemDesc->getShape().isStatic() && m_pMemDesc->getShape().getElementsCount() != 0) {
198199
OPENVINO_THROW("Memory has not been allocated");
200+
}
199201
return data;
200202
}
201203

@@ -492,8 +494,9 @@ dnnl::memory StaticMemory::getPrimitive() const {
492494

493495
void StaticMemory::nullify() {
494496
void* dataPtr = getData();
495-
if (dataPtr != nullptr)
497+
if (dataPtr != nullptr) {
496498
memset(dataPtr, 0, getSize());
499+
}
497500
}
498501

499502
StaticMemory::StaticMemoryBlock::StaticMemoryBlock(size_t size) : m_size(size) {
@@ -539,13 +542,14 @@ void StaticMemory::StaticMemoryBlock::unregisterMemory(Memory* memPtr) {
539542
# if !defined(__NR_mbind) && defined(__x86_64__)
540543
# define __NR_mbind 237
541544
# endif
542-
static long mbind(void* start,
543-
unsigned long len,
544-
int mode,
545-
const unsigned long* nmask,
546-
unsigned long maxnode,
547-
unsigned flags) {
548-
return syscall(__NR_mbind, (long)start, len, mode, (long)nmask, maxnode, flags);
545+
static int64_t mbind(void* start, uint64_t len, int mode, const uint64_t* nmask, uint64_t maxnode, unsigned flags) {
546+
return syscall(__NR_mbind,
547+
reinterpret_cast<uint64_t>(start),
548+
len,
549+
mode,
550+
reinterpret_cast<uint64_t>(nmask),
551+
maxnode,
552+
flags);
549553
}
550554
#endif
551555

@@ -555,8 +559,8 @@ bool mbind_move(void* data, size_t size, int targetNode) {
555559
auto pagesize = getpagesize();
556560
auto page_count = (size + pagesize - 1) / pagesize;
557561
char* pages = reinterpret_cast<char*>( // NOLINT(performance-no-int-to-ptr)
558-
(((uintptr_t)data) & ~((uintptr_t)(pagesize - 1))));
559-
unsigned long mask = 0;
562+
((reinterpret_cast<uintptr_t>(data)) & ~(static_cast<uintptr_t>(pagesize - 1))));
563+
uint64_t mask = 0;
560564
unsigned flags = 0;
561565
if (realNode < 0) {
562566
// restore default policy

src/plugins/intel_cpu/src/cpu_tensor.cpp

+2-1
Original file line numberDiff line numberDiff line change
@@ -33,8 +33,9 @@ void Tensor::set_shape(ov::Shape new_shape) {
3333
vec2str(shape.getStaticDims()),
3434
" -> ",
3535
new_shape.to_string());
36-
if (shape.getStaticDims() == new_shape)
36+
if (shape.getStaticDims() == new_shape) {
3737
return;
38+
}
3839
}
3940

4041
auto desc = m_memptr->getDescPtr();

src/plugins/intel_cpu/src/dnnl_extension_utils.cpp

+6-3
Original file line numberDiff line numberDiff line change
@@ -186,8 +186,9 @@ size_t DnnlExtensionUtils::getMemSizeForDnnlDesc(const dnnl::memory::desc& desc)
186186
"Unexpected non zero offset for a dnnl blocked memory desc");
187187

188188
size_t size = desc.get_size();
189-
if (size == DNNL_RUNTIME_SIZE_VAL)
189+
if (size == DNNL_RUNTIME_SIZE_VAL) {
190190
return MemoryDesc::UNDEFINED_SIZE;
191+
}
191192

192193
return size;
193194
}
@@ -207,17 +208,19 @@ DnnlMemoryDescPtr DnnlExtensionUtils::query_md(const const_dnnl_primitive_desc_t
207208
auto query = dnnl::convert_to_c(what);
208209
const auto* cdesc = dnnl_primitive_desc_query_md(pd, query, idx);
209210

210-
if (!cdesc)
211+
if (!cdesc) {
211212
OPENVINO_THROW("query_md failed for query=", query, " idx=", idx, ".");
213+
}
212214

213215
return DnnlExtensionUtils::makeDescriptor(cdesc);
214216
}
215217

216218
std::string DnnlExtensionUtils::query_impl_info_str(const const_dnnl_primitive_desc_t& pd) {
217219
const char* res;
218220
dnnl_status_t status = dnnl_primitive_desc_query(pd, dnnl_query_impl_info_str, 0, &res);
219-
if (status != dnnl_success)
221+
if (status != dnnl_success) {
220222
OPENVINO_THROW("query_impl_info_str failed.");
223+
}
221224
return std::string(res);
222225
}
223226

0 commit comments

Comments (0)