Skip to content

Commit 0274d16

Browse files
cyyever authored and pytorchmergebot committed
Fix clang-tidy warnings in jit code (pytorch#138974)
Fixes #ISSUE_NUMBER Pull Request resolved: pytorch#138974 Approved by: https://github.com/ezyang
1 parent 48b55ca commit 0274d16

19 files changed

+43
-47
lines changed

torch/csrc/jit/backends/backend_exception.h

+3-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
#pragma once
22
#include <c10/util/Exception.h>
33

4+
#include <utility>
5+
46
namespace c10 {
57
class TORCH_API BackendRuntimeException : public c10::Error {
68
public:
@@ -9,7 +11,7 @@ class TORCH_API BackendRuntimeException : public c10::Error {
911
SourceLocation loc,
1012
std::string msg,
1113
int64_t debug_handle)
12-
: c10::Error(loc, msg) {
14+
: c10::Error(loc, std::move(msg)) {
1315
debug_handles.push_back(debug_handle);
1416
}
1517
// If rethrowing, can push another debug_handle

torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h

+5-4
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55

66
#include <oneapi/dnnl/dnnl_graph.hpp>
77
#include <torch/csrc/jit/ir/ir.h>
8+
#include <utility>
89

910
namespace torch::jit::fuser::onednn {
1011

@@ -42,8 +43,8 @@ struct LlgaTensorDesc {
4243
desc::data_type dtype,
4344
desc::property_type property_type)
4445
: tid_(tid),
45-
sizes_(sizes),
46-
strides_(strides),
46+
sizes_(std::move(sizes)),
47+
strides_(std::move(strides)),
4748
dtype_(dtype),
4849
property_type_(property_type),
4950
layout_type_(desc::layout_type::strided),
@@ -221,7 +222,7 @@ struct LlgaTensorDesc {
221222

222223
private:
223224
bool is_dimensionality_unknown() const {
224-
return sizes_.size() == 0;
225+
return sizes_.empty();
225226
}
226227

227228
size_t tid_;
@@ -236,7 +237,7 @@ struct LlgaTensorDesc {
236237
// compute_inplace would be true, and input_tensor_index would be the index of
237238
// the corresponding input tensor in inputSpecs_ of the LlgaKernel object.
238239
bool compute_inplace_ = false;
239-
size_t input_tensor_index_;
240+
size_t input_tensor_index_{};
240241
};
241242

242243
// Initially, oneDNN Graph also used to have blocked layout for tensors between

torch/csrc/jit/codegen/onednn/kernel.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ std::tuple<RunArgs, RunArgs> LlgaKernel::prepareRunArgs(
126126
auto numInputs = runArgsIdx_.size();
127127
for (const auto i : c10::irange(numInputs)) {
128128
auto spec = inputSpecs_[i];
129-
auto input = inputs[runArgsIdx_[i]];
129+
const auto& input = inputs[runArgsIdx_[i]];
130130
runInputs.push_back(
131131
{spec.logical_tensor(), Engine::getEngine(), input.data_ptr()});
132132
}

torch/csrc/jit/mobile/import.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -339,8 +339,7 @@ void BytecodeDeserializer::parseMethods(
339339
auto element = std::move(vals[i]);
340340
auto m_tuple = std::move(element.toTupleRef()).elements();
341341
const std::string& function_name = m_tuple[0].toStringRef();
342-
auto codeTableElements =
343-
std::move(std::move(m_tuple[1]).toTupleRef()).elements();
342+
auto codeTableElements = std::move(m_tuple[1].toTupleRef()).elements();
344343
IValue* schemaTable = // older files do not store function schema
345344
(bytecode_version_ > 0x4L ||
346345
(bytecode_version_ == 0x4L && m_tuple.size() >= 3))

torch/csrc/jit/mobile/nnc/context.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ c10::IValue Function::serialize() const {
196196
}
197197

198198
void Function::init_execution_state() const {
199-
if (execution_state_.get() != nullptr) {
199+
if (execution_state_ != nullptr) {
200200
return;
201201
}
202202

torch/csrc/jit/passes/frozen_ops_to_mkldnn.cpp

+9-9
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ void merge_sets(
8585
}
8686

8787
// no uses of tensors in container types
88-
void assertNonTensorTypeDoesNotContainTensors(TypePtr type) {
88+
void assertNonTensorTypeDoesNotContainTensors(const TypePtr& type) {
8989
if (type->cast<TensorType>()) {
9090
return;
9191
}
@@ -94,7 +94,7 @@ void assertNonTensorTypeDoesNotContainTensors(TypePtr type) {
9494
}
9595
}
9696

97-
void InplaceMKLDNNSubgraph(std::shared_ptr<Graph> graph) {
97+
void InplaceMKLDNNSubgraph(const std::shared_ptr<Graph>& graph) {
9898
// This function first calculates aliasing sets,
9999
// then calculates the last node each aliasing set is alive for.
100100
// Then we go through each node, if it's a node which has an equivalent
@@ -234,7 +234,7 @@ void InplaceMKLDNNSubgraph(std::shared_ptr<Graph> graph) {
234234
// innermost dimension is padded with 0s. The precondition, `aten_op(0) == 0`
235235
// allows us to avoid any special casing of padded elements.
236236
Operation createUnaryOp(
237-
std::function<void(at::Tensor output, at::Tensor input)> aten_op,
237+
const std::function<void(at::Tensor output, at::Tensor input)>& aten_op,
238238
bool inplace = false) {
239239
return [aten_op, inplace](Stack& stack) {
240240
auto a = pop(stack).toTensor();
@@ -395,7 +395,7 @@ static std::function<void(at::Tensor output, at::Tensor input)> hardtanh_helper(
395395
const Node* n) {
396396
auto min_val = n->f(attr::min_val);
397397
auto max_val = n->f(attr::max_val);
398-
return [min_val, max_val](at::Tensor output, at::Tensor input) {
398+
return [min_val, max_val](at::Tensor output, const at::Tensor& input) {
399399
at::cpu::hardtanh_out(output, input, min_val, max_val);
400400
};
401401
}
@@ -404,7 +404,7 @@ static std::function<void(at::Tensor output, at::Tensor input)> clamp_helper(
404404
const Node* n) {
405405
auto min_val = n->f(attr::min_val);
406406
auto max_val = n->f(attr::max_val);
407-
return [min_val, max_val](at::Tensor output, at::Tensor input) {
407+
return [min_val, max_val](at::Tensor output, const at::Tensor& input) {
408408
at::cpu::clamp_out(output, input, min_val, max_val);
409409
};
410410
}
@@ -415,15 +415,15 @@ const RegisterOperators MKLDNNHardSwishOpReg({
415415
torch::jit::Operator(
416416
"prim::MKLDNNHardSwish_(Tensor(a!) self) -> Tensor(a!)",
417417
createUnaryOp(
418-
[](at::Tensor output, at::Tensor input) {
418+
[](at::Tensor output, const at::Tensor& input) {
419419
at::cpu::hardswish_out(output, input);
420420
},
421421
true),
422422
AliasAnalysisKind::FROM_SCHEMA),
423423
torch::jit::Operator(
424424
"prim::MKLDNNHardSigmoid_(Tensor(a!) self) -> Tensor(a!)",
425425
createUnaryOp(
426-
[](at::Tensor output, at::Tensor input) {
426+
[](at::Tensor output, const at::Tensor& input) {
427427
at::cpu::hardsigmoid_out(output, input);
428428
},
429429
true),
@@ -443,15 +443,15 @@ const RegisterOperators MKLDNNHardSwishOpReg({
443443
torch::jit::Operator(
444444
"prim::MKLDNNHardSwish(Tensor a) -> Tensor",
445445
createUnaryOp(
446-
[](at::Tensor output, at::Tensor input) {
446+
[](at::Tensor output, const at::Tensor& input) {
447447
at::cpu::hardswish_out(output, input);
448448
},
449449
false),
450450
AliasAnalysisKind::FROM_SCHEMA),
451451
torch::jit::Operator(
452452
"prim::MKLDNNHardSigmoid(Tensor a) -> Tensor",
453453
createUnaryOp(
454-
[](at::Tensor output, at::Tensor input) {
454+
[](at::Tensor output, const at::Tensor& input) {
455455
at::cpu::hardsigmoid_out(output, input);
456456
},
457457
false),

torch/csrc/jit/passes/onnx/naming.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ namespace torch::jit::onnx {
77

88
namespace ONNXScopeName {
99

10-
using NameFunc = std::string (*)(torch::jit::ScopePtr scope);
10+
using NameFunc = std::string (*)(const torch::jit::ScopePtr& scope);
1111

1212
const std::string name_separator = "::";
1313

@@ -48,7 +48,7 @@ std::string createFullScopeName(
4848
return std::string(class_name).append(name_separator).append(variable_name);
4949
}
5050

51-
std::string variableName(torch::jit::ScopePtr scope) {
51+
std::string variableName(const torch::jit::ScopePtr& scope) {
5252
return parseNameFromScope(scope).second;
5353
}
5454

@@ -58,7 +58,7 @@ std::string variableNameFromRoot(
5858
return nameFromRoot(scope, layer_separator, &variableName);
5959
}
6060

61-
std::string className(torch::jit::ScopePtr scope) {
61+
std::string className(const torch::jit::ScopePtr& scope) {
6262
return parseNameFromScope(scope).first;
6363
}
6464

torch/csrc/jit/passes/onnx/naming.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,11 @@ namespace ONNXScopeName {
99
std::string createFullScopeName(
1010
const std::string& class_name,
1111
const std::string& variable_name);
12-
std::string variableName(torch::jit::ScopePtr scope);
12+
std::string variableName(const torch::jit::ScopePtr& scope);
1313
std::string variableNameFromRoot(
1414
const torch::jit::ScopePtr& scope,
1515
const std::string& layer_separator);
16-
std::string className(torch::jit::ScopePtr scope);
16+
std::string className(const torch::jit::ScopePtr& scope);
1717
std::string classNameFromRoot(
1818
const torch::jit::ScopePtr& scope,
1919
const std::string& layer_separator);

torch/csrc/jit/passes/onnx/remove_inplace_ops_for_onnx.cpp

-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
#include <torch/csrc/jit/frontend/error_report.h>
77
#include <torch/csrc/jit/jit_log.h>
88
#include <torch/csrc/jit/passes/dead_code_elimination.h>
9-
#include <torch/csrc/jit/passes/onnx/helper.h>
109
#include <torch/csrc/jit/passes/onnx/pattern_conversion/pattern_encapsulation.h>
1110

1211
#include <c10/util/irange.h>

torch/csrc/jit/passes/quantization/quantization_patterns.h

+3-1
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,9 @@ std::string getExtraArgList(std::vector<std::string> extra_args) {
2525
extra_args.begin(),
2626
extra_args.end(),
2727
std::string(),
28-
[](std::string acc, const std::string& arg) { return acc + ", " + arg; });
28+
[](const std::string& acc, const std::string& arg) {
29+
return acc + ", " + arg;
30+
});
2931
}
3032

3133
// Get the pattern we want to replace the match with

torch/csrc/jit/python/init.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1732,7 +1732,7 @@ void initJITBindings(PyObject* module) {
17321732
bool allow_numbers_as_tensors = opAllowsNumbersAsTensors(symbol);
17331733
ToIValueAllowNumbersAsTensors g(allow_numbers_as_tensors);
17341734
const auto overloads = getAllSortedOperatorsFor(symbol);
1735-
auto opWithStack = getOpWithStack(overloads, std::move(args), kwargs);
1735+
auto opWithStack = getOpWithStack(overloads, args, kwargs);
17361736
std::shared_ptr<Operator> overload = std::get<0>(opWithStack);
17371737
auto result = overload->schema().overload_name();
17381738
if (result.empty()) {

torch/csrc/jit/python/script_init.cpp

-1
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,6 @@
4848
#include <torch/csrc/jit/runtime/instruction.h>
4949
#include <torch/csrc/jit/runtime/interpreter.h>
5050
#include <torch/csrc/jit/runtime/logging.h>
51-
#include <torch/csrc/jit/serialization/export_bytecode.h>
5251
#include <torch/csrc/jit/serialization/import_source.h>
5352
#include <torch/csrc/jit/serialization/pickle.h>
5453
#include <torch/csrc/jit/serialization/python_print.h>

torch/csrc/jit/runtime/decomposition_registry.cpp

+1-4
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,7 @@
1010
#include <c10/util/Exception.h>
1111
#include <torch/csrc/autograd/jit_decomp_interface.h>
1212
#include <torch/csrc/jit/ir/ir.h>
13-
#include <torch/csrc/jit/passes/constant_propagation.h>
1413
#include <torch/csrc/jit/passes/inliner.h>
15-
#include <torch/csrc/jit/passes/peephole.h>
1614
#include <torch/csrc/jit/runtime/graph_executor.h>
1715
#include <memory>
1816
#include <unordered_map>
@@ -79,8 +77,7 @@ static void DecomposeOp(Node* n) {
7977
return;
8078
}
8179
WithInsertPoint guard(n);
82-
auto outputs =
83-
insertGraph(*n->owningGraph(), *decomposition->get(), n->inputs());
80+
auto outputs = insertGraph(*n->owningGraph(), **decomposition, n->inputs());
8481
TORCH_INTERNAL_ASSERT(outputs.size() == n->outputs().size());
8582
for (size_t i : c10::irange(outputs.size())) {
8683
n->outputs().at(i)->replaceAllUsesWith(outputs[i]);

torch/csrc/jit/runtime/interpreter/code_impl.h

+6-1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
#include <memory>
44
#include <unordered_map>
5+
#include <utility>
56
#include <vector>
67

78
#include <c10/util/irange.h>
@@ -945,7 +946,11 @@ struct MobileCodeImpl : CodeImpl {
945946
bool support_default_args_before_out,
946947
bool emit_promoted_ops,
947948
size_t remaining_bailout_depth)
948-
: CodeImpl(graph, function_name, remaining_bailout_depth, false),
949+
: CodeImpl(
950+
graph,
951+
std::move(function_name),
952+
remaining_bailout_depth,
953+
false),
949954
emit_default_input_instructions_(emit_default_input_instructions),
950955
support_default_args_before_out_(support_default_args_before_out),
951956
emit_promoted_ops_(emit_promoted_ops) {

torch/csrc/jit/runtime/interpreter/preprocess_graph.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -209,6 +209,6 @@ PreprocessGraph::PreprocessGraph(Graph& g) : graph(g.copy()) {
209209
dropUnused(graph->block());
210210
// fill in move_flags by scanning blocks;
211211
insertLastUses(*graph);
212-
can_emit_inline = std::move(CanEmitInline(*graph.get()).can_emit_inline_);
212+
can_emit_inline = std::move(CanEmitInline(*graph).can_emit_inline_);
213213
}
214214
} // namespace torch::jit::interpreter

torch/csrc/jit/runtime/script_profile.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,8 @@ class TORCH_API SourceStats : public CustomClassHolder {
4646
public:
4747
using LineMap = c10::Dict<int64_t, c10::intrusive_ptr<InstructionStats>>;
4848

49-
SourceStats(SourceRef source, LineMap lineMap)
50-
: source_(std::move(source)), lineMap_(std::move(lineMap)) {}
49+
SourceStats(SourceRef source, const LineMap& lineMap)
50+
: source_(std::move(source)), lineMap_(lineMap) {}
5151

5252
const SourceRef& getSourceRef() const {
5353
return source_;

torch/csrc/jit/tensorexpr/ir.h

+2-5
Original file line numberDiff line numberDiff line change
@@ -859,12 +859,9 @@ class TORCH_API Intrinsics : public ExprNode<Intrinsics> {
859859
}
860860
}
861861

862-
Intrinsics(
863-
IntrinsicsOp op_type,
864-
Dtype dtype,
865-
const std::vector<ExprPtr>& params)
862+
Intrinsics(IntrinsicsOp op_type, Dtype dtype, std::vector<ExprPtr> params)
866863
: ExprNodeBase(IntrinsicsDtype(op_type, dtype)),
867-
params_(params),
864+
params_(std::move(params)),
868865
op_type_(op_type) {
869866
if (OpArgCount(op_type) != nparams()) {
870867
throw malformed_input("bad arg count in Intrinsics");

torch/csrc/jit/tensorexpr/loopnest.cpp

-5
Original file line numberDiff line numberDiff line change
@@ -25,11 +25,6 @@
2525
#include <torch/csrc/jit/tensorexpr/ir_verifier.h>
2626
#include <torch/csrc/jit/tensorexpr/tensor.h>
2727

28-
#include <stdexcept>
29-
#include <unordered_map>
30-
#include <unordered_set>
31-
#include <vector>
32-
3328
namespace torch::jit::tensorexpr {
3429

3530
LoopNest::LoopNest(const LoopNest& other)

torch/csrc/jit/testing/hooks_for_testing.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,14 @@ namespace torch::jit {
77
static ModuleHook emit_module_callback;
88
void didFinishEmitModule(Module module) {
99
if (emit_module_callback) {
10-
emit_module_callback(module);
10+
emit_module_callback(std::move(module));
1111
}
1212
}
1313

1414
static FunctionHook emit_function_callback;
1515
void didFinishEmitFunction(StrongFunctionPtr fn) {
1616
if (emit_function_callback) {
17-
emit_function_callback(fn);
17+
emit_function_callback(std::move(fn));
1818
}
1919
}
2020

0 commit comments

Comments
 (0)