
Commit 7bbdf87

cyyever authored and pytorchmergebot committed Sep 19, 2024
[22/N] Fix clang-tidy warnings in jit (pytorch#134829)
Follows pytorch#134537
Pull Request resolved: pytorch#134829
Approved by: https://github.com/ezyang
1 parent b71802f commit 7bbdf87

26 files changed: +34, -81 lines

torch/csrc/jit/api/module.cpp (-1)

@@ -564,7 +564,6 @@ std::string Module::dump_to_str(
   std::stringstream parameters_ss;
   std::stringstream attributes_ss;
   std::stringstream methods_ss;
-  std::stringstream submodules_ss;

   for (const NameTensor& p : named_parameters(/*recurse=*/false)) {
     parameters_ss << p.name << " = ";

torch/csrc/jit/backends/backend_detail.cpp (+3, -7)

@@ -11,9 +11,7 @@
 #include <stack>
 #include <unordered_map>

-namespace torch {
-namespace jit {
-namespace detail {
+namespace torch::jit::detail {
 namespace {

 /*
@@ -361,7 +359,7 @@ Module codegen_backend_module(

   wrapper_method_te.v("def_inputs", def_inputs);
   wrapper_method_te.v("fwd_inputs", fwd_inputs);
-  wrapper_methods.push_back(wrapper_method_ct.format(wrapper_method_te));
+  wrapper_methods.emplace_back(wrapper_method_ct.format(wrapper_method_te));

   // If the output type is a single element tuple then add an extra comma
   // to ensure the final output maintains this type.
@@ -408,6 +406,4 @@ Module codegen_backend_module(

   return wrapper;
 }
-} // namespace detail
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::detail
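
Note: the namespace rewrite above (repeated in several files below) matches what clang-tidy's modernize-concat-nested-namespaces check suggests: since C++17, nested namespaces can be opened and closed in a single declaration, with no effect on name lookup or call sites. A minimal standalone sketch; the greet() function is invented for illustration and is not part of the real API:

#include <iostream>

// Pre-C++17 spelling, as removed by the diff:
//   namespace torch { namespace jit { namespace detail { /* ... */ }}}
// C++17 concatenated spelling, as introduced by the diff:
namespace torch::jit::detail {
void greet() { // hypothetical function
  std::cout << "torch::jit::detail::greet()\n";
}
} // namespace torch::jit::detail

int main() {
  torch::jit::detail::greet(); // qualified calls are unchanged by the rewrite
  return 0;
}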

torch/csrc/jit/backends/nnapi/nnapi_backend_lib.cpp (+3, -5)

@@ -6,8 +6,7 @@
 #include <torch/csrc/jit/mobile/import.h>
 #include <torch/csrc/jit/mobile/module.h>

-namespace torch {
-namespace jit {
+namespace torch::jit {

 // Implementation of Android NNAPI Backend delegate

@@ -107,7 +106,7 @@ class NnapiBackend : public PyTorchBackendInterface {

   // Runs once per model initialization
   // Cannot be moved to compile(), because init() requires actual inputs
-  void init(c10::IValue handle, c10::List<at::Tensor> inputs) {
+  void init(const c10::IValue& handle, const c10::List<at::Tensor>& inputs) {
     TORCH_CHECK(comp_ == nullptr);
     auto dict = handle.toGenericDict();

@@ -134,5 +133,4 @@ constexpr auto backend_name = "nnapi";
 static auto cls = torch::jit::backend<NnapiBackend>(backend_name);
 } // namespace

-} // namespace jit
-} // namespace torch
+} // namespace torch::jit
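
Note: taking init()'s parameters as const references avoids copying a c10::IValue and a c10::List<at::Tensor> on every call; this is the pattern clang-tidy's performance-unnecessary-value-param check flags, and the at::Device signature changes in the two fuser/compiler files below follow the same reasoning. A minimal sketch, with an invented Config type standing in for a copy-expensive argument:

#include <string>
#include <vector>

// Hypothetical copy-expensive type (stand-in for c10::IValue and friends).
struct Config {
  std::vector<std::string> entries;
};

// By value (old style): the vector inside Config is copied on every call.
// size_t countEntries(Config c);

// By const reference (new style): read-only access, no copy.
size_t countEntries(const Config& c) {
  return c.entries.size();
}

int main() {
  const Config c{{"a", "b", "c"}};
  return countEntries(c) == 3 ? 0 : 1;
}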

torch/csrc/jit/codegen/fuser/compiler.cpp (+1, -1)

@@ -201,7 +201,7 @@ std::shared_ptr<FusedKernel> compileKernel(
     const KernelSpec& spec,
     const ArgSpec& arg_spec,
     const std::vector<int64_t>& map_size,
-    const at::Device device) {
+    const at::Device& device) {
   const std::vector<TensorDesc>& input_desc = arg_spec.descs();

   auto graph = spec.graph()->copy();

torch/csrc/jit/codegen/fuser/compiler.h (+1, -1)

@@ -25,7 +25,7 @@ TORCH_API std::shared_ptr<FusedKernel> compileKernel(
     const KernelSpec& spec,
     const ArgSpec& arg_spec,
     const std::vector<int64_t>& map_size,
-    const at::Device device);
+    const at::Device& device);

 TORCH_API size_t nCompiledKernels();

torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp (+2, -8)

@@ -11,10 +11,7 @@
 #include <iostream>
 #include <string>

-namespace torch {
-namespace jit {
-namespace fuser {
-namespace cpu {
+namespace torch::jit::fuser::cpu {

 #ifdef _MSC_VER
 static const std::string getTempPath() {
@@ -357,7 +354,4 @@ static std::shared_ptr<FusedKernel> createFusionKernel(
 }

 RegisterFusionBackend reg(DeviceType::CPU, createFusionKernel);
-} // namespace cpu
-} // namespace fuser
-} // namespace jit
-} // namespace torch
+} // namespace torch::jit::fuser::cpu

torch/csrc/jit/mobile/compatibility/model_compatibility.cpp (-5)

@@ -284,10 +284,6 @@ std::unordered_set<std::string> _get_mobile_model_contained_types(
 std::unordered_set<std::string> _get_mobile_model_contained_types(
     const std::vector<IValue>& bytecode_ivalues) {
   std::unordered_set<std::string> contained_types;
-  // To avoid parsing same type twice, declare $parsed_type_names_records and
-  // use type name (string, ex: "Dict[int, Tuple[Tensor, Tensor, Tensor]]") as
-  // the hash to record which types are parsed.
-  std::unordered_set<std::string> parsed_type_names_records;
   for (const auto i : c10::irange(1, bytecode_ivalues.size())) {
     const auto& method_tuple = bytecode_ivalues.at(i).toTupleRef().elements();
     auto type_table_tuple =
@@ -299,7 +295,6 @@ std::unordered_set<std::string> _get_mobile_model_contained_types(
     // for example: "Dict[int, Tuple[Tensor, Tensor, Tensor]]"
     std::vector<std::string> type_name_list;
     for (const auto& type_definition : type_table) {
-      std::unordered_set<std::string> type_tokens;
       std::string type_name = type_definition.toStringRef();
       type_name_list.emplace_back(type_name);
     }
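
Note: most of the pure deletions in this commit, starting with parsed_type_names_records above, follow one pattern: a local variable is declared (sometimes even documented) but never read afterwards, so deleting it cannot change behavior. Such dead locals are reported by compilers (-Wunused-variable) and surfaced through clang-tidy's clang-diagnostic checks. A minimal sketch of the pattern:

#include <string>
#include <unordered_set>

int main() {
  // Dead local: declared but never used again; flagged and safe to delete.
  // std::unordered_set<std::string> parsed_type_names_records;

  std::unordered_set<std::string> contained_types{"Tensor", "int"};
  return contained_types.count("Tensor") ? 0 : 1;
}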

torch/csrc/jit/passes/create_functional_graphs.cpp (-1)

@@ -80,7 +80,6 @@ struct FunctionalGraphSlicer {
         graph_->createWithSubgraph(prim::FunctionalGraph)
             ->insertBefore(block->return_node());
     auto reverse_iter = block->nodes().reverse();
-    std::vector<Value*> graph_outputs;
     for (auto it = reverse_iter.begin(); it != reverse_iter.end();) {
       Node* n = *it++;

torch/csrc/jit/passes/lower_tuples.cpp (-1)

@@ -40,7 +40,6 @@ static void flattenTupleInLoopParams(Node* n, size_t index) {
   Block* block = n->blocks().at(0);
   Node* block_node = n;

-  std::vector<Value*> new_node_inputs = {};
   auto new_construct_node =
       block->prependNode(block->owningGraph()->create(prim::TupleConstruct));
   for (size_t j = 0; j < tt->elements().size(); ++j) {

torch/csrc/jit/passes/quantization/insert_quant_dequant.cpp (-2)

@@ -513,7 +513,6 @@ void ReplicateChooseQParamsQuantDequant(std::shared_ptr<Graph>& graph) {
   Node* pattern_choose_qparam = choose_qparam_val->node();

   std::vector<DynamicQuantOps> nodes_to_rewrite;
-  std::vector<Node*> choose_qparam_nodes_to_rewrite;
   for (const Match& match : matches) {
     Node* matched_dequantize = match.nodes_map.at(pattern_dequant);
     Node* matched_quantize = match.nodes_map.at(pattern_quant);
@@ -1557,7 +1556,6 @@ QuantOpParams InsertQuantDeQuantHelper::insertCalculateQParams(
       "getQSchemeAndParamMap expects the corresponding observer for ",
       v->debugName(),
       " exists.");
-  std::vector<Value*> qparams_graph_values;
   QuantOpParams quant_op_params;

   TORCH_CHECK(

torch/csrc/jit/passes/quantization/quantization_patterns.h (-7)

@@ -808,13 +808,6 @@ graph(%a_quant, %alpha, %scale, %input_scale, %r_scale, %r_zero_point, %r_dtype)
       "%count_include_pad",
       "%divisor_override"});

-  std::string common_general_value_op = R"(
-%r_scale : float = aten::q_scale(%a_quant)
-%r_zero_point : int = aten::q_zero_point(%a_quant)
-%r_dtype : int = prim::dtype(%a_quant)
-%r_quant = aten::quantize_per_tensor(%r, %r_scale, %r_zero_point, %r_dtype)
-return (%r_quant) )";
-
   auto avg_pool3d = getInputTensorQParamOpFusionInfo(
       "aten::avg_pool3d",
       {"%kernel_size",

torch/csrc/jit/passes/quantization/register_packed_params.cpp (-1)

@@ -59,7 +59,6 @@ std::unordered_set<std::string> RegisterPrePackParams(
   int64_t uid = 0; // int + method name gives unique identifier
   auto graph = m.get_method(method_name).graph();
   std::stack<Block*> blocks_to_visit;
-  std::unordered_set<Node*> nodes_to_delete;
   blocks_to_visit.push(graph->block());
   std::string attr_name_base =
       attr_prefix + "_" + method_name + "_ondevice_ptq_packed_weight_";

torch/csrc/jit/passes/utils/subgraph_utils.cpp (-1)

@@ -133,7 +133,6 @@ void mergeSubgraph(Node* mergeTo, Node* mergeFrom) {
   }
   ++it;

-  std::vector<Node*> merged_nodes;
  while (it != end_it) {
    Node* node = *it;
    ++it;

torch/csrc/jit/runtime/static/impl.cpp (-2)

@@ -399,8 +399,6 @@ ManagedTensorRanges::ManagedTensorRanges(
     const AliasDb& alias_db,
     const c10::FastSet<const Value*>& managed_tensor_values) {
   const std::vector<Node*> nodes(block.nodes().begin(), block.nodes().end());
-  const c10::FastSet<const Value*> graph_inputs(
-      block.inputs().begin(), block.inputs().end());

   const auto num_nodes = static_cast<uint32_t>(nodes.size());
   for (const auto i : c10::irange(num_nodes)) {

torch/csrc/jit/serialization/export_bytecode.cpp (-1)

@@ -149,7 +149,6 @@ mobile::Code compileGraphToMobileCode(

   // operator names
   std::vector<std::string> method_names;
-  std::vector<int64_t> op_debug_handles;
   int next_new_op_index = 0;

   auto op_to_specified_args = code.op_to_num_specified_args();

torch/csrc/jit/serialization/flatbuffer_serializer.cpp (-1)

@@ -518,7 +518,6 @@ flatbuffers::Offset<mobile::serialization::ObjectType> FlatbufferSerializer::
   } else {
     size_t num_attr = class_ptr->numAttributes();
     std::vector<flatbuffers::Offset<flatbuffers::String>> names;
-    std::vector<uint32_t> type_index;
     for (size_t i = 0; i < num_attr; ++i) {
       names.push_back(fbb.CreateSharedString(class_ptr->getAttributeName(i)));
     }

torch/csrc/jit/tensorexpr/codegen.h (+3, -4)

@@ -244,13 +244,12 @@ class RegisterCodeGen {
     RegisterCodeGenList& codegen_list = RegisterCodeGenList::GetInstance();
     codegen_list.AddStmtFactoryMethod(
         name,
-        [](StmtPtr stmt,
+        [](const StmtPtr& stmt,
            const std::vector<CodeGen::BufferArg>& params,
            at::Device device,
            const std::string& kernel_func_name) {
-          std::unique_ptr<CodeGen> method(
-              new CodeGenType(stmt, params, device, kernel_func_name));
-          return method;
+          return std::make_unique<CodeGenType>(
+              stmt, params, device, kernel_func_name);
         });
   }
 };
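
Note: the lambda body above is the modernize-make-unique pattern: std::make_unique names the concrete type once and removes the bare new; the deduced std::unique_ptr<CodeGenType> then converts to the base-class std::unique_ptr<CodeGen> the registry presumably expects. A minimal sketch with invented stand-in types:

#include <memory>

struct CodeGenBase { // hypothetical stand-ins for the real CodeGen hierarchy
  virtual ~CodeGenBase() = default;
};
struct MyCodeGen : CodeGenBase {
  explicit MyCodeGen(int lanes) : lanes_(lanes) {}
  int lanes_;
};

// Old form: a named temporary built from a bare new.
std::unique_ptr<CodeGenBase> makeOld(int lanes) {
  std::unique_ptr<CodeGenBase> method(new MyCodeGen(lanes));
  return method;
}

// New form: one expression; derived-to-base conversion happens on return.
std::unique_ptr<CodeGenBase> makeNew(int lanes) {
  return std::make_unique<MyCodeGen>(lanes);
}

int main() {
  return (makeOld(4) && makeNew(4)) ? 0 : 1;
}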

torch/csrc/jit/tensorexpr/eval.cpp (+1, -1)

@@ -1044,7 +1044,7 @@ class SimpleIREvaluatorImpl : public IRVisitor {
           v->buffer_var()->name_hint());
     }
     buffer_mapping_[b] = buffer->data();
-    internal_buffers_.insert(std::make_pair(b, std::move(buffer)));
+    internal_buffers_.emplace(std::move(b), std::move(buffer));
   }

   void visit(const PlacementAllocatePtr& v) override {
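
Note: emplace() forwards its arguments straight to the stored pair's constructor, whereas insert(std::make_pair(...)) first materializes a temporary pair; moving b into the key slot also saves a copy of what is likely a refcounted pointer. A minimal sketch with std::map:

#include <map>
#include <memory>
#include <string>

int main() {
  std::map<std::string, std::unique_ptr<int>> internal_buffers;
  std::string key = "buf0";
  auto buffer = std::make_unique<int>(42);

  // Old form: internal_buffers.insert(std::make_pair(key, std::move(buffer)))
  // builds a temporary pair, then moves it into the map.

  // New form: construct the map node in place, moving both arguments.
  internal_buffers.emplace(std::move(key), std::move(buffer));

  return *internal_buffers.at("buf0") == 42 ? 0 : 1;
}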

torch/csrc/jit/tensorexpr/expr.cpp (+1, -1)

@@ -552,7 +552,7 @@ bool Buf::is_stride_one(int cur_dim) const {
   return exprEquals(strides_[cur_dim], alloc<LongImm>(1));
 }

-ExprHandle expr_to_vec(ExprHandle v, int lanes) {
+ExprHandle expr_to_vec(const ExprHandle& v, int lanes) {
   if (lanes == 1) {
     return v;
   } else {

torch/csrc/jit/tensorexpr/expr.h (+1, -1)

@@ -488,6 +488,6 @@ TORCH_API ExprHandle Relu(const ExprHandle& v1);
 TORCH_API ExprHandle
 ifThenElse(const ExprHandle& c, const ExprHandle& t, const ExprHandle& f);

-TORCH_API ExprHandle expr_to_vec(ExprHandle v, int lanes);
+TORCH_API ExprHandle expr_to_vec(const ExprHandle& v, int lanes);

 } // namespace torch::jit::tensorexpr

torch/csrc/jit/tensorexpr/ir_simplifier.cpp (-2)

@@ -2885,7 +2885,6 @@ ExprPtr SimplifierUnderContext::mutate(const DivPtr& v) {
   ExprPtr lhs = v->lhs();
   ExprPtr rhs = v->rhs();

-  std::ostringstream oss;
   if (auto ret = distributeDiv(lhs, rhs, var_bound_info_)) {
     GRAPH_DEBUG("SimplifierUnderContext: ", *v, " => ", *ret);
     return ret->accept_mutator(this);
@@ -3005,7 +3004,6 @@ ExprPtr SimplifierUnderContext::mutate(const ModPtr& v) {
   ExprPtr lhs = v->lhs();
   ExprPtr rhs = v->rhs();

-  std::ostringstream oss;
   if (auto ret = distributeMod(lhs, rhs, var_bound_info_)) {
     GRAPH_DEBUG("SimplifierUnderContext: ", *v, " => ", *ret);
     return ret->accept_mutator(this);

torch/csrc/jit/tensorexpr/kernel.cpp (-2)

@@ -984,7 +984,6 @@ TensorExprKernel::BackendType TensorExprKernel::inferBackendTypeFromDevice(
 // we use the debug names in printing cuda code, they need to be removed
 // of characters that can't be used in a variable identifier
 void TensorExprKernel::genInputDebugNames() {
-  std::unordered_map<std::string, const torch::jit::Value*> name_to_value;
   std::unordered_set<std::string> name_set;
   std::unordered_map<const torch::jit::Value*, std::string> value_to_name;
   for (const torch::jit::Value* input : graph_->inputs()) {
@@ -1747,7 +1746,6 @@ void TensorExprKernel::compile() {
     VarPtr v = t.buf()->base_handle();
     scalars_[output] = VarHandle(v);
     block->append_stmt(t.stmt());
-    std::vector<ExprPtr> dims;
     BufHandle buf(
         "scalar_" + sanitizeName(output->debugName()), {}, v->dtype());
     StmtPtr store = Store::make(buf, {}, ExprHandle(v));

torch/csrc/jit/tensorexpr/loopnest_randomization.cpp (+2, -2)

@@ -480,7 +480,7 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {
       }

       int index = rand() % (int)all_nested_loops.size();
-      auto nested_loops = all_nested_loops.at(index);
+      auto const& nested_loops = all_nested_loops.at(index);
       if (nested_loops.size() < 2) {
         break;
       }
@@ -554,7 +554,7 @@ void loopnestRandomization(int64_t seed, LoopNest& l) {

       // Randomly pick a set of consecutive loops to flatten
       int index = rand() % (int)all_nested_loops.size();
-      auto nested_loops = all_nested_loops.at(index);
+      auto const& nested_loops = all_nested_loops.at(index);

       // Generate a good history message
       std::vector<std::string> indices;
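
Note: binding the randomly picked element with auto const& aliases it in place instead of copying the whole inner container out of all_nested_loops; since nested_loops is only read afterwards, this is the situation clang-tidy's performance-unnecessary-copy-initialization check targets. A minimal sketch:

#include <cstdlib>
#include <vector>

int main() {
  std::vector<std::vector<int>> all_nested_loops{{1, 2}, {3, 4, 5}};

  int index = std::rand() % (int)all_nested_loops.size();

  // auto nested_loops = all_nested_loops.at(index);     // copies the inner vector
  auto const& nested_loops = all_nested_loops.at(index); // read-only alias, no copy

  return nested_loops.size() >= 2 ? 0 : 1;
}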

torch/csrc/jit/tensorexpr/operators/reduction.cpp (-1)

@@ -146,7 +146,6 @@ Tensor computeMax(
   }
   BufHandle ResultBuf("max", outputShape, dtype);
   BufHandle InputBuf = std::get<BufHandle>(inputs[0]);
-  std::vector<ExprHandle> max_dims_expr;
   auto max_dim = std::get<int64_t>(inputs[1]);
   auto keep_dim = std::get<bool>(inputs[2]);
   return Tensor(
