Skip to content

Commit 88012c7

Browse files
CodemodService FBSourceClangFormatLinterBot and facebook-github-bot
CodemodService FBSourceClangFormatLinterBot
authored and committed
[AutoAccept][Codemod][FBSourceClangFormatLinter] Daily arc lint --take CLANGFORMAT
Reviewed By: zertosh
Differential Revision: D33577744
fbshipit-source-id: 7ecc8367998ee1dffde54c2f4dd3cfafe19a53c9
1 parent 3a0c680 commit 88012c7

13 files changed

+23
-25
lines changed

test/cpp/jit/test_code_template.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#include <gtest/gtest.h>
22

3-
#include <test/cpp/jit/test_utils.h>
43
#include <ATen/code_template.h>
4+
#include <test/cpp/jit/test_utils.h>
55

66
namespace torch {
77
namespace jit {

test/cpp/tensorexpr/test_kernel.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
#include <gtest/gtest.h>
22

3+
#include <ATen/code_template.h>
34
#include <c10/util/irange.h>
45
#include <test/cpp/tensorexpr/test_base.h>
5-
#include <ATen/code_template.h>
66
#include <torch/csrc/jit/ir/ir.h>
77
#include <torch/csrc/jit/ir/irparser.h>
88
#include <torch/csrc/jit/passes/constant_propagation.h>

torch/csrc/jit/backends/backend_detail.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
#include <torch/csrc/jit/backends/backend_detail.h>
22

3+
#include <ATen/code_template.h>
34
#include <ATen/core/jit_type.h>
45
#include <torch/csrc/jit/backends/backend.h>
56
#include <torch/csrc/jit/backends/backend_debug_handler.h>
67
#include <torch/csrc/jit/backends/backend_debug_info.h>
78
#include <torch/csrc/jit/backends/backend_resolver.h>
8-
#include <ATen/code_template.h>
99

1010
#include <memory>
1111
#include <stack>

torch/csrc/jit/codegen/fuser/codegen.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
#include <torch/csrc/jit/codegen/fuser/codegen.h>
22

33
#include <ATen/ATen.h>
4+
#include <ATen/code_template.h>
45
#include <c10/util/Exception.h>
56
#include <torch/csrc/jit/codegen/fuser/compiler.h>
67
#include <torch/csrc/jit/codegen/fuser/interface.h>
78
#include <torch/csrc/jit/codegen/fuser/tensor_info.h>
8-
#include <ATen/code_template.h>
99
#include <torch/csrc/jit/ir/ir.h>
1010

1111
#include <torch/csrc/jit/codegen/fuser/cpu/resource_strings.h>

torch/csrc/jit/codegen/fuser/cpu/fused_kernel.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
#include <torch/csrc/jit/codegen/fuser/cpu/fused_kernel.h>
22

33
#include <ATen/DynamicLibrary.h>
4+
#include <ATen/code_template.h>
45
#include <c10/util/Exception.h>
56
#include <c10/util/Optional.h>
67
#include <torch/csrc/jit/codegen/fuser/compiler.h>
78
#include <torch/csrc/jit/codegen/fuser/cpu/temp_file.h>
8-
#include <ATen/code_template.h>
99
#include <torch/csrc/utils/memory.h>
1010

1111
#include <cstdlib>

torch/csrc/jit/codegen/fuser/cuda/resource_strings.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#pragma once
22

3-
#include <torch/csrc/Export.h>
43
#include <ATen/code_template.h>
4+
#include <torch/csrc/Export.h>
55

66
namespace torch {
77
namespace jit {

torch/csrc/jit/frontend/builtin_functions.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
#include <torch/csrc/jit/frontend/builtin_functions.h>
22

3+
#include <ATen/code_template.h>
34
#include <caffe2/serialize/versions.h>
45
#include <torch/csrc/api/include/torch/jit.h>
5-
#include <ATen/code_template.h>
66
#include <torch/csrc/jit/frontend/resolver.h>
77

88
namespace torch {

torch/csrc/jit/passes/frozen_conv_add_relu_fusion_cuda.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
#include <ATen/Utils.h>
22

3-
#include <ATen/cuda/CUDAConfig.h>
43
#include <ATen/code_template.h>
4+
#include <ATen/cuda/CUDAConfig.h>
55
#include <torch/csrc/jit/ir/constants.h>
66
#include <torch/csrc/jit/ir/ir.h>
77
#include <torch/csrc/jit/ir/subgraph_matcher.h>

torch/csrc/jit/passes/shape_analysis.cpp

+8-8
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ bool mergeTypes(
5050
return changed;
5151
}
5252

53-
void applyTypes(ArrayRef<Value*> src, ArrayRef<Value*> dst){
53+
void applyTypes(ArrayRef<Value*> src, ArrayRef<Value*> dst) {
5454
AT_ASSERT(src.size() == dst.size());
5555
for (const auto i : c10::irange(src.size())) {
5656
dst[i]->setType(src[i]->type());
@@ -90,7 +90,9 @@ void PropertyPropBase::processLoop(Node* node) {
9090
// note: inserting expands is unsafe at this point, we don't know
9191
// if the types are stable yet, so the arguments to expand may change
9292
} while (mergeTypes(
93-
loop.bodyCarriedInputs(), loop.bodyCarriedOutputs(), loop.bodyCarriedInputs()));
93+
loop.bodyCarriedInputs(),
94+
loop.bodyCarriedOutputs(),
95+
loop.bodyCarriedInputs()));
9496

9597
// now that the types are stable, we can insert the expands
9698
propagateBlock(loop.bodyBlock(), /*insert_expands=*/true);
@@ -111,7 +113,6 @@ namespace prim {
111113
using namespace ::c10::prim;
112114
}
113115

114-
115116
#define SHAPE_ASSERT(cond) \
116117
if (!(cond)) \
117118
throw propagation_error()
@@ -189,7 +190,7 @@ c10::optional<std::vector<TensorTypePtr>> gatherTensorTypes(
189190

190191
int64_t wrapDim(int64_t dim, at::IntArrayRef sizes) {
191192
if (dim < 0) {
192-
dim += (int64_t) sizes.size();
193+
dim += (int64_t)sizes.size();
193194
}
194195
return dim;
195196
}
@@ -204,7 +205,6 @@ c10::ScalarType unionScalarTypes(
204205
}
205206
}
206207

207-
208208
// Promotes result types for arithmetic operations on Tensor operands using
209209
// new type promotion logic. See tensor_attributes.rst for details.
210210
// This doesn't handle the case of arithmetic ops with Scalar arguments (when
@@ -251,7 +251,7 @@ c10::optional<c10::ScalarType> getPromotedTypeForArithmeticOp(Node* node) {
251251
return zerodim;
252252
}
253253

254-
class ShapePropagator: public PropertyPropBase {
254+
class ShapePropagator : public PropertyPropBase {
255255
public:
256256
explicit ShapePropagator(const std::shared_ptr<Graph>& graph)
257257
: PropertyPropBase(graph), aliasDb_(graph) {
@@ -585,7 +585,7 @@ class ShapePropagator: public PropertyPropBase {
585585
return in_resize;
586586
}
587587

588-
void propagateNode(Node* node, bool insert_expands = true) override {
588+
void propagateNode(Node* node, bool insert_expands = true) override {
589589
// Certain ops like resize_ change the input tensors size. Because our
590590
// analysis is flow invariant, we set any Tensor that can alias a resized
591591
// Tensor to the base Tensor Type without size information.
@@ -1469,7 +1469,7 @@ class ShapePropagator: public PropertyPropBase {
14691469
if (auto type =
14701470
node->namedInput(attr::self)->type()->cast<TensorType>()) {
14711471
if (type->dim()) {
1472-
return factory_like_with_ndim(node, (int) *type->dim());
1472+
return factory_like_with_ndim(node, (int)*type->dim());
14731473
}
14741474
}
14751475
return {};

torch/csrc/jit/passes/shape_analysis.h

+4-5
Original file line numberDiff line numberDiff line change
@@ -11,12 +11,11 @@ struct Graph;
1111

1212
struct propagation_error : std::exception {};
1313

14-
class PropertyPropBase{
14+
class PropertyPropBase {
1515
// Used for both Shape Propagation and Dtype/Device Propagation
16-
public:
16+
public:
1717
explicit PropertyPropBase(std::shared_ptr<Graph> graph)
18-
: graph_(std::move(graph)) {
19-
}
18+
: graph_(std::move(graph)) {}
2019
virtual ~PropertyPropBase() = default;
2120

2221
void propagateBlock(Block* block, bool insert_expands = true);
@@ -25,7 +24,7 @@ class PropertyPropBase{
2524
void processIf(Node* node);
2625
void processLoop(Node* node);
2726

28-
protected:
27+
protected:
2928
virtual void propagateNode(Node* node, bool insert_expands = true) = 0;
3029
void setUnshapedType(Value* o);
3130
void setUnshapedType(Node* node);

torch/csrc/jit/python/init.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@
2323
#include <torch/csrc/jit/passes/cuda_graph_fuser.h>
2424
#include <torch/csrc/jit/passes/dead_code_elimination.h>
2525
#include <torch/csrc/jit/passes/decompose_ops.h>
26-
#include <torch/csrc/jit/passes/dtype_analysis.h>
2726
#include <torch/csrc/jit/passes/device_type_analysis.h>
27+
#include <torch/csrc/jit/passes/dtype_analysis.h>
2828
#include <torch/csrc/jit/passes/erase_number_types.h>
2929
#include <torch/csrc/jit/passes/fold_conv_bn.h>
3030
#include <torch/csrc/jit/passes/freeze_module.h>

torch/csrc/jit/python/python_ir.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -821,8 +821,7 @@ void initPythonIRBindings(PyObject* module_) {
821821
.def(
822822
"device",
823823
[](Type& t) -> py::object {
824-
auto device =
825-
t.expectRef<TensorType>().device();
824+
auto device = t.expectRef<TensorType>().device();
826825
if (!device) {
827826
return py::none();
828827
}

torch/csrc/jit/tensorexpr/ir_simplifier.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -1866,7 +1866,7 @@ class ModRound {
18661866
ExprPtr mod_divisor;
18671867
};
18681868

1869-
c10::optional< class ModRound > isModRound(TermPtr e) {
1869+
c10::optional<class ModRound> isModRound(TermPtr e) {
18701870
DivPtr div{nullptr};
18711871
ModPtr mod{nullptr};
18721872
ExprPtr denom{nullptr};

0 commit comments

Comments
 (0)