Commit 622947a

malfet authored and pytorchmergebot committed
[BE] Use nested namespace in ATen/native (pytorch#115938)
It's a C++17 feature that usually makes code a bit more compact, and should have no side-effects otherwise.

Pull Request resolved: pytorch#115938
Approved by: https://github.com/Skylion007
1 parent e3aefe2 commit 622947a
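
The change applied throughout these files is the C++17 nested namespace definition: a single declaration such as namespace at::native { ... } opens the whole namespace path and closes it with one brace. A minimal before/after sketch (the function names are invented for illustration and do not appear in the diff):

// Pre-C++17 style: each enclosing namespace is opened and closed separately.
namespace at {
namespace native {

int example_kernel() { return 0; }  // hypothetical function, for illustration only

} // namespace native
} // namespace at

// C++17 nested namespace definition: same effect, one declaration and one closing brace.
namespace at::native {

int example_kernel_2() { return 1; }  // hypothetical function, for illustration only

} // namespace at::native

Both forms place the declarations in at::native; the nested form simply drops one level of opening and closing lines per file, which is where the net line reduction in this commit comes from.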

118 files changed (+315, -512 lines)


aten/src/ATen/native/Activation.cpp (+4, -5)

@@ -81,8 +81,7 @@
 #include <utility>
 #endif

-namespace at {
-namespace meta {
+namespace at::meta {
 // computes `result = self <= threshold ? value : other`
 // other is `self` in threshold() and `grad` in threshold_backward()
 TORCH_META_FUNC(threshold)(const Tensor& self, const Scalar& threshold, const Scalar& value) {
@@ -238,9 +237,9 @@ TORCH_META_FUNC(gelu_backward) (
 build_borrowing_binary_op(maybe_get_output(), grad, self);
 }

-} // namespace meta
+} // namespace at::meta

-namespace native {
+namespace at::native {

 static const double SELU_ALPHA = 1.6732632423543772848170429916717;
 static const double SELU_SCALE = 1.0507009873554804934193349852946;
@@ -829,4 +828,4 @@ Tensor& log_sigmoid_backward_cpu_out(const Tensor& grad_output,
 DEFINE_DISPATCH(GeluKernel);
 DEFINE_DISPATCH(GeluBackwardKernel);

-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/AdaptiveAveragePooling.cpp (+2, -4)

@@ -17,8 +17,7 @@
 #endif


-namespace at {
-namespace native {
+namespace at::native {

 namespace {

@@ -153,5 +152,4 @@ namespace {
 DEFINE_DISPATCH(adaptive_avg_pool2d_kernel);
 DEFINE_DISPATCH(adaptive_avg_pool2d_backward_kernel);

-} // at::native
-} // at
+} // namespace at::native

aten/src/ATen/native/AdaptiveAveragePooling3d.cpp (+2, -4)

@@ -19,8 +19,7 @@
 #include <ATen/ops/zeros_like.h>
 #endif

-namespace at {
-namespace native {
+namespace at::native {

 namespace {

@@ -339,5 +338,4 @@ Tensor adaptive_avg_pool3d_backward_cpu(const Tensor& gradOutput_,
 return gradInput;
 }

-} // namespace native
-} // namespace at
+} // namespace at::native

aten/src/ATen/native/AdaptiveMaxPooling2d.cpp (+4, -6)

@@ -11,8 +11,7 @@
 #include <ATen/ops/adaptive_max_pool2d_native.h>
 #endif

-namespace at {
-namespace meta {
+namespace at::meta {
 TORCH_META_FUNC(adaptive_max_pool2d) (const Tensor& input, IntArrayRef output_size) {
 int ndim = input.ndimension();
 TORCH_CHECK(ndim == 3 || ndim == 4,
@@ -67,9 +66,9 @@ TORCH_META_FUNC(adaptive_max_pool2d_backward)

 set_output_raw_strided(0, input.sizes(), {}, input.options().memory_format(input.suggest_memory_format()));
 }
-} // namespace meta
+} // namespace at::meta

-namespace native {
+namespace at::native {

 TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cpu)
 (const Tensor& input, IntArrayRef output_size, const Tensor& output, const Tensor& indices) {
@@ -85,5 +84,4 @@ TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cpu)
 DEFINE_DISPATCH(adaptive_max_pool2d_kernel);
 DEFINE_DISPATCH(adaptive_max_pool2d_backward_kernel);

-} // at::native
-} // at
+} // namespace at::native

aten/src/ATen/native/AdaptiveMaxPooling3d.cpp (+3, -5)

@@ -14,8 +14,7 @@
 #include <ATen/ops/adaptive_max_pool3d_native.h>
 #endif

-namespace at {
-namespace meta {
+namespace at::meta {
 TORCH_META_FUNC(adaptive_max_pool3d) (const Tensor& input, IntArrayRef output_size) {
 auto ndim = input.ndimension();
 TORCH_CHECK(
@@ -72,7 +71,7 @@ TORCH_META_FUNC(adaptive_max_pool3d_backward)
 }
 } // namespace meta

-namespace native {
+namespace at::native {

 namespace {

@@ -433,5 +432,4 @@ TORCH_IMPL_FUNC(adaptive_max_pool3d_backward_out_cpu)
 });
 }
 }
-} // at::native
-} // at
+} // namespace at::native

aten/src/ATen/native/AffineGridGenerator.cpp (+2, -2)

@@ -13,7 +13,7 @@
 #include <ATen/ops/tensor.h>
 #endif

-namespace at { namespace native {
+namespace at::native {

 static at::Tensor linspace_from_neg_one(const Tensor& grid, int64_t num_steps,
 bool align_corners) {
@@ -143,4 +143,4 @@ Tensor affine_grid_generator_backward(const Tensor& grad, IntArrayRef size, bool
 }
 }

-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/AutogradComposite.cpp (+2, -5)

@@ -14,8 +14,7 @@
 #include <ATen/ops/zeros.h>
 #endif

-namespace at {
-namespace native {
+namespace at::native {

 // We expect this code to only be reached in inference mode and when all inputs are inference tensors
 Tensor _make_dual(const Tensor& primal, const Tensor& tangent, int64_t level) {
@@ -90,6 +89,4 @@ bool _has_same_storage_numel(const at::Tensor& base, const at::Tensor& other) {
 return base.storage().nbytes() / base.itemsize() == other.storage().nbytes() / other.itemsize();
 }

-} // namespace native
-
-} // namespace at
+} // namespace at::native

aten/src/ATen/native/AveragePool2d.cpp (+5, -8)

@@ -11,10 +11,8 @@
 #include <ATen/ops/avg_pool2d_native.h>
 #endif

-namespace at {
-
-namespace meta{
-using namespace native;
+namespace at::meta {
+using namespace ::at::native;

 TORCH_PRECOMPUTE_META_FUNC(avg_pool2d)
 (const Tensor& input,
@@ -147,9 +145,9 @@ TORCH_META_FUNC(avg_pool2d_backward) (
 set_output_raw_strided(0, input.sizes(), {}, input.options().memory_format(memory_format));
 }

-} // namespace meta
+} // namespace at::meta

-namespace native {
+namespace at::native {

 TORCH_IMPL_FUNC(avg_pool2d_out_cpu)
 (const Tensor& input,
@@ -215,5 +213,4 @@ TORCH_IMPL_FUNC(avg_pool2d_backward_out_cpu) (
 DEFINE_DISPATCH(avg_pool2d_kernel);
 DEFINE_DISPATCH(avg_pool2d_backward_kernel);

-} // at::native
-} // at
+} // namespace at::native
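
A side note on the pooling files: together with the nested namespace declaration, the using-directive is spelled fully qualified (using namespace ::at::native; instead of using namespace native;), so it no longer relies on relative lookup from the enclosing namespace. A small self-contained sketch of that pattern, with invented names (demo, detail, api, helper):

// Invented namespaces for illustration; only the shape of the pattern mirrors the diff.
namespace demo::detail {
int helper() { return 7; }
} // namespace demo::detail

namespace demo::api {
using namespace ::demo::detail;  // fully qualified from the global namespace
int call_helper() { return helper(); }  // finds demo::detail::helper via the directive
} // namespace demo::api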

aten/src/ATen/native/AveragePool3d.cpp (+5, -8)

@@ -15,10 +15,8 @@
 #include <ATen/ops/avg_pool3d_native.h>
 #endif

-namespace at {
-
-namespace meta{
-using namespace native;
+namespace at::meta {
+using namespace ::at::native;

 TORCH_META_FUNC(avg_pool3d) (
 const Tensor& input,
@@ -149,9 +147,9 @@ TORCH_META_FUNC(avg_pool3d_backward) (
 set_output_raw_strided(0, input.sizes(), {}, input.options());
 }

-} // namespace meta
+} // namespace at::meta

-namespace native {
+namespace at::native {

 namespace {

@@ -525,5 +523,4 @@ TORCH_IMPL_FUNC(avg_pool3d_backward_out_cpu) (
 }
 }

-} // at::native
-} // at
+} // namespace at::native

aten/src/ATen/native/BatchLinearAlgebra.cpp (+4, -5)

@@ -409,8 +409,7 @@ extern "C" void dtrsm_(char *side, char *uplo, char *trans, char *diag, int *n,
 extern "C" void strsm_(char *side, char *uplo, char *trans, char *diag, int *n, int *nrhs, float *alpha, float *a, int *lda, float *b, int *ldb);
 #endif

-namespace at {
-namespace meta {
+namespace at::meta {

 TORCH_META_FUNC(linalg_ldl_factor_ex)
 (const Tensor& self, bool hermitian, bool check_errors) {
@@ -775,9 +774,9 @@ TORCH_META_FUNC(linalg_lu)(const Tensor& A, bool pivot) {
 set_output_raw_strided(2, sizes, {}, A.options(), {});
 }

-} // namespace meta
+} // namespace at::meta

-namespace native {
+namespace at::native {

 #if AT_BUILD_WITH_LAPACK()
 // Define the per-batch functions to be used in the main implementation of the batched
@@ -4036,4 +4035,4 @@ Tensor linalg_vander_symint(
 auto ones = result.new_ones_symint(shape);
 return at::cat({std::move(ones), std::move(result)}, /*dim=*/ -1);
 }
-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/BinaryOps.cpp (+4, -7)

@@ -146,9 +146,7 @@
 #include <ATen/ops/xor_native.h>
 #endif

-namespace at {
-
-namespace meta {
+namespace at::meta {

 TORCH_META_FUNC2(add, Tensor) (
 const Tensor& self, const Tensor& other, const Scalar& alpha
@@ -371,10 +369,10 @@ CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(le);
 CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(gt);
 CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(ge);

-} // namespace meta
+} // namespace at::meta


-namespace native {
+namespace at::native {

 DEFINE_DISPATCH(add_clamp_stub);
 DEFINE_DISPATCH(mul_stub);
@@ -1609,5 +1607,4 @@ Tensor special_xlogy(const Tensor& x, const Scalar& y) {
 return at::xlogy(x, y);
 }

-} // namespace native
-} // namespace at
+} // namespace at::native

aten/src/ATen/native/Blas.cpp (+4, -5)

@@ -26,8 +26,7 @@
 #include <ATen/ops/vdot_native.h>
 #endif

-namespace at {
-namespace meta {
+namespace at::meta {
 TORCH_META_FUNC(addmv)(const Tensor &self, const Tensor &mat, const Tensor &vec, const Scalar& beta, const Scalar& alpha) {
 TORCH_CHECK((mat.dim() == 2 && vec.dim() == 1 && self.dim() <= 1),
 "vector + matrix @ vector expected, got ", self.dim(), ", ", mat.dim(), ", ", vec.dim());
@@ -37,9 +36,9 @@ TORCH_META_FUNC(addmv)(const Tensor &self, const Tensor &mat, const Tensor &vec,
 auto names = at::namedinference::propagate_names_for_addmv(mat, vec, self);
 set_output_raw_strided(0, IntArrayRef(mat.sizes().data(), 1), {}, vec.options(), names);
 }
-}
+} // namespace at::meta

-namespace native {
+namespace at::native {

 template<typename scalar_t>
 void gemv(char trans, int64_t m, int64_t n, scalar_t alpha, const scalar_t *a, int64_t lda, const scalar_t *x, int64_t incx, scalar_t beta, scalar_t *y, int64_t incy);
@@ -223,4 +222,4 @@ Tensor vdot(const Tensor &self, const Tensor &other){

 }

-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/BlasKernel.cpp (+2, -2)

@@ -71,7 +71,7 @@ extern "C" void sgemv_(char *trans, int *m, int *n, float *alpha, float *a, int
 #endif // AT_BLAS_USE_CBLAS_DOT
 #endif // AT_BUILD_WITH_BLAS

-namespace at { namespace native {
+namespace at::native {

 namespace blas_impl {

@@ -403,4 +403,4 @@ INSTANTIATE_VDOT_IMPL(c10::complex<double>);

 #undef INSTANTIATE_DOT_IMPL

-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/Bucketization.cpp (+2, -3)

@@ -33,8 +33,7 @@
 * - Restrictions are defined in searchsorted_pre_check()
 */

-namespace at {
-namespace native {
+namespace at::native {

 namespace {

@@ -244,4 +243,4 @@ Tensor bucketize_cpu(const Scalar& self, const Tensor& boundaries, bool out_int3
 return bucketize_cpu(searchsorted_scalar_tensor(self, boundaries.device()), boundaries, out_int32, right);
 }

-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/CPUBlas.cpp (+2, -4)

@@ -41,9 +41,7 @@ extern "C" void zaxpy_(int *n, void *a, const void *x, int *incx, void *y, int *
 #include <fbgemm/FbgemmI64.h>
 #endif // USE_FBGEMM

-namespace at {
-namespace native {
-namespace cpublas {
+namespace at::native::cpublas {
 namespace internal {

 void normalize_last_dims(
@@ -782,4 +780,4 @@ void copy(int64_t n, const c10::complex<float> *x, int64_t incx, c10::complex<fl
 n, x, incx, y, incy);
 }

-}}} // namespace at::native::cpublas
+} // namespace at::native::cpublas

aten/src/ATen/native/CPUFallback.cpp (+2, -3)

@@ -17,7 +17,7 @@
 #endif


-namespace at { namespace native {
+namespace at::native {

 // convenience helper for converting tensors to cpu

@@ -293,5 +293,4 @@ void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool
 }
 }

-} // namespace native
-} // namespace at
+} // namespace at::native

aten/src/ATen/native/ChanelShuffle.cpp (+2, -3)

@@ -17,8 +17,7 @@
 #include <ATen/ops/native_channel_shuffle_native.h>
 #endif

-namespace at {
-namespace native {
+namespace at::native {

 Tensor channel_shuffle_cpu(const Tensor& self, int64_t groups) {
 auto memory_format = self.suggest_memory_format();
@@ -86,4 +85,4 @@ Tensor math_channel_shuffle(const Tensor& self, int64_t groups) {

 DEFINE_DISPATCH(channel_shuffle_kernel);

-}} // namespace at::native
+} // namespace at::native

aten/src/ATen/native/Col2Im.cpp (+2, -4)

@@ -68,8 +68,7 @@
 //
 // ALSO do vol2col

-namespace at {
-namespace native {
+namespace at::native {
 namespace {

 static void col2im_out_cpu_template(
@@ -215,5 +214,4 @@ Tensor col2im_cpu(
 return output;
 }

-} // namespace native
-} // namespace at
+} // namespace at::native

0 commit comments
