Skip to content

Commit 1dd503c

Browse files
cyyever
authored and pytorchmergebot committed
[4/N] Fix Wextra-semi warning (pytorch#139256)
Fixes #ISSUE_NUMBER Pull Request resolved: pytorch#139256 Approved by: https://github.com/ezyang
1 parent bd88d40 commit 1dd503c

File tree

71 files changed

+422
-422
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

71 files changed

+422
-422
lines changed

aten/src/ATen/CPUApplyUtils.h

+6-6
Original file line numberDiff line numberDiff line change
@@ -157,7 +157,7 @@ inline int64_t _max_dim_tensors(ArrayRef<Tensor> tensors) {
157157
return dim;
158158
}
159159

160-
inline void iterate(int64_t /*size*/){};
160+
inline void iterate(int64_t /*size*/) {}
161161

162162
template <typename Arg, typename... Args>
163163
inline void iterate(int64_t size, Arg& iter, Args&... iter_tail) {
@@ -168,7 +168,7 @@ inline void iterate(int64_t size, Arg& iter, Args&... iter_tail) {
168168

169169
inline bool iterate_continue() {
170170
return true;
171-
};
171+
}
172172

173173
template <typename Arg, typename... Args>
174174
inline bool iterate_continue(Arg& iter, Args&... iter_tail) {
@@ -178,7 +178,7 @@ inline bool iterate_continue(Arg& iter, Args&... iter_tail) {
178178

179179
inline int64_t max_iterate_size() {
180180
return std::numeric_limits<int64_t>::max();
181-
};
181+
}
182182

183183
template <typename Arg, typename... Args>
184184
inline int64_t max_iterate_size(Arg& iter, Args&... iter_tail) {
@@ -187,7 +187,7 @@ inline int64_t max_iterate_size(Arg& iter, Args&... iter_tail) {
187187
max_iterate_size(iter_tail...));
188188
}
189189

190-
inline void iterate_overflow(){};
190+
inline void iterate_overflow() {}
191191

192192
template <typename Arg, typename... Args>
193193
inline void iterate_overflow(Arg& iter, Args&... iter_tail) {
@@ -204,7 +204,7 @@ inline void iterate_overflow(Arg& iter, Args&... iter_tail) {
204204
iterate_overflow(iter_tail...);
205205
}
206206

207-
inline void forward(int64_t /*offset*/){};
207+
inline void forward(int64_t /*offset*/) {}
208208

209209
template <typename Arg, typename... Args>
210210
inline void forward(int64_t offset, Arg& iter, Args&... iter_tail) {
@@ -227,7 +227,7 @@ inline int64_t max_dim(Arg& iter, Args&... iter_tail) {
227227
return std::max(iter.dim_, max_dim(iter_tail...));
228228
}
229229

230-
inline void apply_op(){};
230+
inline void apply_op() {}
231231

232232
template <typename Op, typename... Args>
233233
inline void apply_op(

aten/src/ATen/functorch/BatchRulesLinearAlgebra.cpp

+7-7
Original file line numberDiff line numberDiff line change
@@ -686,43 +686,43 @@ _scaled_dot_product_cudnn_attention_batch_rule(
686686
#endif
687687

688688
#define LINALG_CHECK_MATRIX_UNARY_ONE_OUT(fn, op_name) \
689-
LINALG_STRING_CONST(fn, op_name);\
689+
LINALG_STRING_CONST(fn, op_name)\
690690
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {\
691691
VMAP_SUPPORT(fn, LINALG_CHECK_MATRIX_UNARY_BATCH_RULE(fn, one));\
692692
}
693693

694694
#define LINALG_CHECK_MATRIX_UNARY_ONE_OUT2(fn, overload, op_name) \
695-
LINALG_STRING_CONST2(fn, overload, op_name);\
695+
LINALG_STRING_CONST2(fn, overload, op_name)\
696696
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {\
697697
VMAP_SUPPORT2(fn, overload, LINALG_CHECK_MATRIX_UNARY_BATCH_RULE2(fn, overload, one));\
698698
}
699699

700700
#define LINALG_CHECK_MATRIX_UNARY_TWO_OUT(fn, op_name) \
701-
LINALG_STRING_CONST(fn, op_name);\
701+
LINALG_STRING_CONST(fn, op_name)\
702702
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {\
703703
VMAP_SUPPORT(fn, LINALG_CHECK_MATRIX_UNARY_BATCH_RULE(fn, two));\
704704
}
705705

706706
#define LINALG_CHECK_MATRIX_UNARY_THREE_OUT(fn, op_name) \
707-
LINALG_STRING_CONST(fn, op_name);\
707+
LINALG_STRING_CONST(fn, op_name)\
708708
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {\
709709
VMAP_SUPPORT(fn, LINALG_CHECK_MATRIX_UNARY_BATCH_RULE(fn, three));\
710710
}
711711

712712
#define LINALG_CHECK_MATRIX_UNARY_FOUR_OUT(fn, op_name) \
713-
LINALG_STRING_CONST(fn, op_name);\
713+
LINALG_STRING_CONST(fn, op_name)\
714714
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {\
715715
VMAP_SUPPORT(fn, LINALG_CHECK_MATRIX_UNARY_BATCH_RULE(fn, four));\
716716
}
717717

718718
#define LINALG_CHECK_MATRIX_BINARY_ONE_OUT(fn, op_name) \
719-
LINALG_STRING_CONST(fn, op_name);\
719+
LINALG_STRING_CONST(fn, op_name)\
720720
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {\
721721
VMAP_SUPPORT(fn, LINALG_CHECK_MATRIX_BINARY_BATCH_RULE(fn, one));\
722722
}
723723

724724
#define LINALG_CHECK_MATRIX_BINARY_TWO_OUT(fn, op_name) \
725-
LINALG_STRING_CONST(fn, op_name);\
725+
LINALG_STRING_CONST(fn, op_name)\
726726
TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {\
727727
VMAP_SUPPORT(fn, LINALG_CHECK_MATRIX_BINARY_BATCH_RULE(fn, two));\
728728
}

aten/src/ATen/native/BinaryOps.cpp

+38-38
Original file line numberDiff line numberDiff line change
@@ -321,14 +321,14 @@ TORCH_META_FUNC(tanh_backward) (const Tensor& grad_output, const Tensor& output)
321321
build_borrowing_binary_op(maybe_get_output(), self, other); \
322322
}
323323

324-
CREATE_BINARY_META_FUNC(logaddexp);
325-
CREATE_BINARY_META_FUNC(logaddexp2);
326-
CREATE_BINARY_META_FUNC(gcd);
327-
CREATE_BINARY_META_FUNC(lcm);
328-
CREATE_BINARY_META_FUNC(hypot);
329-
CREATE_BINARY_META_FUNC(igamma);
330-
CREATE_BINARY_META_FUNC(igammac);
331-
CREATE_BINARY_META_FUNC(nextafter);
324+
CREATE_BINARY_META_FUNC(logaddexp)
325+
CREATE_BINARY_META_FUNC(logaddexp2)
326+
CREATE_BINARY_META_FUNC(gcd)
327+
CREATE_BINARY_META_FUNC(lcm)
328+
CREATE_BINARY_META_FUNC(hypot)
329+
CREATE_BINARY_META_FUNC(igamma)
330+
CREATE_BINARY_META_FUNC(igammac)
331+
CREATE_BINARY_META_FUNC(nextafter)
332332

333333
TORCH_META_FUNC(maximum) (const Tensor& self, const Tensor& other) {
334334
TORCH_CHECK(!self.is_complex() && !other.is_complex(), "maximum not implemented for complex tensors.");
@@ -362,12 +362,12 @@ TORCH_META_FUNC(fmin) (const Tensor& self, const Tensor& other) {
362362
build_borrowing_except_last_argument_comparison_op(maybe_get_output(), self, other_tensor); \
363363
}
364364

365-
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(eq);
366-
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(ne);
367-
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(lt);
368-
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(le);
369-
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(gt);
370-
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(ge);
365+
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(eq)
366+
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(ne)
367+
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(lt)
368+
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(le)
369+
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(gt)
370+
CREATE_COMPARISON_SCALAR_TENSOR_META_FUNC(ge)
371371

372372
} // namespace at::meta
373373

@@ -532,24 +532,24 @@ TORCH_IMPL_FUNC(func_out) (const Tensor& self, const Tensor& other, const Tensor
532532
func_stub(device_type(), *this); \
533533
}
534534

535-
CREATE_BINARY_TORCH_IMPL_FUNC(bitwise_and_out, bitwise_and_stub);
536-
CREATE_BINARY_TORCH_IMPL_FUNC(bitwise_or_out, bitwise_or_stub);
537-
CREATE_BINARY_TORCH_IMPL_FUNC(bitwise_xor_out, bitwise_xor_stub);
538-
CREATE_BINARY_TORCH_IMPL_FUNC(maximum_out, maximum_stub);
539-
CREATE_BINARY_TORCH_IMPL_FUNC(minimum_out, minimum_stub);
540-
CREATE_BINARY_TORCH_IMPL_FUNC(fmax_out, fmax_stub);
541-
CREATE_BINARY_TORCH_IMPL_FUNC(fmin_out, fmin_stub);
542-
CREATE_BINARY_TORCH_IMPL_FUNC(fmod_out, fmod_stub);
543-
CREATE_BINARY_TORCH_IMPL_FUNC(logaddexp_out, logaddexp_stub);
544-
CREATE_BINARY_TORCH_IMPL_FUNC(logaddexp2_out, logaddexp2_stub);
545-
CREATE_BINARY_TORCH_IMPL_FUNC(gcd_out, gcd_stub);
546-
CREATE_BINARY_TORCH_IMPL_FUNC(lcm_out, lcm_stub);
547-
CREATE_BINARY_TORCH_IMPL_FUNC(hypot_out, hypot_stub);
548-
CREATE_BINARY_TORCH_IMPL_FUNC(igamma_out, igamma_stub);
549-
CREATE_BINARY_TORCH_IMPL_FUNC(igammac_out, igammac_stub);
550-
CREATE_BINARY_TORCH_IMPL_FUNC(nextafter_out, nextafter_stub);
551-
CREATE_BINARY_TORCH_IMPL_FUNC(remainder_out, remainder_stub);
552-
CREATE_BINARY_TORCH_IMPL_FUNC(xlogy_out, xlogy_stub);
535+
CREATE_BINARY_TORCH_IMPL_FUNC(bitwise_and_out, bitwise_and_stub)
536+
CREATE_BINARY_TORCH_IMPL_FUNC(bitwise_or_out, bitwise_or_stub)
537+
CREATE_BINARY_TORCH_IMPL_FUNC(bitwise_xor_out, bitwise_xor_stub)
538+
CREATE_BINARY_TORCH_IMPL_FUNC(maximum_out, maximum_stub)
539+
CREATE_BINARY_TORCH_IMPL_FUNC(minimum_out, minimum_stub)
540+
CREATE_BINARY_TORCH_IMPL_FUNC(fmax_out, fmax_stub)
541+
CREATE_BINARY_TORCH_IMPL_FUNC(fmin_out, fmin_stub)
542+
CREATE_BINARY_TORCH_IMPL_FUNC(fmod_out, fmod_stub)
543+
CREATE_BINARY_TORCH_IMPL_FUNC(logaddexp_out, logaddexp_stub)
544+
CREATE_BINARY_TORCH_IMPL_FUNC(logaddexp2_out, logaddexp2_stub)
545+
CREATE_BINARY_TORCH_IMPL_FUNC(gcd_out, gcd_stub)
546+
CREATE_BINARY_TORCH_IMPL_FUNC(lcm_out, lcm_stub)
547+
CREATE_BINARY_TORCH_IMPL_FUNC(hypot_out, hypot_stub)
548+
CREATE_BINARY_TORCH_IMPL_FUNC(igamma_out, igamma_stub)
549+
CREATE_BINARY_TORCH_IMPL_FUNC(igammac_out, igammac_stub)
550+
CREATE_BINARY_TORCH_IMPL_FUNC(nextafter_out, nextafter_stub)
551+
CREATE_BINARY_TORCH_IMPL_FUNC(remainder_out, remainder_stub)
552+
CREATE_BINARY_TORCH_IMPL_FUNC(xlogy_out, xlogy_stub)
553553

554554
Tensor special_xlog1py(const Scalar& x, const Tensor& y) {
555555
return at::special_xlog1py(wrapped_scalar_tensor(x), y);
@@ -1462,12 +1462,12 @@ Tensor& greater_equal_(Tensor& self, const Scalar& other) { return self.ge_(othe
14621462
func##_stub(device_type(), *this); \
14631463
}
14641464

1465-
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(eq);
1466-
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(ne);
1467-
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(gt);
1468-
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(ge);
1469-
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(lt);
1470-
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(le);
1465+
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(eq)
1466+
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(ne)
1467+
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(gt)
1468+
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(ge)
1469+
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(lt)
1470+
CREATE_COMPARISON_SCALAR_TENSOR_IMPL_FUNC(le)
14711471

14721472
// not_equal, alias for torch.ne
14731473
Tensor& not_equal_out(const Tensor& self, const Tensor& other, Tensor& result) { return at::ne_out(result, self, other); }

aten/src/ATen/native/NaiveConvolutionTranspose3d.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -943,6 +943,6 @@ static std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose3d_backward_cpu(
943943
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
944944
}
945945

946-
REGISTER_ALL_CPU_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cpu);
946+
REGISTER_ALL_CPU_DISPATCH(slow_conv_transpose3d_backward_stub, &slow_conv_transpose3d_backward_cpu)
947947

948948
} // namespace at::native

aten/src/ATen/native/TensorAdvancedIndexing.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -479,8 +479,8 @@ DEFINE_DISPATCH(index_put_with_sort_stub);
479479
DEFINE_DISPATCH(put_stub);
480480
DEFINE_DISPATCH(take_stub);
481481
DEFINE_DISPATCH(masked_fill_stub);
482-
REGISTER_NO_CPU_DISPATCH(index_put_with_sort_stub);
483-
REGISTER_NO_CPU_DISPATCH(index_put_with_sort_quantized_stub);
482+
REGISTER_NO_CPU_DISPATCH(index_put_with_sort_stub)
483+
REGISTER_NO_CPU_DISPATCH(index_put_with_sort_quantized_stub)
484484
DEFINE_DISPATCH(masked_select_serial_stub);
485485
DEFINE_DISPATCH(masked_select_stub);
486486
DEFINE_DISPATCH(masked_scatter_stub);

aten/src/ATen/native/cpu/Activation.cpp

+11-11
Original file line numberDiff line numberDiff line change
@@ -1400,17 +1400,17 @@ void prelu_backward_kernel(TensorIterator& iter) {
14001400
} // namespace
14011401

14021402

1403-
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
1404-
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
1405-
REGISTER_DISPATCH(threshold_stub, &threshold_kernel);
1406-
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
1407-
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
1408-
REGISTER_DISPATCH(prelu_stub, &prelu_kernel);
1409-
REGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel);
1410-
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
1411-
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
1412-
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
1413-
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
1403+
REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel)
1404+
REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel)
1405+
REGISTER_DISPATCH(threshold_stub, &threshold_kernel)
1406+
REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel)
1407+
REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel)
1408+
REGISTER_DISPATCH(prelu_stub, &prelu_kernel)
1409+
REGISTER_DISPATCH(prelu_backward_stub, &prelu_backward_kernel)
1410+
REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel)
1411+
REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel)
1412+
REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel)
1413+
REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel)
14141414

14151415
ALSO_REGISTER_AVX512_DISPATCH(log_sigmoid_cpu_stub, &log_sigmoid_cpu_kernel);
14161416
ALSO_REGISTER_AVX512_DISPATCH(log_sigmoid_backward_stub, &log_sigmoid_backward_cpu_kernel);

aten/src/ATen/native/cpu/AdaptiveAvgPoolKernel.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -854,9 +854,9 @@ void adapative_avg_pool3d_backward_kernel_impl(
854854

855855
} // anonymous namespace
856856

857-
REGISTER_DISPATCH(adaptive_avg_pool2d_kernel, &adaptive_avg_pool2d_kernel_impl);
858-
REGISTER_DISPATCH(adaptive_avg_pool2d_backward_kernel, &adapative_avg_pool2d_backward_kernel_impl);
859-
REGISTER_DISPATCH(adaptive_avg_pool3d_kernel, &adaptive_avg_pool3d_kernel_impl);
860-
REGISTER_DISPATCH(adaptive_avg_pool3d_backward_kernel, &adapative_avg_pool3d_backward_kernel_impl);
857+
REGISTER_DISPATCH(adaptive_avg_pool2d_kernel, &adaptive_avg_pool2d_kernel_impl)
858+
REGISTER_DISPATCH(adaptive_avg_pool2d_backward_kernel, &adapative_avg_pool2d_backward_kernel_impl)
859+
REGISTER_DISPATCH(adaptive_avg_pool3d_kernel, &adaptive_avg_pool3d_kernel_impl)
860+
REGISTER_DISPATCH(adaptive_avg_pool3d_backward_kernel, &adapative_avg_pool3d_backward_kernel_impl)
861861

862862
} // at::native

aten/src/ATen/native/cpu/AdaptiveMaxPoolKernel.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -980,9 +980,9 @@ void adaptive_max_pool3d_backward_kernel_impl(
980980

981981
} // anonymous namespace
982982

983-
REGISTER_DISPATCH(adaptive_max_pool2d_kernel, &adaptive_max_pool2d_kernel_impl);
984-
REGISTER_DISPATCH(adaptive_max_pool2d_backward_kernel, &adaptive_max_pool2d_backward_kernel_impl);
985-
REGISTER_DISPATCH(adaptive_max_pool3d_kernel, &adaptive_max_pool3d_kernel_impl);
986-
REGISTER_DISPATCH(adaptive_max_pool3d_backward_kernel, &adaptive_max_pool3d_backward_kernel_impl);
983+
REGISTER_DISPATCH(adaptive_max_pool2d_kernel, &adaptive_max_pool2d_kernel_impl)
984+
REGISTER_DISPATCH(adaptive_max_pool2d_backward_kernel, &adaptive_max_pool2d_backward_kernel_impl)
985+
REGISTER_DISPATCH(adaptive_max_pool3d_kernel, &adaptive_max_pool3d_kernel_impl)
986+
REGISTER_DISPATCH(adaptive_max_pool3d_backward_kernel, &adaptive_max_pool3d_backward_kernel_impl)
987987

988988
} // at::native

aten/src/ATen/native/cpu/AmpGradScalerKernels.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,7 @@ at::Tensor& _amp_update_scale_cpu_kernel(
192192

193193
} // namespace
194194

195-
REGISTER_DISPATCH(_amp_foreach_non_finite_check_and_unscale_cpu_stub, &_amp_foreach_non_finite_check_and_unscale_cpu_kernel);
196-
REGISTER_DISPATCH(_amp_update_scale_cpu_stub, &_amp_update_scale_cpu_kernel);
195+
REGISTER_DISPATCH(_amp_foreach_non_finite_check_and_unscale_cpu_stub, &_amp_foreach_non_finite_check_and_unscale_cpu_kernel)
196+
REGISTER_DISPATCH(_amp_update_scale_cpu_stub, &_amp_update_scale_cpu_kernel)
197197

198198
} // namespace at::native

aten/src/ATen/native/cpu/AvgPoolKernel.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -1130,9 +1130,9 @@ void avg_pool3d_backward_kernel_impl(
11301130

11311131
} // anonymous namespace
11321132

1133-
REGISTER_DISPATCH(avg_pool2d_kernel, &avg_pool2d_kernel_impl);
1134-
REGISTER_DISPATCH(avg_pool2d_backward_kernel, &avg_pool2d_backward_kernel_impl);
1135-
REGISTER_DISPATCH(avg_pool3d_kernel, &avg_pool3d_kernel_impl);
1136-
REGISTER_DISPATCH(avg_pool3d_backward_kernel, &avg_pool3d_backward_kernel_impl);
1133+
REGISTER_DISPATCH(avg_pool2d_kernel, &avg_pool2d_kernel_impl)
1134+
REGISTER_DISPATCH(avg_pool2d_backward_kernel, &avg_pool2d_backward_kernel_impl)
1135+
REGISTER_DISPATCH(avg_pool3d_kernel, &avg_pool3d_kernel_impl)
1136+
REGISTER_DISPATCH(avg_pool3d_backward_kernel, &avg_pool3d_backward_kernel_impl)
11371137

11381138
} // at::native

0 commit comments

Comments
 (0)