
Commit b0f84aa

cyyever authored and pytorchmergebot committed
[3/N] Fix Wextra-semi warnings (pytorch#139165)
Fixes #ISSUE_NUMBER

Pull Request resolved: pytorch#139165
Approved by: https://github.com/ezyang
1 parent 5861279 commit b0f84aa
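
For context: clang's -Wextra-semi flags semicolons that form empty declarations, most often a ';' after invoking a macro whose expansion already ends in one, or after a function definition's closing brace. A minimal sketch of the macro case (hypothetical names, not code from this diff):

// Hypothetical macro whose expansion already ends in a semicolon,
// mirroring the REGISTER_*/DECLARE_* style macros touched in this commit.
#define DECLARE_COUNTER(name) extern int name;

DECLARE_COUNTER(hits);   // expands to `extern int hits;;` -> -Wextra-semi
DECLARE_COUNTER(misses)  // expands to `extern int misses;` -> clean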


67 files changed (+503 -503 lines)

aten/src/ATen/AccumulateType.h (+75 -75)
@@ -86,84 +86,84 @@ using acc_type = typename AccumulateType<T, is_cuda>::type;
 #define CUDA_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CUDA)
 #define CPU_ACC_TYPE(t, acc_t) ACC_TYPE(t, acc_t, c10::DeviceType::CPU)
 
-MPS_ACC_TYPE(BFloat16, float);
-MPS_ACC_TYPE(Half, float);
-MPS_ACC_TYPE(Float8_e5m2, float);
-MPS_ACC_TYPE(Float8_e4m3fn, float);
-MPS_ACC_TYPE(Float8_e5m2fnuz, float);
-MPS_ACC_TYPE(Float8_e4m3fnuz, float);
-MPS_ACC_TYPE(float, float);
-MPS_ACC_TYPE(double, float);
-MPS_ACC_TYPE(int8_t, int64_t);
-MPS_ACC_TYPE(uint8_t, int64_t);
-MPS_ACC_TYPE(char, int64_t);
-MPS_ACC_TYPE(int16_t, int64_t);
-MPS_ACC_TYPE(int32_t, int64_t);
-MPS_ACC_TYPE(int64_t, int64_t);
-MPS_ACC_TYPE(bool, bool);
-MPS_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
-MPS_ACC_TYPE(c10::complex<float>, c10::complex<float>);
-MPS_ACC_TYPE(c10::complex<double>, c10::complex<float>);
-
-XPU_ACC_TYPE(BFloat16, float);
-XPU_ACC_TYPE(Half, float);
-XPU_ACC_TYPE(Float8_e5m2, float);
-XPU_ACC_TYPE(Float8_e4m3fn, float);
-XPU_ACC_TYPE(Float8_e5m2fnuz, float);
-XPU_ACC_TYPE(Float8_e4m3fnuz, float);
-XPU_ACC_TYPE(float, float);
-XPU_ACC_TYPE(double, double);
-XPU_ACC_TYPE(int8_t, int64_t);
-XPU_ACC_TYPE(uint8_t, int64_t);
-XPU_ACC_TYPE(char, int64_t);
-XPU_ACC_TYPE(int16_t, int64_t);
-XPU_ACC_TYPE(int32_t, int64_t);
-XPU_ACC_TYPE(int64_t, int64_t);
-XPU_ACC_TYPE(bool, bool);
-XPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
-XPU_ACC_TYPE(c10::complex<float>, c10::complex<float>);
-XPU_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+MPS_ACC_TYPE(BFloat16, float)
+MPS_ACC_TYPE(Half, float)
+MPS_ACC_TYPE(Float8_e5m2, float)
+MPS_ACC_TYPE(Float8_e4m3fn, float)
+MPS_ACC_TYPE(Float8_e5m2fnuz, float)
+MPS_ACC_TYPE(Float8_e4m3fnuz, float)
+MPS_ACC_TYPE(float, float)
+MPS_ACC_TYPE(double, float)
+MPS_ACC_TYPE(int8_t, int64_t)
+MPS_ACC_TYPE(uint8_t, int64_t)
+MPS_ACC_TYPE(char, int64_t)
+MPS_ACC_TYPE(int16_t, int64_t)
+MPS_ACC_TYPE(int32_t, int64_t)
+MPS_ACC_TYPE(int64_t, int64_t)
+MPS_ACC_TYPE(bool, bool)
+MPS_ACC_TYPE(c10::complex<Half>, c10::complex<float>)
+MPS_ACC_TYPE(c10::complex<float>, c10::complex<float>)
+MPS_ACC_TYPE(c10::complex<double>, c10::complex<float>)
+
+XPU_ACC_TYPE(BFloat16, float)
+XPU_ACC_TYPE(Half, float)
+XPU_ACC_TYPE(Float8_e5m2, float)
+XPU_ACC_TYPE(Float8_e4m3fn, float)
+XPU_ACC_TYPE(Float8_e5m2fnuz, float)
+XPU_ACC_TYPE(Float8_e4m3fnuz, float)
+XPU_ACC_TYPE(float, float)
+XPU_ACC_TYPE(double, double)
+XPU_ACC_TYPE(int8_t, int64_t)
+XPU_ACC_TYPE(uint8_t, int64_t)
+XPU_ACC_TYPE(char, int64_t)
+XPU_ACC_TYPE(int16_t, int64_t)
+XPU_ACC_TYPE(int32_t, int64_t)
+XPU_ACC_TYPE(int64_t, int64_t)
+XPU_ACC_TYPE(bool, bool)
+XPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>)
+XPU_ACC_TYPE(c10::complex<float>, c10::complex<float>)
+XPU_ACC_TYPE(c10::complex<double>, c10::complex<double>)
 
 #if defined(__CUDACC__) || defined(__HIPCC__)
-CUDA_ACC_TYPE(half, float);
+CUDA_ACC_TYPE(half, float)
 #endif
-CUDA_ACC_TYPE(BFloat16, float);
-CUDA_ACC_TYPE(Half, float);
-CUDA_ACC_TYPE(Float8_e5m2, float);
-CUDA_ACC_TYPE(Float8_e4m3fn, float);
-CUDA_ACC_TYPE(Float8_e5m2fnuz, float);
-CUDA_ACC_TYPE(Float8_e4m3fnuz, float);
-CUDA_ACC_TYPE(float, float);
-CUDA_ACC_TYPE(double, double);
-CUDA_ACC_TYPE(int8_t, int64_t);
-CUDA_ACC_TYPE(uint8_t, int64_t);
-CUDA_ACC_TYPE(char, int64_t);
-CUDA_ACC_TYPE(int16_t, int64_t);
-CUDA_ACC_TYPE(int32_t, int64_t);
-CUDA_ACC_TYPE(int64_t, int64_t);
-CUDA_ACC_TYPE(bool, bool);
-CUDA_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
-CUDA_ACC_TYPE(c10::complex<float>, c10::complex<float>);
-CUDA_ACC_TYPE(c10::complex<double>, c10::complex<double>);
-
-CPU_ACC_TYPE(BFloat16, float);
-CPU_ACC_TYPE(Half, float);
-CPU_ACC_TYPE(Float8_e5m2, float);
-CPU_ACC_TYPE(Float8_e4m3fn, float);
-CPU_ACC_TYPE(Float8_e5m2fnuz, float);
-CPU_ACC_TYPE(Float8_e4m3fnuz, float);
-CPU_ACC_TYPE(float, double);
-CPU_ACC_TYPE(double, double);
-CPU_ACC_TYPE(int8_t, int64_t);
-CPU_ACC_TYPE(uint8_t, int64_t);
-CPU_ACC_TYPE(char, int64_t);
-CPU_ACC_TYPE(int16_t, int64_t);
-CPU_ACC_TYPE(int32_t, int64_t);
-CPU_ACC_TYPE(int64_t, int64_t);
-CPU_ACC_TYPE(bool, bool);
-CPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>);
-CPU_ACC_TYPE(c10::complex<float>, c10::complex<double>);
-CPU_ACC_TYPE(c10::complex<double>, c10::complex<double>);
+CUDA_ACC_TYPE(BFloat16, float)
+CUDA_ACC_TYPE(Half, float)
+CUDA_ACC_TYPE(Float8_e5m2, float)
+CUDA_ACC_TYPE(Float8_e4m3fn, float)
+CUDA_ACC_TYPE(Float8_e5m2fnuz, float)
+CUDA_ACC_TYPE(Float8_e4m3fnuz, float)
+CUDA_ACC_TYPE(float, float)
+CUDA_ACC_TYPE(double, double)
+CUDA_ACC_TYPE(int8_t, int64_t)
+CUDA_ACC_TYPE(uint8_t, int64_t)
+CUDA_ACC_TYPE(char, int64_t)
+CUDA_ACC_TYPE(int16_t, int64_t)
+CUDA_ACC_TYPE(int32_t, int64_t)
+CUDA_ACC_TYPE(int64_t, int64_t)
+CUDA_ACC_TYPE(bool, bool)
+CUDA_ACC_TYPE(c10::complex<Half>, c10::complex<float>)
+CUDA_ACC_TYPE(c10::complex<float>, c10::complex<float>)
+CUDA_ACC_TYPE(c10::complex<double>, c10::complex<double>)
+
+CPU_ACC_TYPE(BFloat16, float)
+CPU_ACC_TYPE(Half, float)
+CPU_ACC_TYPE(Float8_e5m2, float)
+CPU_ACC_TYPE(Float8_e4m3fn, float)
+CPU_ACC_TYPE(Float8_e5m2fnuz, float)
+CPU_ACC_TYPE(Float8_e4m3fnuz, float)
+CPU_ACC_TYPE(float, double)
+CPU_ACC_TYPE(double, double)
+CPU_ACC_TYPE(int8_t, int64_t)
+CPU_ACC_TYPE(uint8_t, int64_t)
+CPU_ACC_TYPE(char, int64_t)
+CPU_ACC_TYPE(int16_t, int64_t)
+CPU_ACC_TYPE(int32_t, int64_t)
+CPU_ACC_TYPE(int64_t, int64_t)
+CPU_ACC_TYPE(bool, bool)
+CPU_ACC_TYPE(c10::complex<Half>, c10::complex<float>)
+CPU_ACC_TYPE(c10::complex<float>, c10::complex<double>)
+CPU_ACC_TYPE(c10::complex<double>, c10::complex<double>)
 
 TORCH_API c10::ScalarType toAccumulateType(
     c10::ScalarType type,
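
The ACC_TYPE-family macros expand to a complete template specialization that carries its own trailing semicolon, so the old call-site ';' left an empty declaration behind. A simplified sketch of the shape involved (hypothetical, not the exact AccumulateType.h definition):

template <typename T>
struct AccumulateTypeSketch;  // primary template, specialized per type

// The macro body is a full specialization that already ends in ';', so
// `ACC_TYPE_SKETCH(float, double);` would expand to `};;` and trip
// clang's -Wextra-semi. Dropping the call-site ';' silences it.
#define ACC_TYPE_SKETCH(t, acc_t)  \
  template <>                      \
  struct AccumulateTypeSketch<t> { \
    using type = acc_t;            \
  };

ACC_TYPE_SKETCH(float, double)  // no trailing semicolon needed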

aten/src/ATen/EmptyTensor.cpp (+1 -1)
@@ -343,7 +343,7 @@ struct MetaAllocator final : public at::Allocator {
 
 static MetaAllocator g_meta_alloc;
 
-REGISTER_ALLOCATOR(kMeta, &g_meta_alloc);
+REGISTER_ALLOCATOR(kMeta, &g_meta_alloc)
 
 TensorBase empty_meta(IntArrayRef size, ScalarType dtype,
     std::optional<c10::MemoryFormat> memory_format_opt) {

aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h (+2 -2)
@@ -1137,8 +1137,8 @@ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vec
     data += Vectorized<float>::size(); \
     load_fp32_from_##name(data, out2); \
   }
-LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
-LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
+LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16)
+LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16)
 
 #endif
 }} // namsepace at::vec::CPU_CAPABILITY

aten/src/ATen/detail/CPUGuardImpl.cpp (+1 -1)
@@ -2,6 +2,6 @@
 
 namespace at::detail {
 
-C10_REGISTER_GUARD_IMPL(CPU, c10::impl::NoOpDeviceGuardImpl<DeviceType::CPU>);
+C10_REGISTER_GUARD_IMPL(CPU, c10::impl::NoOpDeviceGuardImpl<DeviceType::CPU>)
 
 } // namespace at::detail

aten/src/ATen/detail/MetaGuardImpl.cpp (+1 -1)
@@ -3,6 +3,6 @@
 
 namespace at::detail {
 
-C10_REGISTER_GUARD_IMPL(Meta, c10::impl::NoOpDeviceGuardImpl<DeviceType::Meta>);
+C10_REGISTER_GUARD_IMPL(Meta, c10::impl::NoOpDeviceGuardImpl<DeviceType::Meta>)
 
 } // namespace at::detail

aten/src/ATen/functorch/BatchRulesLoss.cpp (+4 -4)
@@ -47,7 +47,7 @@ loss_batch_rule_helper(const at::Tensor& self, std::optional<int64_t> self_bdim,
     return std::make_tuple(result.mean(-1), 0);
   }
   TORCH_INTERNAL_ASSERT(false);
-};
+}
 
 static std::tuple<at::Tensor, std::optional<int64_t>>
 mse_loss_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bdim, const at::Tensor& target,
@@ -56,7 +56,7 @@ mse_loss_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bdim, co
       reduction, [](const at::Tensor& self, const at::Tensor& target, int64_t reduction) {
         return at::mse_loss(self, target, reduction);
       });
-};
+}
 
 static std::tuple<at::Tensor, std::optional<int64_t>>
 huber_loss_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bdim, const at::Tensor& target,
@@ -65,7 +65,7 @@ huber_loss_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bdim,
      reduction, [delta](const at::Tensor& self, const at::Tensor& target, int64_t reduction) {
        return at::huber_loss(self, target, reduction, delta);
      });
-};
+}
 
 static std::tuple<at::Tensor, std::optional<int64_t>>
 smooth_l1_loss_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bdim, const at::Tensor& target,
@@ -74,7 +74,7 @@ smooth_l1_loss_batch_rule(const at::Tensor& self, std::optional<int64_t> self_bd
      reduction, [beta](const at::Tensor& self, const at::Tensor& target, int64_t reduction) {
        return at::smooth_l1_loss(self, target, reduction, beta);
      });
-};
+}
 
 static Tensor apply_loss_reduction(const at::Tensor& unreduced, int64_t reduction) {
   if (reduction == at::Reduction::Mean) {
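
These functorch hunks fix the other common flavor: a ';' after a function definition's closing brace is itself an empty declaration at namespace scope. A minimal hypothetical illustration:

static int add(int a, int b) {
  return a + b;
};  // extra ';' after the function body: -Wextra-semi

static int sub(int a, int b) {
  return a - b;
}   // clean: the closing brace alone ends the definition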

aten/src/ATen/native/AmpKernels.h (+2 -2)
@@ -21,8 +21,8 @@ using _amp_update_scale_cpu__fn = Tensor& (*)(
     double,
     int64_t);
 
-DECLARE_DISPATCH(_amp_foreach_non_finite_check_and_unscale_cpu__fn, _amp_foreach_non_finite_check_and_unscale_cpu_stub);
-DECLARE_DISPATCH(_amp_update_scale_cpu__fn, _amp_update_scale_cpu_stub);
+DECLARE_DISPATCH(_amp_foreach_non_finite_check_and_unscale_cpu__fn, _amp_foreach_non_finite_check_and_unscale_cpu_stub)
+DECLARE_DISPATCH(_amp_update_scale_cpu__fn, _amp_update_scale_cpu_stub)
 
 } // namespace native
 } // namespace at

aten/src/ATen/native/BatchLinearAlgebra.h (+15 -15)
@@ -226,32 +226,32 @@ void blasTriangularSolve(char side, char uplo, char trans, char diag, int n, int
 #endif
 
 using cholesky_fn = void (*)(const Tensor& /*input*/, const Tensor& /*info*/, bool /*upper*/);
-DECLARE_DISPATCH(cholesky_fn, cholesky_stub);
+DECLARE_DISPATCH(cholesky_fn, cholesky_stub)
 
 using cholesky_inverse_fn = Tensor& (*)(Tensor& /*result*/, Tensor& /*infos*/, bool /*upper*/);
 
-DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub);
+DECLARE_DISPATCH(cholesky_inverse_fn, cholesky_inverse_stub)
 
 using linalg_eig_fn = void (*)(Tensor& /*eigenvalues*/, Tensor& /*eigenvectors*/, Tensor& /*infos*/, const Tensor& /*input*/, bool /*compute_eigenvectors*/);
 
-DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub);
+DECLARE_DISPATCH(linalg_eig_fn, linalg_eig_stub)
 
 using geqrf_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/);
-DECLARE_DISPATCH(geqrf_fn, geqrf_stub);
+DECLARE_DISPATCH(geqrf_fn, geqrf_stub)
 
 using orgqr_fn = Tensor& (*)(Tensor& /*result*/, const Tensor& /*tau*/);
-DECLARE_DISPATCH(orgqr_fn, orgqr_stub);
+DECLARE_DISPATCH(orgqr_fn, orgqr_stub)
 
 using ormqr_fn = void (*)(const Tensor& /*input*/, const Tensor& /*tau*/, const Tensor& /*other*/, bool /*left*/, bool /*transpose*/);
-DECLARE_DISPATCH(ormqr_fn, ormqr_stub);
+DECLARE_DISPATCH(ormqr_fn, ormqr_stub)
 
 using linalg_eigh_fn = void (*)(
     const Tensor& /*eigenvalues*/,
     const Tensor& /*eigenvectors*/,
     const Tensor& /*infos*/,
     bool /*upper*/,
     bool /*compute_eigenvectors*/);
-DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub);
+DECLARE_DISPATCH(linalg_eigh_fn, linalg_eigh_stub)
 
 using lstsq_fn = void (*)(
     const Tensor& /*a*/,
@@ -261,7 +261,7 @@ using lstsq_fn = void (*)(
     Tensor& /*infos*/,
     double /*rcond*/,
     std::string /*driver_name*/);
-DECLARE_DISPATCH(lstsq_fn, lstsq_stub);
+DECLARE_DISPATCH(lstsq_fn, lstsq_stub)
 
 using triangular_solve_fn = void (*)(
     const Tensor& /*A*/,
@@ -270,35 +270,35 @@ using triangular_solve_fn = void (*)(
     bool /*upper*/,
     TransposeType /*transpose*/,
     bool /*unitriangular*/);
-DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub);
+DECLARE_DISPATCH(triangular_solve_fn, triangular_solve_stub)
 
 using lu_factor_fn = void (*)(
     const Tensor& /*input*/,
     const Tensor& /*pivots*/,
     const Tensor& /*infos*/,
     bool /*compute_pivots*/);
-DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub);
+DECLARE_DISPATCH(lu_factor_fn, lu_factor_stub)
 
 using unpack_pivots_fn = void(*)(
     TensorIterator& iter,
     const int64_t dim_size,
     const int64_t max_pivot);
-DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub);
+DECLARE_DISPATCH(unpack_pivots_fn, unpack_pivots_stub)
 
 using lu_solve_fn = void (*)(
     const Tensor& /*LU*/,
     const Tensor& /*pivots*/,
     const Tensor& /*B*/,
     TransposeType /*trans*/);
-DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub);
+DECLARE_DISPATCH(lu_solve_fn, lu_solve_stub)
 
 using ldl_factor_fn = void (*)(
     const Tensor& /*LD*/,
     const Tensor& /*pivots*/,
     const Tensor& /*info*/,
     bool /*upper*/,
     bool /*hermitian*/);
-DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub);
+DECLARE_DISPATCH(ldl_factor_fn, ldl_factor_stub)
 
 using svd_fn = void (*)(
     const Tensor& /*A*/,
@@ -309,13 +309,13 @@ using svd_fn = void (*)(
     const Tensor& /*S*/,
     const Tensor& /*Vh*/,
     const Tensor& /*info*/);
-DECLARE_DISPATCH(svd_fn, svd_stub);
+DECLARE_DISPATCH(svd_fn, svd_stub)
 
 using ldl_solve_fn = void (*)(
     const Tensor& /*LD*/,
     const Tensor& /*pivots*/,
     const Tensor& /*result*/,
     bool /*upper*/,
     bool /*hermitian*/);
-DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub);
+DECLARE_DISPATCH(ldl_solve_fn, ldl_solve_stub)
 } // namespace at::native
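
This header shows the convention the series standardizes on: the declaration macro supplies its own terminator, and call sites drop theirs. The opposite design, ending the macro without ';' so each call site must write exactly one, is also common; a hypothetical sketch for contrast (not PyTorch's DECLARE_DISPATCH):

// Hypothetical: the expansion ends without ';', so the call site's
// semicolon is required rather than redundant.
#define DECLARE_STUB_SKETCH(fn_type, name) \
  extern fn_type name

using unary_fn = void (*)(int);
DECLARE_STUB_SKETCH(unary_fn, my_stub);  // ';' required here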
