
Commit c28a439

kiszk authored and pytorchmergebot committed
Fix typo under aten/src/ATen/native directory (pytorch#119686)
This PR fixes typos in comments and messages under the `aten/src/ATen/native` directory.

Pull Request resolved: pytorch#119686
Approved by: https://github.com/lezcano, https://github.com/malfet
1 parent 389b56b · commit c28a439

File tree: 123 files changed (+216 -216 lines)


aten/src/ATen/native/BatchLinearAlgebra.cpp (+5 -5)

@@ -2772,7 +2772,7 @@ Tensor linalg_eigvalsh(const Tensor& A, c10::string_view uplo) {

 Tensor& linalg_eigvalsh_out(const Tensor& A, c10::string_view uplo, Tensor& L) {
 auto V = at::empty({0}, A.options());
-at::_linalg_eigh_out(L, V, A, uplo, /*comptue_v=*/false);
+at::_linalg_eigh_out(L, V, A, uplo, /*compute_v=*/false);
 return L;
 }

@@ -3153,7 +3153,7 @@ TORCH_IMPL_FUNC(_linalg_svd_out)(const Tensor& A,
 TORCH_CHECK(use_cusolver || !driver.has_value(),
 "torch.linalg.svd: keyword argument `driver=` is only supported on CUDA inputs with cuSOLVER backend.");

-// A always needs to be copied as its contents will be destroyed during the computaton of the SVD
+// A always needs to be copied as its contents will be destroyed during the computation of the SVD
 // Now, MAGMA needs the copy to be on CPU, while cuSOLVER needs it to be on CUDA, so we'll defer
 // the copy as a column major matrix to the backends.
 const auto info = at::zeros(IntArrayRef(A.sizes().begin(), A.sizes().end() - 2), A.options().dtype(kInt));
@@ -3202,7 +3202,7 @@ Tensor& linalg_svdvals_out(const Tensor& A, c10::optional<c10::string_view> driv
 // Dummies
 auto U = at::empty({0}, A.options());
 auto Vh = at::empty({0}, A.options());
-at::_linalg_svd_out(U, S, Vh, A, /*full_matrices=*/false, /*comptue_uv=*/false, /*driver=*/driver);
+at::_linalg_svd_out(U, S, Vh, A, /*full_matrices=*/false, /*compute_uv=*/false, /*driver=*/driver);
 return S;
 }

@@ -3900,7 +3900,7 @@ Tensor& linalg_solve_triangular_out(
 }

 // No need to conjugate anything if out_f is conj as AX = conj(B) <=> conj(A)conj(X) = B
-// and X = B after the algortihm. We just anotate that A is conjugated later on
+// and X = B after the algorithm. We just annotate that A is conjugated later on
 // The solution will be written into out_f, so it'll be conjugated already

 Tensor A_f = std::move(A_); // The A that will go into fortran
@@ -3909,7 +3909,7 @@ Tensor& linalg_solve_triangular_out(
 bool A_is_neg = A_f.is_neg() != out_f.is_neg();
 bool A_is_f_contig = (A_f.stride(-1) == 1) == transpose_A;
 if C10_UNLIKELY (!is_row_or_column_contiguous(A_f)) {
-// We first anotate with flags on A_f all the conj / transpose / neg coming from out
+// We first annotate with flags on A_f all the conj / transpose / neg coming from out
 // and then we clone the resulting tensor to resolve all of them in memory
 if (out_f.is_conj()) {
 A_f = A_f.conj();
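
Aside: the comment fixed above in `linalg_solve_triangular_out` is restating the identity obtained by conjugating both sides of the system,

$$ A X = \overline{B} \iff \overline{A}\,\overline{X} = B, $$

which is why the code only annotates A as conjugated instead of materializing a conjugated copy.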

aten/src/ATen/native/BinaryOps.cpp (+1 -1)

@@ -1416,7 +1416,7 @@ Tensor& comparison_op_(Tensor& self, const Scalar& other, OutImpl& out_impl) {
 }

 // We need explicit cast to OutFunc because each *_out func is overloaded twice. Without An explicit cast, merely
-// referring to *_out function is ambiguious.
+// referring to *_out function is ambiguous.
 using OutFunc = std::add_const<Tensor&(&)(Tensor&, const Tensor&, const Tensor&)>::type;

 // less, alias for torch.lt

aten/src/ATen/native/Bucketization.cpp (+1 -1)

@@ -162,7 +162,7 @@ Tensor& searchsorted_out_cpu(
 return result;
 }

-// for non-contiguous result tensors, we write the output to a contiguous copy so we can later copy back, maintaing the original result tensor
+// for non-contiguous result tensors, we write the output to a contiguous copy so we can later copy back, maintaining the original result tensor
 Tensor out = result;
 if (!result.is_contiguous()) {
 out = result.contiguous();

aten/src/ATen/native/CPUFallback.cpp (+1 -1)

@@ -167,7 +167,7 @@ void cpu_fallback(const c10::OperatorHandle& op, torch::jit::Stack* stack, bool
 // the temporary CPU output tensor that we created.
 //
 // Note [CPU Fallback Does Not Handle View Operators]
-// Also note that we are incapable of handling immutable alises properly.
+// Also note that we are incapable of handling immutable aliases properly.
 // Why?
 // Schemas with an immutable alias'd tensor outputs correspond to view operators.
 // For example, the `view_as` schema from native_functions.yaml:

aten/src/ATen/native/ChanelShuffle.cpp (+1 -1)

@@ -69,7 +69,7 @@ Tensor math_channel_shuffle(const Tensor& self, int64_t groups) {
 // It is not clear, however from initial looking around it feels that
 // this may not be correct.
 // In this case channels last will likely require custom implementation
-// if we want to preseve the memory order.
+// if we want to preserve the memory order.
 // XNNPACK has channel shuffle op for NHWC. For mobile usecase this is good.
 // For server we will have to do a custom implementation.
 // For ChannelsFirst, a.k.a Contiguous, memory format we will also need

aten/src/ATen/native/CompositeRandomAccessorCommon.h (+1 -1)

@@ -118,7 +118,7 @@ class CompositeRandomAccessor {
 using value_type = composite_value_type;
 using reference = references_holder<composite_value_type, composite_reference>;
 // Note that CompositeRandomAccessor does not hold key and values
-// in a specific datastrcture, which means that a pointer to a (key, value)
+// in a specific datastructure, which means that a pointer to a (key, value)
 // is not defined. Hence we just use a pointer type of the KeyAccessor.
 using pointer = typename std::iterator_traits<KeyAccessor>::pointer;
 using difference_type = typename std::iterator_traits<KeyAccessor>::difference_type;

aten/src/ATen/native/Distance.cpp (+1 -1)

@@ -316,7 +316,7 @@ Tensor cosine_similarity(const Tensor& x1_, const Tensor& x2_, int64_t dim, doub
 // We want to divide each tensor by its norm first, as it's more numerically stable.
 // This keeps the result between -1.0 and 1.0
 // We clone them, as we're going to modify them in-place
-// This allows the gradients to propagate propertly all the way to x1 and x2
+// This allows the gradients to propagate properly all the way to x1 and x2
 auto x1_norm = at::linalg_vector_norm(*x1, 2, /*dim=*/dim, /*keepdim=*/true).clone();
 auto x2_norm = at::linalg_vector_norm(*x2, 2, /*dim=*/dim, /*keepdim=*/true).clone();
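
Aside: the stability note in the comment above amounts to computing

$$ \operatorname{cosine\_similarity}(x_1, x_2) = \Big\langle \tfrac{x_1}{\lVert x_1 \rVert_2}, \tfrac{x_2}{\lVert x_2 \rVert_2} \Big\rangle, $$

i.e. each input is divided by its own norm before the inner product, rather than dividing $\langle x_1, x_2 \rangle$ by $\lVert x_1 \rVert_2 \lVert x_2 \rVert_2$ afterwards, which keeps the result between -1.0 and 1.0.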

aten/src/ATen/native/Dropout.cpp (+1 -1)

@@ -99,7 +99,7 @@ ALIAS_SPECIALIZATION(_feature_dropout, true, false)
 ALIAS_SPECIALIZATION(_alpha_dropout, false, true )
 ALIAS_SPECIALIZATION(_feature_alpha_dropout, true, true )

-} // anomymous namepsace
+} // anonymous namespace

 std::tuple<Tensor,Tensor>
 native_dropout_cpu(const Tensor& input, double p, c10::optional<bool> train) {

aten/src/ATen/native/GridSampler.cpp (+5 -5)

@@ -181,7 +181,7 @@ namespace {
 int64_t iy_nearest = static_cast<int64_t>(std::nearbyint(iy));
 int64_t iz_nearest = static_cast<int64_t>(std::nearbyint(iz));

-// assign nearest neighor pixel value to output pixel
+// assign nearest neighbour pixel value to output pixel
 scalar_t *out_ptr_NCDHW = out_ptr + n * out_sN + d * out_sD + h * out_sH + w * out_sW;
 scalar_t *inp_ptr_NC = inp_ptr_N;
 for (int64_t c = 0; c < C; ++c, out_ptr_NCDHW += out_sC, inp_ptr_NC += inp_sC) {
@@ -422,7 +422,7 @@ namespace {
 int64_t iy_nearest = static_cast<int64_t>(std::nearbyint(iy));
 int64_t iz_nearest = static_cast<int64_t>(std::nearbyint(iz));

-// assign nearest neighor pixel value to output pixel
+// assign nearest neighbour pixel value to output pixel
 scalar_t *gOut_ptr_NCDHW = gOut_ptr + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW;
 if (input_requires_grad) {
 scalar_t *gInp_ptr_NC = gInp_ptr + n * gInp_sN;
@@ -652,7 +652,7 @@ Tensor _grid_sampler_2d_cpu_fallback(const Tensor& input, const Tensor& grid,
 int64_t ix_nearest = static_cast<int64_t>(std::nearbyint(ix));
 int64_t iy_nearest = static_cast<int64_t>(std::nearbyint(iy));

-// assign nearest neighor pixel value to output pixel
+// assign nearest neighbour pixel value to output pixel
 scalar_t *out_ptr_NCHW = out_ptr + n * out_sN + h * out_sH + w * out_sW;
 scalar_t *inp_ptr_NC = inp_ptr_N;
 for (int64_t c = 0; c < C; ++c, out_ptr_NCHW += out_sC, inp_ptr_NC += inp_sC) {
@@ -682,7 +682,7 @@ Tensor _grid_sampler_2d_cpu_fallback(const Tensor& input, const Tensor& grid,
 // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
 scalar_t coefficients[4];

-// Interpolate 4 values in the x directon
+// Interpolate 4 values in the x direction
 for (const auto i : c10::irange(4)) {
 coefficients[i] = cubic_interp1d<scalar_t>(
 get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners),
@@ -847,7 +847,7 @@ _grid_sampler_2d_cpu_fallback_backward(const Tensor& grad_output,
 int64_t ix_nearest = static_cast<int64_t>(std::nearbyint(ix));
 int64_t iy_nearest = static_cast<int64_t>(std::nearbyint(iy));

-// assign nearest neighor pixel value to output pixel
+// assign nearest neighbour pixel value to output pixel
 scalar_t *gOut_ptr_NCHW = gOut_ptr + n * gOut_sN + h * gOut_sH + w * gOut_sW;
 scalar_t *gInp_ptr_NC = gInp_ptr + n * gInp_sN;
 for (int64_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, gInp_ptr_NC += gInp_sC) {

aten/src/ATen/native/Linear.cpp (+1 -1)

@@ -814,7 +814,7 @@ Tensor tensordot(const Tensor& input1, const Tensor& input2, IntArrayRef dims1,
 rsizes.emplace_back(t2.sym_size(i));
 }
 }
-// permut and reshape for matrix multiplication
+// permute and reshape for matrix multiplication
 t1 = t1.permute(p1).reshape_symint({size1, csize});
 t2 = t2.permute(p2).reshape_symint({csize, size2});
 // multiply and reshape to target size

aten/src/ATen/native/LinearAlgebra.cpp (+6 -6)

@@ -1022,7 +1022,7 @@ Tensor multi_dot_impl(TensorList _tensors, c10::optional<Tensor> _out) {

 // If the last and last tensors have shapes (a, b) and (b, c) the
 // output has shape (a, c). If either the first or last tensor is 1D
-// a and/or c dimensions will be implicitely size 1 and will be ommited
+// a and/or c dimensions will be implicitly size 1 and will be omitted
 // from the output. e.g. for inputs (a, b) x (b) the output has shape (a,).
 at::native::resize_output(out, out_shape);

@@ -1809,7 +1809,7 @@ static inline void bmm_out_or_baddbmm_(const Tensor& self_or_result_, const Tens
 * vs. other threads, leading to undefined behavior.
 * Thus it is recommended to not use at::parallel_for where lambdas do
 * ops that go through dispatcher.
-* For now we circument this by InferenceMode guard in order to unlock
+* For now we circumvent this by InferenceMode guard in order to unlock
 * performance.
 * Longer term we probably want a separate API that explicitly calls out
 * the TLS that it propagates.
@@ -1946,7 +1946,7 @@ static bool should_fold(const Tensor& tensor1, const Tensor& tensor2, bool has_o
 // The output gradient g of this operation would have shape [b, m, k]
 // The backward wrt. t2 of bmm would be given by t1.mH @ g, which has shape [b, n, k]
 // Then, the backward of expand is simply `sum(0)`. As such, we are instantiating a tensor
-// of shape [b, n, k] unnacessarily, which may cause a large memory footprint, and in the
+// of shape [b, n, k] unnecessarily, which may cause a large memory footprint, and in the
 // worst case, an OOM
 bool t2_requires_grad = tensor1_larger ? tensor2.requires_grad() : tensor1.requires_grad();
 if (t2_requires_grad && !has_out) {
@@ -2602,7 +2602,7 @@ Tensor compute_T18_scale_square(
 auto scs = section_values. template data_ptr<int64_t>();
 auto pts = &scs[section_numel];

-// We now will do the matrix muplication in a batch, with above example:
+// We now will do the matrix multiplication in a batch, with above example:
 // 1. Multiply all matrices by 0 (`mul_times[0]`) times, then do `slice`
 // to get the remain matrices by acc[1:] (`split_counts[0]`),
 // 2. Multiply remain matrices by 1 times and slice to acc[2:]
@@ -2761,7 +2761,7 @@ Tensor backward_analytic_function_of_a_matrix(
 } // end anon namespace

 // Computes the matrix exponential for a given batch of squared matrices.
-// The implementaion is based on:
+// The implementation is based on:
 //
 // Bader, P.; Blanes, S.; Casas, F.
 // Computing the Matrix Exponential with an Optimized Taylor Polynomial Approximation.
@@ -2812,7 +2812,7 @@ TORCH_IMPL_FUNC(linalg_vector_norm_out)(const Tensor& self, const Scalar& scalar
 // Reductions always use `std::abs` to compute the absolute value. In the backward of this
 // function, we need to locate the index that was selected as the largest value. To do so
 // we do self.abs() == result to locate the index of the largest element.
-// Now, self.abs() may dispatch to a vectorized implementation which gives sliiightly different
+// Now, self.abs() may dispatch to a vectorized implementation which gives slightly different
 // results to the std::abs(std::complex<T>) implementation.
 // As such, to be able to compute the correct index in the backward, we need to use self.abs()
 // both in the forward and in the backward

aten/src/ATen/native/NaiveConvolutionTranspose2d.cpp (+4 -4)

@@ -304,7 +304,7 @@ void slow_conv_transpose2d_out_cpu_template(
 at::parallel_for(0, batch_size, 0, [&](int64_t begin, int64_t end) {
 // For each elt in batch, do:
 for (const auto elt : c10::irange(begin, end)) {
-// Matrix mulitply per output:
+// Matrix multiply per output:
 Tensor input_n = input_.select(0, elt);
 Tensor output_n = output.select(0, elt);
 Tensor columns_n = columns.select(0, elt);
@@ -501,7 +501,7 @@ static void slow_conv_transpose2d_backward_out_cpu_template(

 // For each elt in batch, do:
 for (const auto elt : c10::irange(batch_size)) {
-// Matrix mulitply per sample:
+// Matrix multiply per sample:
 grad_input_n = grad_input.select(0, elt);
 grad_output_n = grad_output.select(0, elt);

@@ -695,12 +695,12 @@ void slow_conv_transpose2d_acc_grad_parameters_cpu(

 // For each elt in batch, do:
 for (const auto elt : c10::irange(batch_size)) {
-// Matrix mulitply per output:
+// Matrix multiply per output:
 grad_output_n = grad_output.select(0, elt);

 // Do Weight:
 if (grad_weight.defined()) {
-// Matrix mulitply per output:
+// Matrix multiply per output:
 input_n = input.select(0, elt);

 if (need_columns) {

aten/src/ATen/native/ReduceOps.cpp (+4 -4)

@@ -173,7 +173,7 @@ static void check_result_is_bytebool(const char* name, const Tensor& self, const

 // Note [all, any : uint8 compatibility]:
 // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-// For NumPy comptability, `all` and `any` return
+// For NumPy compatibility, `all` and `any` return
 // Tensor of dtype `bool`. However for compatibility reason,
 // for `uint8`, they return Tensor of same dtype `uint8`.
 // Reference: https://github.com/pytorch/pytorch/pull/47878#issuecomment-747108561
@@ -510,7 +510,7 @@ static Tensor reversed_cumsum(const Tensor& w, int64_t dim) {
 Tensor cumprod_backward(const Tensor& grad, const Tensor& input, int64_t dim, const Tensor& output) {
 /*
 We show here how to derive an O(n) gradient formula for
-abitrary inputs. It follows via a basic application of the
+arbitrary inputs. It follows via a basic application of the
 chain rule together with a number of observations for different
 cases. We assume that x is an n-dimensional vector and y = cumprod(x).
 In the actual implementation we will need to play a bit with masks
@@ -527,7 +527,7 @@ Tensor cumprod_backward(const Tensor& grad, const Tensor& input, int64_t dim, co
 The term dF / dy_j is just grad_output[j] (assuming again
 everything is one-dimensional).

-The term (dy_j / dx_k) is easilly seen to be
+The term (dy_j / dx_k) is easily seen to be

 if j >= k
 dy_j / dx_k = prod_{1 <= i <= j, i != k} x_i
@@ -589,7 +589,7 @@ Tensor cumprod_backward(const Tensor& grad, const Tensor& input, int64_t dim, co

 dy_j / dx_z1 = prod(x[:z1]) * (grad_output[z1] + sum(grad_output[z1+1:z2] * cumprod(x[z1+1:z2])))

-When the imputs are complex, this is map is holomorphic. As such, to compute
+When the inputs are complex, this is map is holomorphic. As such, to compute
 its backwards is just the conjugate of the usual backwards. This simplifies to
 conjugating the input. We may also reuse the output as, since the map is holomorphic,
 cumprod(input.conj()) = cumprod(input).conj()
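
Aside: the derivation quoted above is the chain rule applied to $y = \operatorname{cumprod}(x)$,

$$ \frac{\partial F}{\partial x_k} = \sum_j \frac{\partial F}{\partial y_j}\,\frac{\partial y_j}{\partial x_k} = \sum_{j \ge k} \text{grad\_output}[j] \prod_{1 \le i \le j,\ i \ne k} x_i, $$

since $y_j = \prod_{i \le j} x_i$ does not depend on $x_k$ for $j < k$.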

aten/src/ATen/native/SobolEngineOpsUtils.cpp (+1 -1)

@@ -3,7 +3,7 @@
 #include <ATen/native/SobolEngineOpsUtils.h>

 /*
-The direction nubmers in this file were generated using the
+The direction numbers in this file were generated using the
 python script below (thius this assumes that the file
 https://web.maths.unsw.edu.au/~fkuo/sobol/new-joe-kuo-6.21201
 is present in the working directory). For additional details see [1].

aten/src/ATen/native/TensorConversions.cpp (+2 -2)

@@ -339,7 +339,7 @@ Tensor _to_copy(
 }
 // See Note [Explicit nullopt MemoryFormat argument]
 // TODO: empty_quantized does not work here. It raises an exception in CheckMemoryFormat.h prior to
-// empty_affine_quantizd/_empty_per_channel_affine_quantized calls
+// empty_affine_quantized/_empty_per_channel_affine_quantized calls
 // at::empty also does not work here because there is no proper at::empty support for quantized tensors
 // as it would return a quantized tensor with an UnknownQuantizer
 auto r = self.is_quantized() ? at::empty_like(self, memory_format)
@@ -653,7 +653,7 @@ Tensor sparse_compressed_to_dense(
 dense = dense.reshape(dense_reshaped_sizes);

 // Calculate batch, row and column indices for non-zeros in the
-// sparse matrix, and use these to calculate correspoding indices
+// sparse matrix, and use these to calculate corresponding indices
 // into the dense matrix reshaped as above. Then, update dense
 // matrix by adding sparse matrix values into elements with indices
 // calculated this way.

aten/src/ATen/native/TensorIteratorDynamicCasting.h (+1 -1)

@@ -7,7 +7,7 @@
 #include <ATen/native/TensorIterator.h>


-// This file includes utilties for dynamic_casting done by TensorIterator, see CUDALoops.cuh and Loops.h.
+// This file includes utilities for dynamic_casting done by TensorIterator, see CUDALoops.cuh and Loops.h.

 // dynamic_casting handles when the types expected by the iterator do not match the types of the arguments
 // to the function that is being called.

aten/src/ATen/native/TensorIteratorReduce.cpp (+1 -1)

@@ -59,7 +59,7 @@ static void two_pass_reduction(TensorIteratorBase& iter, loop2d_t loop) {
 auto shape = first_reduce.shape();
 auto strides = first_reduce.get_strides();

-// Bump output ptr so each thread has its own ouput slice
+// Bump output ptr so each thread has its own output slice
 auto base_ptrs = first_reduce.get_base_ptrs();
 base_ptrs[0] += buffer_stride * thread_num;
