
Commit deb800e

kiszk authored and pytorchmergebot committed
Fix typo under test directory (pytorch#111304)
This PR fixes typos in comments under the `test` directory.

Pull Request resolved: pytorch#111304
Approved by: https://github.com/Skylion007
1 parent 1e70f4d · commit deb800e

25 files changed: +65 -65 lines changed

test/ao/sparsity/test_activation_sparsifier.py (+2, -2)

@@ -172,7 +172,7 @@ def _check_squash_mask(self, activation_sparsifier, data):
 data (torch tensor)
 dummy batched data
 """
-# create a forward hook for checking ouput == layer(input * mask)
+# create a forward hook for checking output == layer(input * mask)
 def check_output(name):
 mask = activation_sparsifier.get_mask(name)
 features = activation_sparsifier.data_groups[name].get('features')
@@ -271,7 +271,7 @@ def reduce_fn(x):
 return torch.mean(x, dim=0)

 def _vanilla_norm_sparsifier(data, sparsity_level):
-r"""Similar to data norm spasifier but block_shape = (1,1).
+r"""Similar to data norm sparsifier but block_shape = (1,1).
 Simply, flatten the data, sort it and mask out the values less than threshold
 """
 data_norm = torch.abs(data).flatten()
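The docstring fixed in the second hunk describes the whole algorithm of that helper (flatten, sort by magnitude, mask values below a threshold). A minimal sketch of that idea in plain PyTorch; the function name, rounding, and clamping here are assumptions for illustration, not the test's actual code:

    import torch

    def vanilla_norm_sparsify(data, sparsity_level):
        # Flatten, rank entries by magnitude, and zero out everything below the
        # threshold so that roughly `sparsity_level` of the entries are masked.
        data_norm = torch.abs(data).flatten()
        sorted_vals, _ = torch.sort(data_norm)
        k = min(int(round(sparsity_level * sorted_vals.numel())), sorted_vals.numel() - 1)
        threshold = sorted_vals[k]
        mask = (torch.abs(data) >= threshold).to(data.dtype)
        return data * mask, mask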

test/ao/sparsity/test_composability.py (+12, -12)

@@ -110,7 +110,7 @@ def test_s_prep_before_q_prep(self):
 self.assertTrue(hasattr(mod[5], "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(hasattr(mod[5], "activation_post_process"))

 _squash_mask_calibrate_and_convert(
@@ -141,7 +141,7 @@ def test_convert_without_squash_mask(self):
 self.assertTrue(hasattr(mod[5], "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(hasattr(mod[5], "activation_post_process"))
 sparsifier.step()
 sparsity_level = _calculate_sparsity(mod[5].weight)
@@ -180,7 +180,7 @@ def test_s_prep_before_fusion(self):
 self.assertTrue(hasattr(mod[5][0], "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(hasattr(mod[5], "activation_post_process"))
 _squash_mask_calibrate_and_convert(
 mod, sparsifier, torch.randn(1, 4, 4, 4)
@@ -221,7 +221,7 @@ def test_fusion_before_s_prep(self):
 self.assertTrue(hasattr(mod[5][0], "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(hasattr(mod[5], "activation_post_process"))
 sparsifier.step()
 sparsity_level = _calculate_sparsity(mod[5][0].weight)
@@ -242,7 +242,7 @@ def test_fusion_before_s_prep(self):

 # This tests whether performing sparse prepare before qat prepare causes issues.
 # The primary worries were that qat_prep wouldn't recognize the parametrized
-# modules and that the convert step for qat would remove the paramerizations
+# modules and that the convert step for qat would remove the parametrizations
 # from the modules.
 def test_s_prep_before_qat_prep(self):
 (
@@ -258,7 +258,7 @@ def test_s_prep_before_qat_prep(self):
 self.assertTrue(hasattr(mod[5], "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(hasattr(mod[5], "activation_post_process"))
 self.assertTrue(isinstance(mod[5], torch.ao.nn.qat.Linear))
 _squash_mask_calibrate_and_convert(
@@ -297,7 +297,7 @@ def test_qat_prep_before_s_prep(self):
 self.assertTrue(hasattr(mod[5], "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(hasattr(mod[5], "activation_post_process"))
 self.assertTrue(isinstance(mod[5], torch.ao.nn.qat.Linear))

@@ -366,7 +366,7 @@ def test_q_prep_fx_before_s_prep(self):
 self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(_module_has_activation_post_process(mod, "5"))
 sparsifier.step()
 sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
@@ -424,7 +424,7 @@ def test_q_prep_fx_s_prep_ref_conv(self):
 self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(_module_has_activation_post_process(mod, "5"))
 sparsifier.step()
 sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
@@ -470,7 +470,7 @@ def test_s_prep_before_q_prep_fx(self):
 self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(_module_has_activation_post_process(mod, "5"))
 sparsifier.step()
 sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
@@ -516,7 +516,7 @@ def test_s_prep_before_qat_prep_fx(self):
 self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.ao.nn.intrinsic.qat.LinearReLU))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(_module_has_activation_post_process(mod, "5"))
 sparsifier.step()
 sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.weight"))
@@ -561,7 +561,7 @@ def test_s_prep_q_prep_fx_ref(self):
 self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

 # check that correct observers were inserted and that matching
-# occured successfully
+# occurred successfully
 self.assertTrue(_module_has_activation_post_process(mod, "5"))
 sparsifier.step()
 sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
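All of these hunks assert the same composability contract: a module that already carries a pruning parametrization must still receive observers when quantization prepare runs. A minimal eager-mode sketch of that check follows; the toy model, the WeightNormSparsifier defaults, and the backend string are assumptions for illustration, not the test's actual fixtures:

    import torch
    import torch.ao.quantization as tq
    from torch.ao.pruning import WeightNormSparsifier

    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    model.eval()

    # sparse prepare first: attaches a weight parametrization to the target tensor
    sparsifier = WeightNormSparsifier()
    sparsifier.prepare(model, [{"tensor_fqn": "0.weight"}])
    assert hasattr(model[0], "parametrizations")

    # quantization prepare second: observers should still be inserted, i.e.
    # "matching occurred successfully" despite the parametrized weight
    model.qconfig = tq.get_default_qconfig("fbgemm")
    tq.prepare(model, inplace=True)
    assert hasattr(model[0], "activation_post_process")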

test/ao/sparsity/test_data_sparsifier.py (+2, -2)

@@ -518,7 +518,7 @@ def test_ptq_sparsify_first(self):
 This unit test checks that
 1. Embeddings and EmbeddingBags are sparsified to the right sparsity levels
 2. Embeddings and EmbeddingBags are quantized
-3. Linear modules are not quanitzed
+3. Linear modules are not quantized
 """
 model = Model()

@@ -557,7 +557,7 @@ def test_ptq_quantize_first(self):
 This unit test checks that
 1. Embeddings and EmbeddingBags are sparsified to the right sparsity levels
 2. Embeddings and EmbeddingBags are quantized
-3. Linear modules are not quanitzed
+3. Linear modules are not quantized
 """
 model = Model()
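As a side note on what check 3 means in practice: after embedding-only post-training quantization, a Linear that stayed in float keeps its eager type, while quantized modules get swapped for their quantized counterparts. A small sketch of such an assertion (the helper name is invented for illustration):

    import torch.nn as nn
    from torch.ao.nn.quantized import Linear as QuantizedLinear

    def assert_linear_not_quantized(module):
        # a float Linear keeps its eager nn.Linear type; a quantized swap
        # would have replaced it with the quantized Linear wrapper
        assert isinstance(module, nn.Linear) and not isinstance(module, QuantizedLinear)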

test/ao/sparsity/test_parametrization.py (+1, -1)

@@ -108,7 +108,7 @@ def test_state_dict_preserved(self):
 assert hasattr(model_load.seq[1], 'parametrizations')
 assert parametrize.is_parametrized(model_load.linear, 'weight')

-# Check the weigths are preserved
+# Check the weights are preserved
 self.assertEqual(model_save.linear.parametrizations['weight'].original,
 model_load.linear.parametrizations['weight'].original)
 self.assertEqual(model_save.seq[0].parametrizations['weight'].original,
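For context on the `.parametrizations['weight'].original` access pattern this hunk relies on, here is a minimal torch.nn.utils.parametrize example; the Mask parametrization is invented for illustration:

    import torch
    from torch.nn.utils import parametrize

    class Mask(torch.nn.Module):
        def forward(self, w):
            # illustrative parametrization: zero out small weights on the fly
            return w * (w.abs() > 0.1)

    lin = torch.nn.Linear(4, 4)
    parametrize.register_parametrization(lin, "weight", Mask())

    assert parametrize.is_parametrized(lin, "weight")
    # the unmasked tensor survives under .parametrizations['weight'].original,
    # which is exactly what the test compares before and after save/load
    original = lin.parametrizations["weight"].original
    assert original.shape == (4, 4)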

test/ao/sparsity/test_structured_sparsifier.py (+2, -2)

@@ -596,15 +596,15 @@ def test_prune_conv2d_activation_conv2d(self):
 # Conv2d with Activation and no Bias
 configs, shapes = [], []

-# conv2d(no bias) -> activatation -> conv2d(no bias)
+# conv2d(no bias) -> activation -> conv2d(no bias)
 configs.append(
 [
 {"tensor_fqn": "seq.4.weight"},
 ]
 )
 shapes.append((1, 52, 18, 18))

-# conv2d(bias) -> activatation -> conv2d(bias)
+# conv2d(bias) -> activation -> conv2d(bias)
 configs.append(
 [
 {"tensor_fqn": "seq.0.weight"},

test/autograd/test_functional.py (+1, -1)

@@ -43,7 +43,7 @@ def wrapper(*args, **kwargs):
 subtest(logging_tensor_ctors, name="logging_tensor",
 decorators=[unittest.expectedFailure])])

-# NB: This is equivalent to having both @parmetrize("vectorized", [True, False]) and
+# NB: This is equivalent to having both @parametrize("vectorized", [True, False]) and
 # FIXME_base_and_xfail_logging_tensor, except the non-vectorized logging_tensor case is
 # actually expected to succeed
 FIXME_xfail_vectorized_logging_tensor = (

test/cpp/api/dataloader.cpp (+3, -3)

@@ -1344,7 +1344,7 @@ struct Barrier {
 // each worker thread has a unique ID in `0...kNumberOfWorkers-1`.
 // There is a hard-coded ordering, `kOrderInWhichWorkersReturnTheirBatch`, in
 // which we want the worker threads to return. For this, an iterator into this
-// order is maintained. When the derferenced iterator (the current order index)
+// order is maintained. When the dereferenced iterator (the current order index)
 // matches the thread ID of a worker, it knows it can now return its index as
 // well as progress the iterator. Inside the dataloader, the sequencer should
 // buffer these indices such that they are ultimately returned in order.
@@ -2003,7 +2003,7 @@ TEST(DataLoaderTest, ChunkDatasetSave) {
 // statues, there are three possible scenarios for the writer thread:
 // 1. it hasn't started loading the next chunk data yet, so the
 // sequential sampler index is still 0;
-// 2. it started to load the second chunk, so the sequencial sampler
+// 2. it started to load the second chunk, so the sequential sampler
 // index is at 1;
 // 3. it finished loading the second chunk, and start to load the
 // third chunk, because the cache is still fully occupied by the data
@@ -2124,7 +2124,7 @@ TEST(DataLoaderTest, ChunkDatasetCrossChunkShuffle) {
 indices_.resize(size_);
 size_t index = 0;

-// Repeatly sample every 5 indices.
+// Repeatedly sample every 5 indices.
 for (const auto i : c10::irange(batch_size)) {
 for (size_t j = 0; j < size_ / batch_size; ++j) {
 indices_[index++] = i + batch_size * j;
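The Barrier comment in the first hunk of this file describes a small ordering protocol: each worker waits until the dereferenced order iterator matches its own ID, returns, and then advances the iterator. A language-agnostic sketch of the same idea in Python; `order` stands in for the test's `kOrderInWhichWorkersReturnTheirBatch` and every name here is invented for illustration:

    import threading

    order = [2, 0, 1]        # hard-coded order in which workers may "return"
    position = 0             # the "iterator" into that order
    cv = threading.Condition()
    returned = []

    def worker(worker_id):
        global position
        with cv:
            # wait until the dereferenced iterator (order[position]) matches our ID
            cv.wait_for(lambda: order[position] == worker_id)
            returned.append(worker_id)
            position += 1    # progress the iterator so the next worker can go
            cv.notify_all()

    threads = [threading.Thread(target=worker, args=(i,)) for i in range(len(order))]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert returned == order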

test/cpp/api/tensor_flatten.cpp (+1, -1)

@@ -28,7 +28,7 @@ TEST(UnflattenDenseTensorTest, TestEmptyTensor) {
 ASSERT_EQ(unflatten_results.at(2).data_ptr(), nullptr);
 // without fix in unflatten_dense_tensors() for empty tensors,
 // unflattend empty tensor unflatten_results.at(1) will share the same storage
-// as other non-empty tenosr like unflatten_results.at(3).
+// as other non-empty tensor like unflatten_results.at(3).
 // after fix, the empty tensor and non-empty tensor do not share the same
 // storage.
 ASSERT_NE(

test/cpp/c10d/ProcessGroupNCCLTest.cpp (+2, -2)

@@ -438,7 +438,7 @@ void testSparseAllreduce(const std::string& path, int rank, int size) {
 // row indices
 EXPECT_EQ(sizes[1], inputDim);
 } else if (sizes[0] == 2) {
-// coorindate indices
+// coordinate indices
 EXPECT_EQ(sizes[1], inputDim * inputDim);
 }

@@ -489,7 +489,7 @@ void testSparseAllreduceLarge(const std::string& path, int rank, int size) {
 // row indices
 EXPECT_EQ(sizes[1], inputDim);
 } else if (sizes[0] == 2) {
-// coorindate indices
+// coordinate indices
 EXPECT_EQ(sizes[1], inputDim * inputDim);
 }

test/cpp/jit/test_exception.cpp (+1, -1)

@@ -133,7 +133,7 @@ TEST(TestException, TestCustomException) {
 {def},
 // class PythonResolver is defined in
 // torch/csrc/jit/python/script_init.cpp. It's not in a header file so I
-// can not use it. Create a SimpleResolver insteand
+// can not use it. Create a SimpleResolver instead
 {std::make_shared<SimpleResolver>()},
 nullptr);
 torch::jit::GraphFunction* gf =

test/cpp/jit/test_misc.cpp (+1, -1)

@@ -3049,7 +3049,7 @@ TEST(TestFunctionExecutor, RunDecompositionTest) {
 TEST(TestShapeGraphLinting, Basic) {
 auto schemas = RegisteredShapeComputeSchemas();
 for (const auto& schema : schemas) {
-// arange does not acually support complex, leave as
+// arange does not actually support complex, leave as
 // union[int, float] for now
 if (schema->name() == "aten::arange") {
 continue;

test/cpp/jit/test_module_api.cpp (+1, -1)

@@ -77,7 +77,7 @@ TEST(ModuleAPITest, MethodRunAsync) {

 future->wait();

-// expect 2 forks and 2 wait callbacks being excuted on provided taskLauncher
+// expect 2 forks and 2 wait callbacks being executed on provided taskLauncher
 // but ivalue::Future would be marked completed and release wait before
 // finishing all callbacks
 ASSERT_GE(counter, 2);

test/cpp/jit/test_shape_analysis.cpp (+1, -1)

@@ -401,7 +401,7 @@ TEST(ShapeAnalysisTest, SymbolicShapeCaching) {
 EXPECT_EQ(get_shape_cache_size(), 1);

 // Same shape but different symbols should return same shape
-// but different symbolic indicies
+// but different symbolic indices
 res = calculateSymbolicShapesOnOp(schema, {ss2, const_size_2});
 auto res3_val = res->at(0);

test/cpp/lazy/test_lazy_ops_util.cpp (+1, -1)

@@ -21,7 +21,7 @@ std::unordered_set<std::string>* CreateIgnoredCounters() {
 std::unordered_set<std::string>* icounters =
 new std::unordered_set<std::string>();
 // Add below the counters whose name need to be ignored when doing
-// is-any-counter-changed assertins.
+// is-any-counter-changed assertions.
 icounters->insert("aten::rand");
 return icounters;
 }

test/cpp/lite_interpreter_runtime/test_mobile_profiler.cpp (+1, -1)

@@ -32,7 +32,7 @@ bool checkMetaData(
 if (line.find(metadata_val) != std::string::npos ||
 !metadata_val.size()) {
 /* if found the right metadata_val OR if expected
-* metadata value is an empty string then ignore the matadata_val */
+* metadata value is an empty string then ignore the metadata_val */
 return true;
 }
 }

test/cpp/rpc/test_tensorpipe_serialization.cpp (+1, -1)

@@ -72,7 +72,7 @@ TEST(TensorpipeSerialize, Base) {
 recvingTpAllocation.payloads[i];
 if (srcPayload.length) {
 // Empty vector's data() can return nullptr, use the length to avoid
-// coying into nullptr
+// copying into nullptr
 memcpy(dstPayload.data, srcPayload.data, srcPayload.length);
 }
 }

test/cpp/tensorexpr/test_cuda.cpp (+1, -1)

@@ -2119,7 +2119,7 @@ TEST(Cuda, MaskMultiDimMultiAxis_CUDA) {
 std::ostringstream oss;
 oss << *cuda_cg.stmt();

-// Both stores masked agaist the other thread dim < 1.
+// Both stores masked against the other thread dim < 1.
 const std::string& verification_pattern =
 R"IR(
 # CHECK: if (threadIdx.y<1

test/cpp/tensorexpr/test_expr.cpp (+2, -2)

@@ -132,7 +132,7 @@ TEST(Expr, IsChannelsLastContiguous) {
 }
 };

-// channels-last contigous
+// channels-last contiguous
 for (size_t i = 0; i < dims.size(); i++) {
 auto shape_info = shape_gen_fn(dims[i], channels_last_cont_shape_conf);
 for (size_t j = 0; j < shape_info.second.size(); j++) {
@@ -141,7 +141,7 @@ TEST(Expr, IsChannelsLastContiguous) {
 }
 }

-// channels-last non-contigous
+// channels-last non-contiguous
 for (size_t i = 0; i < dims.size(); i++) {
 auto shape_info = shape_gen_fn(dims[i], channels_last_non_cont_shape_conf);
 for (size_t j = 0; j < shape_info.second.size(); j++) {
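For readers less familiar with the distinction the two fixed comments draw, channels-last contiguity is easiest to probe from Python with the standard memory-format API (this is ordinary torch usage, not part of the C++ test above):

    import torch

    # channels-last contiguous: an NCHW tensor whose memory layout is NHWC
    x = torch.randn(2, 3, 4, 5).to(memory_format=torch.channels_last)
    assert x.is_contiguous(memory_format=torch.channels_last)
    assert not x.is_contiguous()  # not contiguous in the default (NCHW) sense

    # channels-last non-contiguous: a strided view breaks the layout
    y = x[:, :, ::2, :]
    assert not y.is_contiguous(memory_format=torch.channels_last)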

test/cpp/tensorexpr/test_ir_verifier.cpp (+1, -1)

@@ -145,7 +145,7 @@ TEST(IRVerifier, Block) {
 StmtPtr block1 = alloc<Block>(std::vector<StmtPtr>({store}));
 // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks)
 StmtPtr block2 = alloc<Block>(std::vector<StmtPtr>({store}));
-// Stmt can't have multiple parrents, thus inserting it into several blocks
+// Stmt can't have multiple parents, thus inserting it into several blocks
 // is illegal
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto,clang-analyzer-cplusplus.NewDeleteLeaks)
 EXPECT_ANY_THROW(verify(block2));

test/cpp/tensorexpr/test_kernel.cpp (+5, -5)

@@ -1113,7 +1113,7 @@ TEST_F(Kernel, Softmax2D) {
 const auto verification_pattern =
 format(verification_template, ver_env);

-// verication sting temporarily disabled until
+// verification sting temporarily disabled until
 // inlining of exp() is benchmarked and determined
 // torch::jit::testing::FileCheck().run(verification_pattern,
 // oss.str());
@@ -1192,7 +1192,7 @@ TEST_F(Kernel, Softmax3D) {
 ver_env.d("softmax_dim_size", softmax_dim_size);
 const auto verification_pattern = format(verification_template, ver_env);

-// verication sting temporarily disabled until
+// verification sting temporarily disabled until
 // inlining of exp() is benchmarked and determined
 // torch::jit::testing::FileCheck().run(verification_pattern, oss.str());

@@ -1275,7 +1275,7 @@ TEST_F(Kernel, Softmax4D) {
 ver_env.d("softmax_dim_size", softmax_dim_size);
 const auto verification_pattern = format(verification_template, ver_env);

-// verication sting temporarily disabled until
+// verification sting temporarily disabled until
 // inlining of exp() is benchmarked and determined
 // torch::jit::testing::FileCheck().run(verification_pattern, oss.str());

@@ -1548,7 +1548,7 @@ TEST_F(Kernel, ConstantTensorsNonContiguous) {
 auto graph = std::make_shared<Graph>();
 parseIR(graph_string, &*graph);
 // IRParser doesn't support tensor constants, so we generate several aten
-// calls to produce non-contiguos constant tensor and then const-prop it
+// calls to produce non-contiguous constant tensor and then const-prop it
 ConstantPropagation(graph);

 TensorExprKernel k(graph);
@@ -1637,7 +1637,7 @@ TEST_F(Kernel, CodegenInspection) {
 auto graph = std::make_shared<Graph>();
 parseIR(graph_string, &*graph);
 // IRParser doesn't support tensor constants, so we generate several aten
-// calls to produce non-contiguos constant tensor and then const-prop it
+// calls to produce non-contiguous constant tensor and then const-prop it
 ConstantPropagation(graph);

 TensorExprKernel k(graph);

test/cpp/tensorexpr/test_loopnest.cpp (+1, -1)

@@ -6292,7 +6292,7 @@ TEST(LoopNest, areLoopsPerfectlyNested) {
 ASSERT_FALSE(LoopNest::areLoopsPerfectlyNested({forI, forK, forJ}));
 ASSERT_FALSE(LoopNest::areLoopsPerfectlyNested({forK, forJ, forI}));

-// Adding a statment to forK body should be OK.
+// Adding a statement to forK body should be OK.
 auto init = Store::make(a_buf, {i, j}, 0);
 forK->body()->insert_stmt_before(init, store);
 ASSERT_TRUE(LoopNest::areLoopsPerfectlyNested({forI, forJ, forK}));
