@@ -110,7 +110,7 @@ def test_s_prep_before_q_prep(self):
         self.assertTrue(hasattr(mod[5], "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(hasattr(mod[5], "activation_post_process"))

         _squash_mask_calibrate_and_convert(
@@ -141,7 +141,7 @@ def test_convert_without_squash_mask(self):
         self.assertTrue(hasattr(mod[5], "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(hasattr(mod[5], "activation_post_process"))
         sparsifier.step()
         sparsity_level = _calculate_sparsity(mod[5].weight)
@@ -180,7 +180,7 @@ def test_s_prep_before_fusion(self):
         self.assertTrue(hasattr(mod[5][0], "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(hasattr(mod[5], "activation_post_process"))
         _squash_mask_calibrate_and_convert(
             mod, sparsifier, torch.randn(1, 4, 4, 4)
@@ -221,7 +221,7 @@ def test_fusion_before_s_prep(self):
         self.assertTrue(hasattr(mod[5][0], "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(hasattr(mod[5], "activation_post_process"))
         sparsifier.step()
         sparsity_level = _calculate_sparsity(mod[5][0].weight)
@@ -242,7 +242,7 @@ def test_fusion_before_s_prep(self):

     # This tests whether performing sparse prepare before qat prepare causes issues.
     # The primary worries were that qat_prep wouldn't recognize the parametrized
-    # modules and that the convert step for qat would remove the paramerizations
+    # modules and that the convert step for qat would remove the parametrizations
     # from the modules.
     def test_s_prep_before_qat_prep(self):
         (
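For context, the eager-mode flow exercised by test_s_prep_before_qat_prep looks roughly like the sketch below. It is a minimal illustration under stated assumptions, not the test's actual fixture: the tiny model, the sparsifier settings, and the "2.weight" target are invented here, and the import paths follow the recent torch.ao layout; only the torch.ao calls and the two survival checks mirror what the test asserts.

import torch
import torch.nn as nn
import torch.ao.quantization as tq
from torch.ao.pruning import WeightNormSparsifier

# Hypothetical stand-in for the test fixture.
model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4))

# Sparse prepare first: attaches a weight-mask parametrization to the target.
sparsifier = WeightNormSparsifier(sparsity_level=0.5, sparse_block_shape=(1, 4))
sparsifier.prepare(model, config=[{"tensor_fqn": "2.weight"}])
assert hasattr(model[2], "parametrizations")

# QAT prepare second: the worry was that the module swap would drop the
# parametrization, or that qat prepare would not recognize the module at all.
model.train()
model.qconfig = tq.get_default_qat_qconfig("fbgemm")
tq.prepare_qat(model, inplace=True)
assert isinstance(model[2], torch.ao.nn.qat.Linear)
assert hasattr(model[2], "parametrizations")  # parametrization survived the swap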
@@ -258,7 +258,7 @@ def test_s_prep_before_qat_prep(self):
         self.assertTrue(hasattr(mod[5], "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(hasattr(mod[5], "activation_post_process"))
         self.assertTrue(isinstance(mod[5], torch.ao.nn.qat.Linear))
         _squash_mask_calibrate_and_convert(
@@ -297,7 +297,7 @@ def test_qat_prep_before_s_prep(self):
         self.assertTrue(hasattr(mod[5], "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(hasattr(mod[5], "activation_post_process"))
         self.assertTrue(isinstance(mod[5], torch.ao.nn.qat.Linear))

@@ -366,7 +366,7 @@ def test_q_prep_fx_before_s_prep(self):
         self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(_module_has_activation_post_process(mod, "5"))
         sparsifier.step()
         sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
@@ -424,7 +424,7 @@ def test_q_prep_fx_s_prep_ref_conv(self):
         self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(_module_has_activation_post_process(mod, "5"))
         sparsifier.step()
         sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
@@ -470,7 +470,7 @@ def test_s_prep_before_q_prep_fx(self):
         self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(_module_has_activation_post_process(mod, "5"))
         sparsifier.step()
         sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
@@ -516,7 +516,7 @@ def test_s_prep_before_qat_prep_fx(self):
         self.assertTrue(isinstance(fqn_to_module(mod, "5"), torch.ao.nn.intrinsic.qat.LinearReLU))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(_module_has_activation_post_process(mod, "5"))
         sparsifier.step()
         sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.weight"))
@@ -561,7 +561,7 @@ def test_s_prep_q_prep_fx_ref(self):
         self.assertTrue(hasattr(fqn_to_module(mod, "5.0"), "parametrizations"))

         # check that correct observers were inserted and that matching
-        # occured successfully
+        # occurred successfully
         self.assertTrue(_module_has_activation_post_process(mod, "5"))
         sparsifier.step()
         sparsity_level = _calculate_sparsity(fqn_to_module(mod, "5.0.weight"))
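The FX-mode tests above follow the same composability pattern through prepare_fx/convert_fx. Again as a rough sketch under assumed fixtures (the model, sparsifier config, and target fqn are illustrative, and import paths follow the recent torch.ao layout; the real tests use fqn_to_module and their own qconfig mapping):

import torch
import torch.nn as nn
from torch.ao.pruning import WeightNormSparsifier
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import convert_fx, prepare_fx

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4))

# Sparse prepare, then FX quantize prepare on top of the parametrized model.
sparsifier = WeightNormSparsifier(sparsity_level=0.5, sparse_block_shape=(1, 4))
sparsifier.prepare(model, config=[{"tensor_fqn": "2.weight"}])

model.eval()
example_inputs = (torch.randn(1, 4),)
model = prepare_fx(model, get_default_qconfig_mapping(), example_inputs)

model(*example_inputs)    # calibrate the inserted observers
sparsifier.step()         # update masks; sparsity shows through the parametrization
sparsifier.squash_mask()  # fold masks into the weights, removing the parametrizations
model = convert_fx(model)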