Commit 548c460

XuehaiPan authored and pytorchmergebot committed
[BE][Easy][7/19] enforce style for empty lines in import segments in test/[a-c]*/ and test/[q-z]*/ (pytorch#129758)
See pytorch#129751 (comment). Most changes are auto-generated by the linter. You can review these PRs via:

```bash
git diff --ignore-all-space --ignore-blank-lines HEAD~1
```

Pull Request resolved: pytorch#129758
Approved by: https://github.com/ezyang
1 parent 46994e7 commit 548c460

84 files changed: +75 -119 lines (large commit; only a subset of the 84 file diffs is shown below)
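As a quick illustration, the style rule enforced here (see pytorch#129751) removes blank lines *within* an import segment and separates the import block from the first module-level statement by exactly two blank lines. Below is a minimal sketch of a conforming file header, modeled on the hunks that follow; the `nn.Linear` usage at the end is hypothetical filler, not code from this commit:

```python
import logging  # standard-library segment

import torch  # third-party segment, separated from stdlib by one blank line
from torch import nn  # no blank lines between imports within a segment


# exactly two blank lines between the import block and module-level code
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
layer = nn.Linear(4, 2)  # hypothetical module-level code
print(layer(torch.randn(1, 4)).shape)  # torch.Size([1, 2])
```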


test/ao/sparsity/test_activation_sparsifier.py (+1)

```diff
@@ -13,6 +13,7 @@
 from torch.ao.pruning.sparsifier.utils import module_to_fqn
 from torch.testing._internal.common_utils import skipIfTorchDynamo, TestCase

+
 logging.basicConfig(
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
 )
```

test/ao/sparsity/test_composability.py (+1)

```diff
@@ -16,6 +16,7 @@
 )
 from torch.testing._internal.common_utils import TestCase

+
 logging.basicConfig(
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
 )
```

test/ao/sparsity/test_data_scheduler.py (+1 -1)

```diff
@@ -8,10 +8,10 @@
 import torch
 from torch import nn
 from torch.ao.pruning._experimental.data_scheduler import BaseDataScheduler
-
 from torch.ao.pruning._experimental.data_sparsifier import DataNormSparsifier
 from torch.testing._internal.common_utils import TestCase

+
 logging.basicConfig(
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
 )
```

test/ao/sparsity/test_data_sparsifier.py (+1 -2)

```diff
@@ -4,12 +4,10 @@
 import itertools
 import logging
 import math
-
 from typing import Tuple

 import torch
 from torch import nn
-
 from torch.ao.pruning._experimental.data_sparsifier import (
     BaseDataSparsifier,
     DataNormSparsifier,
@@ -20,6 +18,7 @@
 from torch.nn.utils.parametrize import is_parametrized
 from torch.testing._internal.common_utils import TestCase

+
 logging.basicConfig(
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
 )
```

test/ao/sparsity/test_kernels.py (+1 -2)

```diff
@@ -9,7 +9,6 @@

 import torch
 import torch.ao.quantization as tq
-
 from torch import nn
 from torch.ao.pruning.sparsifier.utils import fqn_to_module
 from torch.testing._internal.common_quantized import (
@@ -20,9 +19,9 @@
     qengine_is_qnnpack,
     qengine_is_x86,
 )
-
 from torch.testing._internal.common_utils import run_tests, skipIfTorchDynamo, TestCase

+
 # TODO: Once more test files are created, move the contents to a ao folder.

 logging.basicConfig(
```

test/ao/sparsity/test_parametrization.py (+1 -1)

```diff
@@ -4,12 +4,12 @@
 import logging

 import torch
-
 from torch import nn
 from torch.ao.pruning.sparsifier import utils
 from torch.nn.utils import parametrize
 from torch.testing._internal.common_utils import TestCase

+
 logging.basicConfig(
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
 )
```

test/ao/sparsity/test_scheduler.py (-2)

```diff
@@ -3,9 +3,7 @@
 import warnings

 from torch import nn
-
 from torch.ao.pruning import BaseScheduler, CubicSL, LambdaSL, WeightNormSparsifier
-
 from torch.testing._internal.common_utils import TestCase


```

test/ao/sparsity/test_sparsifier.py (+1 -1)

```diff
@@ -18,9 +18,9 @@
     MockSparseLinear,
     SimpleLinear,
 )
-
 from torch.testing._internal.common_utils import TestCase

+
 logging.basicConfig(
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
 )
```

test/ao/sparsity/test_sparsity_utils.py (+1 -1)

```diff
@@ -9,7 +9,6 @@
     get_arg_info_from_tensor_fqn,
     module_to_fqn,
 )
-
 from torch.testing._internal.common_quantization import (
     ConvBnReLUModel,
     ConvModel,
@@ -21,6 +20,7 @@
 )
 from torch.testing._internal.common_utils import TestCase

+
 logging.basicConfig(
     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
 )
```

test/ao/sparsity/test_structured_sparsifier.py (-1)

```diff
@@ -29,7 +29,6 @@
     SimpleConv2d,
     SimpleLinear,
 )
-
 from torch.testing._internal.common_utils import skipIfTorchDynamo, TestCase


```

test/autograd/test_complex.py (-1)

```diff
@@ -1,7 +1,6 @@
 # Owner(s): ["module: autograd"]

 import torch
-
 from torch.testing._internal.common_utils import gradcheck, run_tests, TestCase


```

test/autograd/test_functional.py (+1 -1)

```diff
@@ -6,7 +6,6 @@

 import torch
 import torch.autograd.functional as autogradF
-
 from torch.testing._internal.common_cuda import TEST_CUDA
 from torch.testing._internal.common_utils import (
     gradcheck,
@@ -19,6 +18,7 @@
 )
 from torch.testing._internal.logging_tensor import LoggingTensor

+
 # Utilities for parametrizing the tensor constructors used in autograd tests
 #
 # TODO: maybe move somewhere so other tests can also use
```

test/bottleneck_test/test.py (+1)

```diff
@@ -2,5 +2,6 @@

 import torch

+
 x = torch.ones((3, 3), requires_grad=True)
 (3 * x).sum().backward()
```

test/bottleneck_test/test_args.py (+1)

```diff
@@ -4,6 +4,7 @@

 import torch

+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
```

test/cpp/aoti_inference/compile_model.py (-1)

```diff
@@ -1,5 +1,4 @@
 import torch
-
 from torch.export import Dim


```

test/cpp/aoti_inference/test.py (+1)

```diff
@@ -2,6 +2,7 @@
 from torch._export import aot_compile
 from torch.export import Dim

+
 torch.manual_seed(1337)


```

test/cpp/api/init_baseline.py (+1)

```diff
@@ -4,6 +4,7 @@

 import torch

+
 HEADER = """
 #include <torch/types.h>
```

test/cpp_api_parity/functional_impl_check.py (+1 -1)

```diff
@@ -21,7 +21,6 @@
 from string import Template

 import torch
-
 from cpp_api_parity.sample_functional import SAMPLE_FUNCTIONAL_CPP_SOURCE
 from cpp_api_parity.utils import (
     add_test,
@@ -40,6 +39,7 @@
     try_remove_folder,
 )

+
 # Expected substitutions:
 #
 # ${functional_variant_name} (e.g. `BCELoss_no_reduce`)
```

test/cpp_api_parity/module_impl_check.py (+1 -1)

```diff
@@ -21,7 +21,6 @@
 from string import Template

 import torch
-
 from cpp_api_parity.sample_module import SAMPLE_MODULE_CPP_SOURCE
 from cpp_api_parity.utils import (
     add_test,
@@ -40,6 +39,7 @@
     try_remove_folder,
 )

+
 # Expected substitutions:
 #
 # ${module_variant_name} (e.g. `Linear_no_bias_cpu`)
```

test/cpp_api_parity/parity_table_parser.py (+1)

```diff
@@ -1,5 +1,6 @@
 from collections import namedtuple

+
 ParityStatus = namedtuple("ParityStatus", ["has_impl_parity", "has_doc_parity"])

 """
```

test/cpp_api_parity/sample_functional.py (+1)

```diff
@@ -2,6 +2,7 @@
 import torch.nn.functional as F
 from torch.testing._internal.common_nn import wrap_functional

+
 """
 `sample_functional` is used by `test_cpp_api_parity.py` to test that Python / C++ API
 parity test harness works for `torch.nn.functional` functions.
```

test/cpp_api_parity/sample_module.py (+1)

```diff
@@ -1,5 +1,6 @@
 import torch

+
 """
 `SampleModule` is used by `test_cpp_api_parity.py` to test that Python / C++ API
 parity test harness works for `torch.nn.Module` subclasses.
```

test/cpp_api_parity/utils.py (+1)

```diff
@@ -9,6 +9,7 @@
 import torch.utils.cpp_extension
 from torch.testing._internal.common_cuda import TEST_CUDA

+
 # Note that this namedtuple is for C++ parity test mechanism's internal use.
 # For guidance on how to add a new C++ parity test, please see
 # NOTE [How to check NN module / functional API parity between Python and C++ frontends]
```

test/cpp_extensions/no_python_abi_suffix_test/setup.py (+1)

```diff
@@ -2,6 +2,7 @@

 from torch.utils.cpp_extension import BuildExtension, CppExtension

+
 setup(
     name="no_python_abi_suffix_test",
     ext_modules=[
```

test/cpp_extensions/setup.py (+1)

```diff
@@ -13,6 +13,7 @@
     ROCM_HOME,
 )

+
 if sys.platform == "win32":
     vc_version = os.getenv("VCToolsVersion", "")
     if vc_version.startswith("14.16."):
```

test/custom_operator/my_custom_ops.py (+1)

```diff
@@ -2,6 +2,7 @@

 import torch

+
 torch.ops.load_library(get_custom_op_library_path())


```
test/custom_operator/my_custom_ops2.py

+1
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
import torch
44

5+
56
torch.ops.load_library(get_custom_op_library_path())
67

78

test/custom_operator/pointwise.py (+1)

```diff
@@ -2,6 +2,7 @@

 import torch

+
 torch.ops.load_library(get_custom_op_library_path())


```

test/custom_operator/test_custom_ops.py (+1)

```diff
@@ -12,6 +12,7 @@
 from torch import ops
 from torch.testing._internal.common_utils import IS_WINDOWS, run_tests, TestCase

+
 torch.ops.import_module("pointwise")


```

test/quantization/bc/test_backward_compatibility.py (-1)

```diff
@@ -12,7 +12,6 @@
 import torch.ao.nn.quantized.dynamic as nnqd
 import torch.ao.quantization.quantize_fx as quantize_fx
 import torch.nn as nn
-
 from torch.ao.quantization import MinMaxObserver, PerChannelMinMaxObserver
 from torch.fx import GraphModule
 from torch.testing._internal.common_quantization import skipIfNoFBGEMM
```

test/quantization/core/experimental/test_adaround_eager.py (-1)

```diff
@@ -7,7 +7,6 @@
 from torch.ao.quantization.experimental.adaround_optimization import (
     AdaptiveRoundingOptimizer,
 )
-
 from torch.nn import functional as F
 from torch.quantization.observer import MinMaxObserver
 from torch.testing._internal.common_quantization import QuantizationTestCase
```

test/quantization/core/experimental/test_float8.py (+1)

```diff
@@ -17,6 +17,7 @@
     TestCase,
 )

+
 FLOAT8_DTYPES = [
     torch.float8_e5m2,
     torch.float8_e5m2fnuz,
```

test/quantization/jit/test_ondevice_quantization.py (-6)

```diff
@@ -5,25 +5,19 @@

 import torch
 import torch._C
-
 from torch.ao.quantization import default_dynamic_qconfig, per_channel_dynamic_qconfig
-
 from torch.ao.quantization.quantize_jit import (
     _prepare_ondevice_dynamic_jit,
     _quantize_ondevice_dynamic_jit,
     convert_dynamic_jit,
     prepare_dynamic_jit,
 )
-
 from torch.jit.mobile import _load_for_lite_interpreter, LiteScriptModule
-
 from torch.testing import FileCheck
-
 from torch.testing._internal.common_quantization import (
     get_script_module,
     LinearAddModel,
 )
-
 from torch.testing._internal.common_utils import TestCase
 from torch.utils import bundled_inputs as bundled_inputs

```

test/quantization/jit/test_quantize_jit.py (-4)

```diff
@@ -43,13 +43,10 @@
     prepare_jit,
     script_qconfig,
 )
-
 from torch.jit._recursive import wrap_cpp_module
-
 from torch.testing import FileCheck

 # Annotated models
-
 from torch.testing._internal.common_quantization import (
     AnnotatedConvBnModel,
     AnnotatedConvModel,
@@ -76,7 +73,6 @@
     qengine_is_fbgemm,
     qengine_is_qnnpack,
 )
-
 from torch.testing._internal.common_utils import set_default_dtype
 from torch.testing._internal.jit_utils import (
     attrs_with_prefix,
```

test/quantization/pt2e/test_duplicate_dq.py (-2)

```diff
@@ -5,7 +5,6 @@

 import torch
 from torch._export import capture_pre_autograd_graph
-
 from torch.ao.quantization.observer import (
     HistogramObserver,
     MinMaxObserver,
@@ -25,7 +24,6 @@
     OP_TO_ANNOTATOR,
     QuantizationConfig,
 )
-
 from torch.testing._internal.common_quantization import QuantizationTestCase
 from torch.testing._internal.common_utils import IS_WINDOWS

```

test/quantization/pt2e/test_graph_utils.py (-1)

```diff
@@ -4,7 +4,6 @@

 import torch
 import torch._dynamo as torchdynamo
-
 from torch.ao.quantization.pt2e.graph_utils import (
     find_sequential_partitions,
     get_equivalent_types,
```

0 commit comments
