
Commit 7715561

[ruff] Enable pydoc rules (#3288)
### Changes

Enable the pydocstyle (`D`) rule group: https://docs.astral.sh/ruff/rules/#pydocstyle-d

Ignored rules:

```
"D100",  # undocumented-public-module
"D101",  # undocumented-public-class
"D102",  # undocumented-public-method
"D103",  # undocumented-public-function
"D104",  # undocumented-public-package
"D105",  # undocumented-magic-method
"D106",  # undocumented-public-nested-class
"D107",  # undocumented-public-init
"D200",  # unnecessary-multiline-docstring
"D203",  # incorrect-blank-line-before-class
"D205",  # missing-blank-line-after-summary
"D212",  # multi-line-summary-first-line
"D400",  # missing-trailing-period
"D401",  # non-imperative-mood
"D402",  # signature-in-docstring
"D404",  # docstring-starts-with-this
"D413",  # missing-blank-line-after-last-section
"D415",  # missing-terminal-punctuation
"D417",  # undocumented-param
```

Some rules are left disabled because they assume a docstring style this project does not use, or because enabling them would require adding or changing documentation in too many files.
1 parent 5951153 commit 7715561
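
Most of the removed lines in this commit are the same two mechanical docstring fixes flagged by the newly enabled pydocstyle checks: the blank line separating a docstring from the function body is dropped, and multi-line docstrings are rewritten so the opening quotes stand on their own line. A minimal sketch of the before/after pattern, using a hypothetical function rather than code from the repository:

```
# Before: summary on the opening-quotes line, blank line left after the docstring.
def scale(value: float, factor: float) -> float:
    """Scale a value by a factor.

    :param value: The value to scale.
    :param factor: The scaling factor.
    :return: The scaled value.
    """

    return value * factor


# After: opening quotes on their own line, no blank line before the function body.
def scale(value: float, factor: float) -> float:
    """
    Scale a value by a factor.

    :param value: The value to scale.
    :param factor: The scaling factor.
    :return: The scaled value.
    """
    return value * factor
```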

82 files changed (+68, -131 lines)


nncf/common/composite_compression.py (-1)

@@ -75,7 +75,6 @@ def calculate(self, *args: Any, **kwargs: Any) -> Any:

  :return: The compression loss value.
  """
-
  if len(self._child_losses) == 0:
      msg = "Cannot calculate the loss value because the number of child loss is 0."
      raise nncf.InternalError(msg)

nncf/common/graph/graph.py (-2)

@@ -343,7 +343,6 @@ def get_previous_nodes(self, node: NNCFNode) -> List[NNCFNode]:
  :param node: Consumer node.
  :return: List of producers nodes of provided node.
  """
-
  nx_node_keys = self._nx_graph.pred[self._node_id_to_key_dict[node.node_id]]
  return [self._nodes[key] for key in nx_node_keys]

@@ -723,7 +722,6 @@ def get_nncf_graph_pattern_io(self, match: List[str]) -> NNCFGraphPatternIO:
  `match` list
  :return: NNCFGraphPatternIO object describing the inputs and outputs of the matched subgraph
  """
-
  in_edge_boundary, out_edge_boundary = NNCFGraph._get_edge_boundaries(match, self._nx_graph)
  boundary = in_edge_boundary + out_edge_boundary
  input_nncf_edges = []

nncf/common/graph/graph_matching.py (+2)

@@ -57,6 +57,7 @@ def _is_subgraph_matching_strict(graph: nx.DiGraph, pattern: nx.DiGraph, subgrap
  3) External successors or predecessors of the nodes which are not starting and last.
  If any of these conditions is True, than returns False, otherwise - True.
  The checks are skipped for NON_PATTERN_NODE_TYPE.
+
  Example:
  This subgraph matching is not strict.
  (conv2d + BN + ReLU pattern):

@@ -71,6 +72,7 @@ def _is_subgraph_matching_strict(graph: nx.DiGraph, pattern: nx.DiGraph, subgrap
  (cat)----/
    |
    ...
+
  :param graph: The model graph.
  :param pattern: The matched pattern.
  :param subgraph: A subgraph of the model graph including the nodes outside the pattern.

nncf/common/graph/patterns/patterns.py (-1)

@@ -101,7 +101,6 @@ def __add__(self, other: "GraphPattern") -> "GraphPattern":
  :param other: GraphPattern that will be added.
  :return: resulted GraphPattern.
  """
-
  final_pattern = GraphPattern()
  for self_subgraph in self.get_weakly_connected_subgraphs():
      for other_subgraph in other.get_weakly_connected_subgraphs():

nncf/common/insertion_point_graph.py (-2)

@@ -87,7 +87,6 @@ def __init__(
  If left unspecified, every node in `nncf_graph` will be allowed to have a single post-hook for its output
  (post-hooking separate tensors in an operation's output is not currently supported)
  """
-
  super().__init__()
  self._base_nx_graph = deepcopy(nncf_graph.get_nx_graph_copy())

@@ -320,7 +319,6 @@ def get_ip_graph_with_merged_hw_optimized_operations(
  :param full_fusing_pattern: The GraphPatttern object representing a composition of fusing pattern variants.
  :return: The InsertionPointGraph with nodes fused according to pattern matching.
  """
-
  merged_ip_graph = deepcopy(self)
  matches = find_subgraphs_matching_pattern(merged_ip_graph.get_base_nx_graph(), full_fusing_pattern)
  for match in matches:

nncf/common/logging/track_progress.py (-1)

@@ -179,7 +179,6 @@ def __init__(
  it takes to process sequence elements. Useful when processing time is strongly non-uniform.
  :return: An iterable of the values in the sequence.
  """
-
  self.sequence = sequence
  self.weights = weights
  self.total = sum(self.weights) if self.weights is not None else total

nncf/common/pruning/mask_propagation.py (-1)

@@ -94,7 +94,6 @@ def symbolic_mask_propagation(
  are supported by the NNCF pruning algorithm.
  :return: Dict of node indices vs the decision made by symbolic mask propagation algorithm.
  """
-
  can_be_closing_convs = self._get_can_closing_convs(prunable_layers_types)
  can_prune_by_dim: Dict[int, PruningAnalysisDecision] = {k: None for k in can_be_closing_convs} # type: ignore
  for node in self._graph.topological_sort():

nncf/common/pruning/node_selector.py (-2)

@@ -91,7 +91,6 @@ def create_pruning_groups(self, graph: NNCFGraph) -> Clusterization[NNCFNode]:
  :param graph: Graph to work with and their initialization parameters as values.
  :return: Clusterization of pruned nodes.
  """
-
  all_nodes_to_prune = graph.get_nodes_by_types(self._prune_operations_types) # NNCFNodes here

  # 1. Clusters for special ops

@@ -218,7 +217,6 @@ def _pruning_dimensions_analysis(
  are supported by the NNCF pruning algorithm
  :return: Pruning node analysis after model analyzer, pruning algo compatibility and pruning dimensions checks.
  """
-
  nodes_of_group_with_non_eq_pruning_dim = self._check_internal_groups_dim(pruned_nodes_clusterization)
  can_prune_after_check_updated = can_prune_after_check.copy()
  for node_id, val in nodes_of_group_with_non_eq_pruning_dim.items():

nncf/common/quantization/initialization/range.py (-1)

@@ -87,7 +87,6 @@ def __init__(
  specified type of range initialization will be applied. It can be
  quantizers group for activations or weights.
  """
-
  super().__init__(
      range_init_config.init_type, range_init_config.num_init_samples, range_init_config.init_type_specific_params
  )

nncf/common/quantization/quantizer_propagation/graph.py (-2)

@@ -797,7 +797,6 @@ def all_outputs_are_quantized(self, node_key: str) -> bool:
  :return: True if all paths from the given node to the first
  input quantizable nodes have an activation quantizer, False otherwise.
  """
-
  nodes_keys_stack = deque(self.successors(node_key))
  while nodes_keys_stack:
      node_key = nodes_keys_stack.popleft()

@@ -1424,7 +1423,6 @@ def _handle_output_quantizers_for_weights_as_outputs_ops(
  :return: A MultiConfigQuantizerSetup with weights-as-outputs-dependent quantizers removed where possible
  and shared inputs/unified scales group adjusted to reflect the change.
  """
-
  # For the weights-are-outputs quantized operations, need to find out the dependent activation quantizers in
  # the multiconfig setup and see if it is possible to avoid requantization by selecting a common configuration
  # subset. If yes and the activation quantizer becomes unnecessary, need to unify the scales of the weight

nncf/common/quantization/quantizer_propagation/solver.py (-4)

@@ -676,7 +676,6 @@ def propagation_step(
  :param quant_prop_graph: The propagation state graph for `curr_prop_quantizer` to be propagated in.
  :return: The new state of `quant_prop_graph` with `curr_prop_quantizer` propagated one step further.
  """
-
  curr_node_key = curr_prop_quantizer.current_location_node_key
  curr_node = quant_prop_graph.nodes[curr_node_key]
  curr_node_type = curr_node[QuantizerPropagationStateGraph.NODE_TYPE_NODE_ATTR]

@@ -1002,7 +1001,6 @@ def coalesce_insertion_points(
  corresponding TargetPoints.
  :return: A list of TargetPoint groups; each group is a list of TargetPoint's.
  """
-
  if linked_scopes_groups_list is None:
      return [[ip] for ip in target_insertion_points]
  retval: List[List[TargetPoint]] = []

@@ -1313,7 +1311,6 @@ def check_transition_via_path(
  cloned before transition, which impacts the logic of the function.
  :return: The status of the transition determining how it should proceed.
  """
-
  for from_node_key, to_node_key in path:
      from_node = quant_prop_graph.nodes[from_node_key]

@@ -1390,7 +1387,6 @@ def get_merged_qconfigs_for_downward_branching_case(
  of the merged quantizer, if any, and the second element corresponds to configurations of the quantizers
  that would have to remain on the branches (if any).
  """
-
  if self._propagation_strategy == QuantizerPropagationRule.DO_NOT_MERGE_BRANCHES:
      # Do not merge at all
      return None, potential_qconfigs_for_each_branch

nncf/common/scopes.py (-1)

@@ -34,7 +34,6 @@ def matches_any(tested_str: str, strs_to_match_to: Union[Iterable[str], str, Non
  :return: A boolean value specifying whether a tested_str should matches at least one element
  in strs_to_match_to.
  """
-
  if strs_to_match_to is None:
      return False

nncf/common/utils/backend.py (-1)

@@ -136,7 +136,6 @@ def get_backend(model: Any) -> BackendType:
  :param model: The framework-specific model.
  :return: A BackendType representing the correct NNCF backend to be used when working with the framework.
  """
-
  verify_map = {
      is_torch_fx_model: BackendType.TORCH_FX,
      is_torch_model: BackendType.TORCH,

nncf/common/utils/dot_file_rw.py (-1)

@@ -85,7 +85,6 @@ def relabel_graph_for_dot_visualization(nx_graph: nx.Graph, from_reference: bool
  :param nx_graph: NetworkX graph to visualize via dot.
  :return: NetworkX graph with reserved symbols in nodes keys replaced.
  """
-
  nx_graph = copy.deepcopy(nx_graph)

  # .dot format reserves ':' character in node names

nncf/common/utils/patcher.py (-1)

@@ -40,7 +40,6 @@ def patch( # noqa: C901
  :param wrapper: Wrapper function to override with.
  :param force: Whether to override previously applied patches or not.
  """
-
  obj_cls, fn_name = self.import_obj(obj_cls)

  # wrap only if function does exist

nncf/config/config.py (+4, -3)

@@ -29,12 +29,14 @@

  @api(canonical_alias="nncf.NNCFConfig")
  class NNCFConfig(dict[str, Any]):
-     """Contains the configuration parameters required for NNCF to apply the selected algorithms.
+     """
+     Contains the configuration parameters required for NNCF to apply the selected algorithms.

      This is a regular dictionary object extended with some utility functions, such as the ability to attach well-defined
      structures to pass non-serializable objects as parameters. It is primarily built from a .json file, or from a
      Python JSON-like dictionary - both data types will be checked against a JSONSchema. See the definition of the
-     schema at https://openvinotoolkit.github.io/nncf/schema/, or by calling NNCFConfig.schema()."""
+     schema at https://openvinotoolkit.github.io/nncf/schema/, or by calling NNCFConfig.schema().
+     """

      def __init__(self, *args: Any, **kwargs: Any) -> None:
          super().__init__(*args, **kwargs)

@@ -47,7 +49,6 @@ def from_dict(cls, nncf_dict: Dict[str, Any]) -> "NNCFConfig":

  :param nncf_dict: A Python dict with the JSON-style configuration for NNCF.
  """
-
  cls.validate(nncf_dict)
  return cls(deepcopy(nncf_dict))

nncf/config/schema.py (+4, -2)

@@ -151,8 +151,10 @@
  def validate_single_compression_algo_schema(
      single_compression_algo_dict: Dict[str, Any], ref_vs_algo_schema: Dict[str, Any]
  ) -> None:
-     """single_compression_algo_dict must conform to BASIC_COMPRESSION_ALGO_SCHEMA (and possibly has other
-     algo-specific properties"""
+     """
+     single_compression_algo_dict must conform to BASIC_COMPRESSION_ALGO_SCHEMA (and possibly has other
+     algo-specific properties
+     """
      algo_name = single_compression_algo_dict["algorithm"]
      if algo_name not in ref_vs_algo_schema:
          msg = f"Incorrect algorithm name - must be one of {str(list(ref_vs_algo_schema.keys()))}"

nncf/data/generators.py (-1)

@@ -47,7 +47,6 @@ def generate_text_data(
  :param dataset_size: Size of the data.
  :return: List of the text data ready to use.
  """
-
  try:
      import torch
  except ImportError:

nncf/experimental/common/graph/netron.py (-3)

@@ -159,7 +159,6 @@ def convert_nncf_dtype_to_ov_dtype(dtype: Dtype) -> str:
  :param dtype: The data type to be converted.
  :return: The openvino dtype string corresponding to the given data type.
  """
-
  dummy_precision_map: Dict[Dtype, str] = {Dtype.INTEGER: "i32", Dtype.FLOAT: "f32"}

  return dummy_precision_map[dtype]

@@ -178,7 +177,6 @@ def get_graph_desc(
  :return: A tuple containing lists of NodeDesc and EdgeDesc objects
  representing the nodes and edges of the NNCFGraph.
  """
-
  if get_attributes_fn is None:
      get_attributes_fn = lambda x: {
          "metatype": str(x.metatype.name),

@@ -257,7 +255,6 @@ def save_for_netron(
  :param get_attributes_fn: A function to retrieve additional attributes for nodes.
  Defaults to a function returning {"metatype": str(x.metatype.name)}.
  """
-
  node_descs, edge_descs = get_graph_desc(graph, include_fq_params, get_attributes_fn)

  net = ET.Element(Tags.NET, name=graph_name)

nncf/experimental/common/pruning/block_hierarchy.py (-1)

@@ -32,7 +32,6 @@ def __init__(self, root_groups: List[PropagationGroup]) -> None:
  :param roots: list of the root groups
  :return: networkx graph that represents the hierarchy of propagation blocks/groups.
  """
-
  self._id_counter = 0
  self._graph = nx.DiGraph()
  self._visited_block_ids_map: Dict[int, int] = {}

nncf/experimental/common/tensor_statistics/collectors.py (-1)

@@ -125,7 +125,6 @@ def __init__(
  :param window_size: Number of samples from the end of the list of collected samples to aggregate.
  Aggregates all available collected statistics in case parameter is None.
  """
-
  self._aggregation_axes = (0,) if aggregation_axes is None else aggregation_axes
  self._keepdims = True
  self._num_samples = num_samples

nncf/experimental/quantization/algorithms/post_training/pipeline.py (-1)

@@ -62,7 +62,6 @@ def experimental_create_ptq_pipeline(
  for each item of the batch or for the entire batch, default is False.
  :return: An experimental post-training quantization pipeline.
  """
-
  # Build the post-training quantization pipeline.
  pipeline_steps = []
nncf/experimental/tensorflow/graph/converter.py

-1
Original file line numberDiff line numberDiff line change
@@ -286,7 +286,6 @@ def _collect_tfgraph_descs(graph: tf.Graph, op_names: List[str]) -> Tuple[List[N
286286
:param op_names: A list of names for the input operations ()
287287
:return: A description of nodes and edges which should be included to the NNCF graph.
288288
"""
289-
290289
# Traverse the `graph` and mark all ops reachable from the `input_ops`.
291290
# The op `u` is reachable from the `input_ops` if a directed path
292291
# from at least one op in `input_ops` to `u` exists in the `graph.`

nncf/experimental/torch/fx/model_transformer.py (-2)

@@ -77,7 +77,6 @@ def _traverse_graph(
  :param stop_nodes: Given stop nodes.
  :param visited: Set of already visited nodes.
  """
-
  while input_nodes:
      in_node = input_nodes.pop()
      if in_node.name in visited or in_node.name in stop_nodes:

@@ -104,7 +103,6 @@ def _apply_model_extraction(
  more than one element this function raises an assert.
  :return: Returns a submodel extracted from the given model by the given transformation.
  """
-
  transformation = transformations[-1]
  stop_nodes = set(transformation.input_node_names + transformation.output_node_names)
  visited = set()

nncf/experimental/torch/fx/quantization/quantize_pt2e.py (+2, -1)

@@ -143,7 +143,8 @@ def quantize_pt2e(


  def _quant_node_constraint(n: torch.fx.Node) -> bool:
-     """If there is any pure ops between get_attr and quantize op they will be const propagated
+     """
+     If there is any pure ops between get_attr and quantize op they will be const propagated
      e.g. get_attr(weight) -> transpose -> quantize -> dequantize*
      (Note: dequantize op is not going to be constant propagated)

nncf/experimental/torch/fx/transformations.py (+1, -3)

@@ -75,7 +75,7 @@ def _set_new_node_meta(
  model: torch.fx.GraphModule,
  ):
  """
- Sets correct meta \"val\" value to the new node.
+ Sets correct meta 'val' value to the new node.

  :param new_node: The new node.
  :param prev_node: Input node of the new node.

@@ -316,7 +316,6 @@ def insert_one_qdq(model: torch.fx.GraphModule, target_point: PTTargetPoint, qua
  target node.
  :param quantizer: Quantizer module to inherit quantization parameters from.
  """
-
  # Copied from torch.ao.quantization.quantize_pt2e.convert_pt2e
  # 1. extract information for inserting q/dq node from activation_post_process
  node_type = "call_function"

@@ -613,7 +612,6 @@ def _compress_qdq_constant_transformation(model: torch.fx.GraphModule, matches)

  :param: model: Model to apply transformations to.
  """
-
  for match in matches:
      mul_node = match.replacements[0]
      sub_node = match.replacements[1]

nncf/experimental/torch/nas/bootstrapNAS/elasticity/elastic_width.py (+1, -1)

@@ -294,7 +294,7 @@ def __init__(
  @property
  def width_list(self) -> List[int]:
      """
-     list of all available widths to select from. Each value corresponds to a single element in the search space of
+     List of all available widths to select from. Each value corresponds to a single element in the search space of
      operation. The search space of the model is cartesian product of search spaces of operation.
      If all widths starting from 1 to maximum number of channels with step size 1 are available, the search space
      would be prohibitively large to efficiently train and search.

nncf/experimental/torch/nas/bootstrapNAS/search/evaluator.py (-1)

@@ -216,7 +216,6 @@ def update_from_state(self, state: Dict[str, Any]) -> NoReturn:
  :param state: dict with state that should be used for updating this evaluator
  :return:
  """
-
  super().update_from_state(state)
  new_dict = state.copy()
  self._is_top1 = new_dict["is_top1"]

nncf/experimental/torch/nas/bootstrapNAS/training/training_algorithm.py (-1)

@@ -126,7 +126,6 @@ def run(
  :param tensorboard_writer: The tensorboard object to be used for logging.
  :return: the fine-tuned model and elasticity controller
  """
-
  if train_iters is None:
      train_iters = len(train_loader)
  self._training_ctrl.set_training_lr_scheduler_args(optimizer, train_iters) # len(train_loader))

nncf/experimental/torch/search_building_blocks/search_blocks.py (+1, -1)

@@ -304,7 +304,7 @@ def get_potential_candidate_for_block(search_graph: SearchGraph) -> Tuple[ShapeV

  def itemgetter_force_tuple(*indexes):
      """
-     itemgetter wrapper that always returns a tuple. The original function may return both: iterable and a single
+     Itemgetter wrapper that always returns a tuple. The original function may return both: iterable and a single
      non-iterable element, which is not convenient in the general case.
      """
      getter = itemgetter(*indexes)

nncf/experimental/torch/sparsify_activations/sparsify_activations_impl.py (+1, -2)

@@ -225,7 +225,7 @@ def sparsify_activations(
  representing the layers to match in the model's NNCF graph; the corresponding value
  is a float number in the range [0, 1] representing the target sparsity level.

- Example:
+ Example:
  .. code-block:: python
      {
          # Target sparsity is 60% for node "Dummy/Linear[layer]/linear_0" in the model graph

@@ -239,7 +239,6 @@ def sparsify_activations(
  filtered out internally, so there is no need to mention them in `ignored_scope`.
  :return: The sparsified model.
  """
-
  for scope, target_sparsity in target_sparsity_by_scope.items():
      if target_sparsity < 0.0 or target_sparsity > 1.0:
          msg = f'Target sparsity for scope "{scope}" should be in range [0, 1].'

nncf/experimental/torch/sparsify_activations/target_scope.py (+1, -1)

@@ -21,7 +21,7 @@

  @dataclass
  class TargetScope(IgnoredScope):
-     """
+     r"""
      Specifies the target portions in a model graph.

      Example:
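
The `r"""` prefix added in this last file is the usual fix when a docstring body contains backslashes (for example, regex-style patterns in an example section): without the raw-string prefix the backslash is treated as an escape sequence. A small illustrative sketch with a hypothetical function, not taken from the repository:

```
import re


def find_digits(text: str) -> list:
    r"""
    Return all digit groups found in the text.

    The raw docstring lets a pattern such as "\d+" appear literally
    without producing an invalid-escape warning.
    """
    return re.findall(r"\d+", text)
```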
