
Commit ddc34fc

Fix pylint issues
1 parent ade0bc3 commit ddc34fc

8 files changed: +35 -25 lines changed

.pylintrc (+1 -1)

@@ -28,7 +28,7 @@ disable = arguments-differ,
 
 max-line-length = 120
 ignore-docstrings = yes
-ignored-modules = numpy,torch,cv2,openvino
+ignored-modules = numpy,torch,cv2,openvino,onnx.onnx_cpp2py_export
 extension-pkg-whitelist = torch,cv2,scipy
 
 [SIMILARITIES]
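
Note: ignored-modules tells pylint to skip member checks on the listed modules. onnx.onnx_cpp2py_export is a compiled extension whose attributes pylint cannot inspect statically, so without this entry the except clause in torchreid/apis/export.py below would likely be reported as no-member (E1101). A minimal sketch of the narrower, line-scoped alternative (illustrative only; 'model.onnx' is a made-up path, not from the commit):

    import onnx

    try:
        onnx.checker.check_model(onnx.load('model.onnx'))
    except onnx.onnx_cpp2py_export.checker.ValidationError as ex:  # pylint: disable=no-member
        print(f'ONNX check failed: {ex}.')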

requirements.txt (+1 -1)

@@ -27,4 +27,4 @@ optuna
 timm
 addict
 randaugment
-pylint==2.6.0
+pylint==2.11.1
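
Note: the bump from pylint 2.6.0 to 2.11.1 likely drives most of the edits in this commit: pylint 2.10 added the unspecified-encoding check (W1514) and 2.11 added consider-using-f-string (C0209), which would explain the str.format-to-f-string conversions and the new targeted disables below. A minimal sketch of the C0209 fix pattern, with an illustrative variable:

    loss_name = 'softmax'
    print('Building {}-engine'.format(loss_name))  # flagged by consider-using-f-string
    print(f'Building {loss_name}-engine')          # preferred form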

scripts/default_config.py (+6 -3)

@@ -3,6 +3,7 @@
 
 from yacs.config import CfgNode as CN
 
+# pylint: disable=protected-access,too-many-statements,unspecified-encoding
 
 def get_default_config():
 

@@ -109,7 +110,8 @@ def get_default_config():
 
     # custom_datasets
     cfg.custom_datasets = CN() # this node contains information about custom classification datasets
-    cfg.custom_datasets.roots = [] # a list of root folders in case of ImagesFolder fromat or list of annotation files with paths relative to the list's parent folder
+    cfg.custom_datasets.roots = [] # a list of root folders in case of ImagesFolder fromat
+    # or list of annotation files with paths relative to the list's parent folder
     cfg.custom_datasets.types = [] # a list of types (classification or classification_image_folder)
     cfg.custom_datasets.names = [] # aliases for custom datasets that can be used in the data section. Should be unique
 

@@ -136,7 +138,8 @@ def get_default_config():
     cfg.train.new_layers = ['classifier'] # newly added layers with default lr
     cfg.train.base_lr_mult = 0.1 # learning rate multiplier for base layers
     cfg.train.lr_scheduler = 'single_step'
-    cfg.train.target_metric = 'train_loss' # define which metric to use with reduce_on_plateau scheduler. Two possible variants are available: 'test_acc' and 'train_loss'
+    cfg.train.target_metric = 'train_loss' # define which metric to use with reduce_on_plateau scheduler.
+    # Two possible variants are available: 'test_acc' and 'train_loss'
     cfg.train.base_scheduler = ''
     cfg.train.stepsize = [20] # stepsize to decay learning rate
     cfg.train.gamma = 0.1 # learning rate decay multiplier

@@ -464,7 +467,7 @@ def get_default_config():
 def merge_from_files_with_base(cfg, cfg_path):
     def _get_list_of_files(cur_path, set_of_files=None):
         if not (cur_path.lower().endswith('.yml') or cur_path.lower().endswith('.yaml')):
-            raise RuntimeError(f'Wrong extension of config file {cur_path}')
+            raise RuntimeError(f'Wrong extension of config file {cur_path}')
         if set_of_files is None:
             set_of_files = {cur_path}
         elif cur_path in set_of_files:
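
Note: the module-wide pragma added at the top of this file silences protected-access, too-many-statements and unspecified-encoding for every line in it. Where only a few statements are affected, W1514 can instead be satisfied outright with an explicit encoding; a minimal sketch with an illustrative file name:

    with open('config.yaml', encoding='utf-8') as f:  # explicit encoding satisfies unspecified-encoding (W1514)
        data = f.read()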

scripts/script_utils.py (+18 -13)

@@ -13,6 +13,7 @@
                                    model_kwargs, optimizer_kwargs,
                                    lr_scheduler_kwargs, merge_from_files_with_base)
 
+
 def build_base_argparser():
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--config-file', type=str, default='',

@@ -61,6 +62,7 @@ def reset_config(cfg, args):
 def build_datamanager(cfg, classification_classes_filter=None):
     return torchreid.data.ImageDataManager(filter_classes=classification_classes_filter, **imagedata_kwargs(cfg))
 
+
 def build_auxiliary_model(config_file, num_classes, use_gpu,
                           device_ids, num_iter, lr=None,
                           nncf_aux_config_file=None,

@@ -76,14 +78,16 @@ def build_auxiliary_model(config_file, num_classes, use_gpu,
               f'the changes are:\n{pformat(aux_config_opts)}')
         aux_cfg.merge_from_list(aux_config_opts)
 
-    print('\nShow auxiliary configuration\n{}\n'.format(aux_cfg))
+    print(f'\nShow auxiliary configuration\n{aux_cfg}\n')
 
     if lr is not None:
         aux_cfg.train.lr = lr
         print(f"setting learning rate from main model: {lr}")
     model = torchreid.models.build_model(**model_kwargs(aux_cfg, num_classes))
     optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(aux_cfg))
-    scheduler = torchreid.optim.build_lr_scheduler(optimizer=optimizer, num_iter=num_iter, **lr_scheduler_kwargs(aux_cfg))
+    scheduler = torchreid.optim.build_lr_scheduler(optimizer=optimizer,
+                                                   num_iter=num_iter,
+                                                   **lr_scheduler_kwargs(aux_cfg))
 
     if aux_cfg.model.resume and check_isfile(aux_cfg.model.resume):
         aux_cfg.train.start_epoch = resume_from_checkpoint(

@@ -132,7 +136,7 @@ def is_config_parameter_set_from_command_line(cmd_line_opts, parameter_name):
     if not cmd_line_opts:
         return False
     key_names = cmd_line_opts[0::2]
-    return (parameter_name in key_names)
+    return parameter_name in key_names
 
 
 def put_main_model_on_the_device(model, use_gpu=True, gpu_num=1, num_aux_models=0, split_models=False):

@@ -173,29 +177,30 @@ def check_classes_consistency(ref_classes, probe_classes, strict=False):
             if len(ref_classes) != len(probe_classes):
                 return False
             return sorted(probe_classes.keys()) == sorted(ref_classes.keys())
-        else:
-            if len(ref_classes) > len(probe_classes):
+
+        if len(ref_classes) > len(probe_classes):
+            return False
+        probe_names = probe_classes.keys()
+        for cl in ref_classes.keys():
+            if cl not in probe_names:
                 return False
-            probe_names = probe_classes.keys()
-            for cl in ref_classes.keys():
-                if cl not in probe_names:
-                    return False
+
         return True
 
     classes_map = {v : k for k, v in enumerate(sorted(classes))} if classes else {}
     if test_only:
         for name, dataloader in datamanager.test_loader.items():
-            if not len(dataloader['query'].dataset.classes): # current text annotation doesn't contain classes names
+            if not dataloader['query'].dataset.classes: # current text annotation doesn't contain classes names
                 print(f'Warning: classes are not defined for validation dataset {name}')
                 continue
-            if not len(get_model_attr(model, 'classification_classes')):
-                print(f'Warning: classes are not provided in the current snapshot. Consistency checks are skipped.')
+            if not get_model_attr(model, 'classification_classes'):
+                print('Warning: classes are not provided in the current snapshot. Consistency checks are skipped.')
                 continue
             if not check_classes_consistency(get_model_attr(model, 'classification_classes'),
                                              dataloader['query'].dataset.classes, strict=False):
                 raise ValueError('Inconsistent classes in evaluation dataset')
             if classes and not check_classes_consistency(classes_map,
-                    get_model_attr(model, 'classification_classes'), strict=True):
+                                                         get_model_attr(model, 'classification_classes'), strict=True):
                 raise ValueError('Classes provided via --classes should be the same as in the loaded model')
     elif classes:
         if not check_classes_consistency(classes_map,
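
Note: most of the churn in this file is mechanical: no-else-return (R1705) flags an else branch that follows a return, the len-as-condition family flags 'if not len(x):' in favour of 'if not x:', and f-string-without-interpolation (W1309) flags f-strings with no placeholders (hence the print call that lost its f prefix). A minimal sketch of the first two, with illustrative names:

    def contains_all(ref, probe):
        if not probe:  # rather than: if not len(probe) -- an empty collection is already falsy
            return False
        # no else needed after the early return above (no-else-return)
        return all(cl in probe for cl in ref)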

torchreid/apis/export.py (+1 -1)

@@ -70,7 +70,7 @@ def export_onnx(model, cfg, output_file_path='model', disable_dyn_axes=True,
         onnx.checker.check_model(net_from_onnx)
         print('ONNX check passed.')
     except onnx.onnx_cpp2py_export.checker.ValidationError as ex:
-        print('ONNX check failed: {}.'.format(ex))
+        print(f'ONNX check failed: {ex}.')
 
     return output_file_path
 

torchreid/apis/training.py (+4 -3)

@@ -14,6 +14,7 @@
 limitations under the License.
 """
 
+import sys
 from copy import deepcopy
 
 import torchreid

@@ -42,7 +43,7 @@ def run_lr_finder(cfg, datamanager, model, optimizer, scheduler, classes,
     print(f"Estimated learning rate: {aux_lr}")
     if cfg.lr_finder.stop_after:
         print("Finding learning rate finished. Terminate the training process")
-        exit()
+        sys.exit(0)
 
     # reload all parts of the training
     # we do not check classification parameters

@@ -76,7 +77,7 @@ def run_training(cfg, datamanager, model, optimizer, scheduler, extra_device_ids
     num_train_classes = datamanager.num_train_pids
 
     if num_aux_models > 0:
-        print('Enabled mutual learning between {} models.'.format(len(cfg.mutual_learning.aux_configs) + 1))
+        print(f'Enabled mutual learning between {len(cfg.mutual_learning.aux_configs) + 1} models.')
 
     models, optimizers, schedulers = [model], [optimizer], [scheduler]
     for config_file, device_ids in zip(cfg.mutual_learning.aux_configs, extra_device_ids):

@@ -91,7 +92,7 @@ def run_training(cfg, datamanager, model, optimizer, scheduler, extra_device_ids
         schedulers.append(aux_scheduler)
     else:
         models, optimizers, schedulers = model, optimizer, scheduler
-    print('Building {}-engine'.format(cfg.loss.name))
+    print(f'Building {cfg.loss.name}-engine')
     engine = build_engine(cfg, datamanager, models, optimizers, schedulers,
                           should_freeze_aux_models=should_freeze_aux_models,
                           nncf_metainfo=nncf_metainfo,
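
Note: replacing exit() with sys.exit(0) matches pylint's consider-using-sys-exit (R1722). exit() is injected by the site module for interactive sessions and is not guaranteed to exist (for example under python -S or in an embedded interpreter), while sys.exit() is always available and takes an explicit status code. A minimal sketch, with an illustrative helper name:

    import sys

    def stop_after_lr_search():  # illustrative, not from the commit
        print('Finding learning rate finished. Terminate the training process')
        sys.exit(0)  # raises SystemExit(0)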

torchreid/integration/nncf/compression.py (+2 -1)

@@ -7,6 +7,7 @@
 
 from torchreid.utils.tools import random_image
 
+# pylint: disable=line-too-long
 
 @contextmanager
 def nullcontext():

@@ -67,7 +68,7 @@ def _load_checkpoint_for_nncf(model, filename, map_location=None, strict=False):
         state_dict = checkpoint['state_dict']
     else:
         raise RuntimeError(
-            'No state_dict found in checkpoint file {}'.format(filename))
+            f'No state_dict found in checkpoint file {filename}')
     _ = load_state(model, state_dict, strict)
     return checkpoint
 

torchreid/integration/nncf/compression_script_utils.py (+2 -2)

@@ -6,7 +6,7 @@
 from scripts.default_config import merge_from_files_with_base
 from scripts.script_utils import build_datamanager, is_config_parameter_set_from_command_line
 
-from .compression import (is_checkpoint_nncf, wrap_nncf_model)
+from torchreid.integration.nncf.compression import (is_checkpoint_nncf, wrap_nncf_model)
 
 def get_coeff_decrease_lr_for_nncf(nncf_training_config):
     if nncf_training_config and nncf_training_config.get('coeff_decrease_lr_for_nncf'):

@@ -158,7 +158,7 @@ def make_nncf_changes_in_eval(model, cfg):
     print(f'NNCF config path = {nncf_config_path}')
     checkpoint_path = cfg.model.load_weights
     datamanager_for_nncf = None
-    compression_ctrl, model, _ = \
+    _, model, _ = \
         wrap_nncf_model(model, cfg, datamanager_for_nncf,
                         checkpoint_path=checkpoint_path,
                         nncf_config_path=nncf_config_path)
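
Note: two small cleanups here: the relative 'from .compression import ...' becomes an absolute import, and the unused compression_ctrl binding is replaced with _, the conventional way to satisfy pylint's unused-variable (W0612) when a call returns more than you need. A minimal sketch of the pattern, with an illustrative function:

    def wrap():
        return 'ctrl', 'model', 'meta'

    _, model, _ = wrap()  # keep only the model; _ marks intentionally discarded values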
