
Commit f21215b

Enable pyupgrade and flake8-errmsg rules in ruff (#3221)
### Changes

https://docs.astral.sh/ruff/rules/#pyupgrade-up
https://docs.astral.sh/ruff/rules/#flake8-errmsg-em

Rules regarding annotations will be enabled in the next PR:

```
"UP006",  # non-pep585-annotation
"UP007",  # non-pep604-annotation-union
"UP035",  # deprecated-import
"UP038",  # non-pep604-isinstance
"UP045",  # non-pep604-annotation-optional
```
1 parent 41a79c8 commit f21215b
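
For context, a minimal sketch of the two rule families this commit applies across the examples (the function and variable names below are illustrative, not taken from the repository): flake8-errmsg (EM) wants the exception message assigned to a variable before raising, while pyupgrade (UP) prefers f-strings over `str.format()` and `open()` calls without the redundant `"r"` mode.

```python
import datetime


def read_version(path: str) -> str:
    # pyupgrade (UP015): omit the redundant "r" mode when opening a file for reading.
    with open(path, encoding="utf-8") as f:
        text = f.read().strip()
    if not text:
        # flake8-errmsg (EM101/EM102): put the message in a variable,
        # then raise with that variable instead of an inline string.
        msg = f"Unable to find version string in {path}"
        raise RuntimeError(msg)
    return text


# pyupgrade (UP032): prefer f-strings over str.format().
run_id = f"{datetime.datetime.now():%Y-%m-%d__%H-%M-%S}"
```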

File tree

411 files changed (+2195, -1679 lines)

custom_version.py

+2 -1

```diff
@@ -60,7 +60,8 @@ def get_custom_version() -> str:
         r"^__version__ = ['\"]((\d+\.\d+\.\d+)([^'\"]*))['\"]", Path(NNCF_VERSION_FILE).read_text(), re.M
     )
     if not version_match:
-        raise RuntimeError("Unable to find version string.")
+        msg = "Unable to find version string."
+        raise RuntimeError(msg)

     version_full = version_match.group(1)
     version_value = version_match.group(2)
```

examples/common/paths.py

+1 -1

```diff
@@ -17,7 +17,7 @@
 def configure_paths(config: SampleConfig, run_name: str):
     config.name = run_name
     d = datetime.datetime.now()
-    run_id = "{:%Y-%m-%d__%H-%M-%S}".format(d)
+    run_id = f"{d:%Y-%m-%d__%H-%M-%S}"
     log_dir = Path(config.log_dir) / run_name / run_id
     log_dir.mkdir(parents=True, exist_ok=True)
     config.log_dir = str(log_dir)
```

examples/experimental/torch/classification/bootstrap_nas.py

+6 -14

```diff
@@ -204,31 +204,23 @@ def validate_model_fn_top1(model_, loader_):
         validate_model_fn_top1, val_loader, config.checkpoint_save_dir, tensorboard_writer=config.tb
     )

-    logger.info("Best config: {best_config}".format(best_config=best_config))
-    logger.info("Performance metrics: {performance_metrics}".format(performance_metrics=performance_metrics))
+    logger.info(f"Best config: {best_config}")
+    logger.info(f"Performance metrics: {performance_metrics}")
     search_algo.visualize_search_progression()

     # Maximal subnet
     elasticity_ctrl.multi_elasticity_handler.activate_maximum_subnet()
     search_algo.bn_adaptation.run(nncf_network)
     top1_acc = validate_model_fn_top1(nncf_network, val_loader)
-    logger.info(
-        "Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
-            top1_acc=top1_acc,
-            macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
-        )
-    )
+    macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
+    logger.info(f"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}")

     # Best found subnet
     elasticity_ctrl.multi_elasticity_handler.activate_subnet_for_config(best_config)
     search_algo.bn_adaptation.run(nncf_network)
     top1_acc = validate_model_fn_top1(nncf_network, val_loader)
-    logger.info(
-        "Best found subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
-            top1_acc=top1_acc,
-            macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
-        )
-    )
+    macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
+    logger.info(f"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}")
     elasticity_ctrl.export_model(osp.join(config.log_dir, "best_subnet.onnx"))

     if "test" in config.mode:
```

examples/experimental/torch/classification/bootstrap_nas_search.py

+5 -13

```diff
@@ -147,7 +147,7 @@ def validate_model_fn_top1(model_, loader_):
     load_state(model, model_weights, is_resume=True)

     top1_acc = validate_model_fn_top1(model, val_loader)
-    logger.info("SuperNetwork Top 1: {top1_acc}".format(top1_acc=top1_acc))
+    logger.info(f"SuperNetwork Top 1: {top1_acc}")

     search_algo = BaseSearchAlgorithm.from_config(model, elasticity_ctrl, nncf_config)

@@ -163,23 +163,15 @@ def validate_model_fn_top1(model_, loader_):
     elasticity_ctrl.multi_elasticity_handler.activate_maximum_subnet()
     search_algo.bn_adaptation.run(nncf_network)
     top1_acc = validate_model_fn_top1(nncf_network, val_loader)
-    logger.info(
-        "Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
-            top1_acc=top1_acc,
-            macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
-        )
-    )
+    macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
+    logger.info(f"Maximal subnet Top1 acc: {top1_acc}, Macs: {macs}")

     # Best found subnet
     elasticity_ctrl.multi_elasticity_handler.activate_subnet_for_config(best_config)
     search_algo.bn_adaptation.run(nncf_network)
     top1_acc = validate_model_fn_top1(nncf_network, val_loader)
-    logger.info(
-        "Best found subnet Top1 acc: {top1_acc}, Macs: {macs}".format(
-            top1_acc=top1_acc,
-            macs=elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000,
-        )
-    )
+    macs = elasticity_ctrl.multi_elasticity_handler.count_flops_and_weights_for_active_subnet()[0] / 2000000
+    logger.info(f"Best found subnet Top1 acc: {top1_acc}, Macs: {macs}")
     elasticity_ctrl.export_model(osp.join(config.log_dir, "best_subnet.onnx"))

     search_algo.search_progression_to_csv()
```

examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/main.py

+1 -1

```diff
@@ -132,7 +132,7 @@ def run_example():
     download_and_extract(MODEL_PATH, MODEL_INFO)
     ov_model = ov.Core().read_model(MODEL_PATH / "stfpm_capsule.xml")

-    with open(MODEL_PATH / "meta_data_stfpm_capsule.json", "r", encoding="utf-8") as f:
+    with open(MODEL_PATH / "meta_data_stfpm_capsule.json", encoding="utf-8") as f:
         validation_params = json.load(f)

     ###############################################################################
```

examples/post_training_quantization/torch/ssd300_vgg16/main.py

+1 -1

```diff
@@ -86,7 +86,7 @@ def __getitem__(self, item: int) -> Tuple[torch.Tensor, Dict]:
         target = dict(image_id=[image_id], boxes=[], labels=[])
         label_filepath = self.labels_path / f"{image_id:012d}.txt"
         if label_filepath.exists():
-            with open(label_filepath, "r", encoding="utf-8") as f:
+            with open(label_filepath, encoding="utf-8") as f:
                 for box_descr in f.readlines():
                     category_id, rel_x, rel_y, rel_w, rel_h = tuple(map(float, box_descr.split(" ")))
                     box_x1, box_y1 = img_w * (rel_x - rel_w / 2), img_h * (rel_y - rel_h / 2)
```

examples/post_training_quantization/torch_fx/resnet18/main.py

+1 -1

```diff
@@ -153,7 +153,7 @@ def prepare_tiny_imagenet_200(dataset_dir: Path):
         return

     val_annotations_file = val_data_dir / "val_annotations.txt"
-    with open(val_annotations_file, "r") as f:
+    with open(val_annotations_file) as f:
         val_annotation_data = map(lambda line: line.split("\t")[:2], f.readlines())
         for image_filename, image_label in val_annotation_data:
             from_image_filepath = val_images_dir / image_filename
```

examples/quantization_aware_training/torch/resnet18/main.py

+1 -1

```diff
@@ -195,7 +195,7 @@ def prepare_tiny_imagenet_200(dataset_dir: Path):
         return

     val_annotations_file = val_data_dir / "val_annotations.txt"
-    with open(val_annotations_file, "r") as f:
+    with open(val_annotations_file) as f:
         val_annotation_data = map(lambda line: line.split("\t")[:2], f.readlines())
         for image_filename, image_label in val_annotation_data:
             from_image_filepath = val_images_dir / image_filename
```

examples/tensorflow/classification/datasets/builder.py

+2 -1

```diff
@@ -65,7 +65,8 @@ def dtype(self):
         }
         dtype = dtype_map.get(self._dtype, None)
         if dtype is None:
-            raise nncf.ValidationError("Invalid DType provided. Supported types: {}".format(dtype_map.keys()))
+            msg = f"Invalid DType provided. Supported types: {dtype_map.keys()}"
+            raise nncf.ValidationError(msg)

         return dtype
```

examples/tensorflow/classification/datasets/preprocessing/utils.py

+8 -4

```diff
@@ -41,10 +41,12 @@ def mean_image_subtraction(
     :return: the centered image.
     """
     if image.get_shape().ndims != 3:
-        raise nncf.ValidationError("Input must be of size [height, width, C>0]")
+        msg = "Input must be of size [height, width, C>0]"
+        raise nncf.ValidationError(msg)

     if len(means) != num_channels:
-        raise nncf.ValidationError("len(means) must match the number of channels")
+        msg = "len(means) must match the number of channels"
+        raise nncf.ValidationError(msg)

     means = tf.broadcast_to(means, tf.shape(image))
     if dtype is not None:
@@ -66,10 +68,12 @@ def standardize_image(
     :return: the centered image.
     """
     if image.get_shape().ndims != 3:
-        raise nncf.ValidationError("Input must be of size [height, width, C>0]")
+        msg = "Input must be of size [height, width, C>0]"
+        raise nncf.ValidationError(msg)

     if len(stddev) != num_channels:
-        raise nncf.ValidationError("len(stddev) must match the number of channels")
+        msg = "len(stddev) must match the number of channels"
+        raise nncf.ValidationError(msg)

     stddev = tf.broadcast_to(stddev, tf.shape(image))
     if dtype is not None:
```

examples/tensorflow/classification/datasets/preprocessing_selector.py

+2 -3

```diff
@@ -30,9 +30,8 @@ def get_preprocessing(dataset_name, model_name, preset=None):
     if not preset:
         preset = dataset_name
     if preset not in PREPROCESSING_FN_MAP:
-        raise nncf.ValidationError(
-            "Preprocessing for dataset {} and model {} was not recognized".format(dataset_name, model_name)
-        )
+        msg = f"Preprocessing for dataset {dataset_name} and model {model_name} was not recognized"
+        raise nncf.ValidationError(msg)

     ext_kwargs = {}
     if preset == "imagenet2012":
```

examples/tensorflow/classification/main.py

+8 -7

```diff
@@ -96,25 +96,26 @@ def get_num_classes(dataset):
     else:
         num_classes = 1000

-    logger.info("The sample is started with {} classes".format(num_classes))
+    logger.info(f"The sample is started with {num_classes} classes")
     return num_classes


 def load_checkpoint(checkpoint, ckpt_path):
     logger.info("Load from checkpoint is enabled.")
     if tf.io.gfile.isdir(ckpt_path):
         path_to_checkpoint = tf.train.latest_checkpoint(ckpt_path)
-        logger.info("Latest checkpoint: {}".format(path_to_checkpoint))
+        logger.info(f"Latest checkpoint: {path_to_checkpoint}")
     else:
         path_to_checkpoint = ckpt_path if tf.io.gfile.exists(ckpt_path + ".index") else None
-        logger.info("Provided checkpoint: {}".format(path_to_checkpoint))
+        logger.info(f"Provided checkpoint: {path_to_checkpoint}")

     if not path_to_checkpoint:
         logger.info("No checkpoint detected.")
         if ckpt_path:
-            raise nncf.ValidationError(f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}")
+            msg = f"ckpt_path was given, but no checkpoint detected in path: {ckpt_path}"
+            raise nncf.ValidationError(msg)

-    logger.info("Checkpoint file {} found and restoring from checkpoint".format(path_to_checkpoint))
+    logger.info(f"Checkpoint file {path_to_checkpoint} found and restoring from checkpoint")

     status = checkpoint.restore(path_to_checkpoint)
     status.expect_partial()
@@ -284,7 +285,7 @@ def run(config):
     if "export" in config.mode:
         save_path, save_format = get_saving_parameters(config)
         export_model(compression_ctrl.strip(), save_path, save_format)
-        logger.info("Saved to {}".format(save_path))
+        logger.info(f"Saved to {save_path}")


 def export(config):
@@ -319,7 +320,7 @@ def export(config):

     save_path, save_format = get_saving_parameters(config)
     export_model(compression_ctrl.strip(), save_path, save_format)
-    logger.info("Saved to {}".format(save_path))
+    logger.info(f"Saved to {save_path}")


 def main(argv):
```

examples/tensorflow/common/dataset_builder.py

+6 -4

```diff
@@ -88,15 +88,16 @@ def build(self):

         builder = dataset_builders.get(self._dataset_type, None)
         if builder is None:
-            raise nncf.UnknownDatasetError("Unknown dataset type {}".format(self._dataset_type))
+            msg = f"Unknown dataset type {self._dataset_type}"
+            raise nncf.UnknownDatasetError(msg)

         dataset = builder()
         dataset = self._pipeline(dataset)

         return dataset

     def _load_tfds(self):
-        logger.info("Using TFDS to load {} data.".format(self._split))
+        logger.info(f"Using TFDS to load {self._split} data.")

         set_hard_limit_num_open_files()

@@ -119,13 +120,14 @@ def _load_tfds(self):
         return dataset

     def _load_tfrecords(self):
-        logger.info("Using TFRecords to load {} data.".format(self._split))
+        logger.info(f"Using TFRecords to load {self._split} data.")

         dataset_key = self._dataset_name.replace("/", "")
         if dataset_key in self._tfrecord_datasets:
             self._dataset_loader = self._tfrecord_datasets[dataset_key](config=self._config, is_train=self._is_train)
         else:
-            raise nncf.UnknownDatasetError("Unknown dataset name: {}".format(self._dataset_name))
+            msg = f"Unknown dataset name: {self._dataset_name}"
+            raise nncf.UnknownDatasetError(msg)

         dataset = self._dataset_loader.as_dataset()
```

examples/tensorflow/common/distributed.py

+4 -3

```diff
@@ -28,12 +28,13 @@ def get_distribution_strategy(config):
         if "CUDA_VISIBLE_DEVICES" not in os.environ or _gpu_id in os.environ["CUDA_VISIBLE_DEVICES"].split(","):
             os.environ["CUDA_VISIBLE_DEVICES"] = _gpu_id
         else:
-            raise nncf.ValidationError(
-                "GPU with id = {id} was not found in the specified "
+            msg = (
+                f"GPU with id = {_gpu_id} was not found in the specified "
                 "CUDA_VISIBLE_DEVICES environment variable. "
                 "Please do not export the CUDA_VISIBLE_DEVICES environment variable "
-                "or specify GPU with id = {id} in it".format(id=_gpu_id)
+                f"or specify GPU with id = {_gpu_id} in it"
             )
+            raise nncf.ValidationError(msg)

     gpus = tf.config.list_physical_devices("GPU")
```

examples/tensorflow/common/model_loader.py

+2 -1

```diff
@@ -21,7 +21,8 @@ def get_model(model_name, input_shape=None, pretrained=True, num_classes=1000, w
     if model_name in AVAILABLE_MODELS:
        model = AVAILABLE_MODELS[model_name]
     else:
-        raise Exception("Undefined model name: {}".format(model_name))
+        msg = f"Undefined model name: {model_name}"
+        raise Exception(msg)

     model_params = {"classes": num_classes}
     if weights is not None:
```

examples/tensorflow/common/models.py

+3 -3

```diff
@@ -81,15 +81,15 @@ def MobileNetV3(stack_fn, last_point_ch, input_shape=None, model_type="large", *
     x = tf.keras.layers.Activation(activation="softmax", name="Predictions")(x)

     # Create model.
-    model = tf.keras.Model(img_input, x, name="MobilenetV3{}".format(model_type))
+    model = tf.keras.Model(img_input, x, name=f"MobilenetV3{model_type}")

     BASE_WEIGHT_PATH = "https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v3/"
     WEIGHTS_HASHES = {
         "large": "59e551e166be033d707958cf9e29a6a7",
         "small": "8768d4c2e7dee89b9d02b2d03d65d862",
     }

-    file_name = "weights_mobilenet_v3_{}_224_1.0_float.h5".format(model_type)
+    file_name = f"weights_mobilenet_v3_{model_type}_224_1.0_float.h5"
     file_hash = WEIGHTS_HASHES[model_type]

     weights_path = tf.keras.utils.get_file(
@@ -185,7 +185,7 @@ def _inverted_res_block(x, expansion, filters, kernel_size, stride, se_ratio, ac
     infilters = tf.keras.backend.int_shape(x)[channel_axis]
     if block_id:
         # Expand
-        prefix = "expanded_conv_{}/".format(block_id)
+        prefix = f"expanded_conv_{block_id}/"
         x = tf.keras.layers.Conv2D(
             _depth(infilters * expansion), kernel_size=1, padding="same", use_bias=False, name=prefix + "expand"
         )(x)
```

examples/tensorflow/common/object_detection/architecture/factory.py

+6 -3

```diff
@@ -37,14 +37,17 @@ def backbone_generator(params):
                 norm_activation=norm_activation_generator(params.model_params.norm_activation),
             )
         else:
-            raise ValueError("Backbone {} is not supported for {} model.".format(backbone_name, params.model))
+            msg = f"Backbone {backbone_name} is not supported for {params.model} model."
+            raise ValueError(msg)
     elif params.model == "YOLOv4":
         if backbone_name == "darknet":
             backbone_fn = darknet.CSPDarknet53()
         else:
-            raise ValueError("Backbone {} is not supported for {} model.".format(backbone_name, params.model))
+            msg = f"Backbone {backbone_name} is not supported for {params.model} model."
+            raise ValueError(msg)
     else:
-        raise ValueError("Model {} is not supported.".format(params.model))
+        msg = f"Model {params.model} is not supported."
+        raise ValueError(msg)

     return backbone_fn
```

examples/tensorflow/common/object_detection/architecture/fpn.py

+8 -7

```diff
@@ -60,7 +60,8 @@ def __init__(
         elif activation == "swish":
             self._activation_op = tf.nn.swish
         else:
-            raise ValueError("Unsupported activation `{}`.".format(activation))
+            msg = f"Unsupported activation `{activation}`."
+            raise ValueError(msg)

         self._use_batch_norm = use_batch_norm
         self._norm_activation = norm_activation
@@ -72,22 +73,22 @@ def __init__(

         for level in range(self._min_level, self._max_level + 1):
             if self._use_batch_norm:
-                self._norm_activations[level] = norm_activation(use_activation=False, name="p%d-bn" % level)
+                self._norm_activations[level] = norm_activation(use_activation=False, name=f"p{level}-bn")

             self._lateral_conv2d_op[level] = self._conv2d_op(
-                filters=self._fpn_feat_dims, kernel_size=(1, 1), padding="same", name="l%d" % level
+                filters=self._fpn_feat_dims, kernel_size=(1, 1), padding="same", name=f"l{level}"
             )

             self._post_hoc_conv2d_op[level] = self._conv2d_op(
                 filters=self._fpn_feat_dims,
                 strides=(1, 1),
                 kernel_size=(3, 3),
                 padding="same",
-                name="post_hoc_d%d" % level,
+                name=f"post_hoc_d{level}",
             )

             self._coarse_conv2d_op[level] = self._conv2d_op(
-                filters=self._fpn_feat_dims, strides=(2, 2), kernel_size=(3, 3), padding="same", name="p%d" % level
+                filters=self._fpn_feat_dims, strides=(2, 2), kernel_size=(3, 3), padding="same", name=f"p{level}"
             )

     def __call__(self, multilevel_features, is_training=None):
@@ -108,8 +109,8 @@ def __call__(self, multilevel_features, is_training=None):
         input_levels = list(multilevel_features.keys())
         if min(input_levels) > self._min_level:
             raise ValueError(
-                "The minimum backbone level {} should be ".format(min(input_levels))
-                + "less or equal to FPN minimum level {}.".format(self._min_level)
+                f"The minimum backbone level {min(input_levels)} should be "
+                + f"less or equal to FPN minimum level {self._min_level}."
             )

         backbone_max_level = min(max(input_levels), self._max_level)
```

0 commit comments
