Commit b1ea59d

Improve examples (#3091)
### Changes

- Add patterns based on file extension to .gitignore for examples
- Use pathlib style instead of os.path
- Fix issue with creating the ../nncf/~/.cache/datasets directory
- Replace tqdm with nncf.common.logging.track_progress
- Remove unused function arguments and if conditions
- Clean logs, disable progress bar, remove log of validation
- Pass a list of command arguments to subprocess instead of a string
- Remove nncf dependencies from tests/cross_fw/examples
- Add `--reuse-venv` to use the venv from the example directory
- Add progress bar to collect_values_for_each_item_using_prepared_model

### Tests

- https://github.com/openvinotoolkit/nncf/actions/runs/11900403097
- nightly/job/test_examples/618
- nightly/job/windows/job/test-examples/278/
1 parent 75ba2f7 commit b1ea59d
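A recurring change across the examples is the benchmark_app invocation: the command is now built as a list of arguments and passed to subprocess with text=True, instead of a formatted shell string run with shell=True. A minimal sketch of the before/after pattern (the model path below is a placeholder, not a file from this commit):

```python
# Sketch of the subprocess change applied throughout the examples; "model.xml" is a placeholder.
import subprocess

model_path = "model.xml"

# Before: a single shell string, which needs shell=True and careful quoting
# output = subprocess.check_output(f"benchmark_app -m {model_path} -d CPU -api async -t 15", shell=True)

# After: an argument list, executed without a shell; text=True yields str instead of bytes
command = ["benchmark_app", "-m", model_path, "-d", "CPU", "-api", "async", "-t", "15"]
output = subprocess.check_output(command, text=True)
print(*output.splitlines()[-8:], sep="\n")
```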

22 files changed: +349 -320 lines changed

.github/workflows/examples.yml

+1 -2

@@ -44,9 +44,8 @@ jobs:
           cache: pip
       - name: cpuinfo
         run: cat /proc/cpuinfo
-      - name: Install NNCF and test requirements
+      - name: Install test requirements
         run: |
-          pip install -e .
           pip install -r tests/cross_fw/examples/requirements.txt
       - name: Print installed modules
         run: pip list

.gitignore

+10 -13

@@ -119,23 +119,20 @@ nncf_debug/
 
 # NNCF examples
 examples/torch/object_detection/eval/
-examples/post_training_quantization/onnx/mobilenet_v2/mobilenet_v2_*
-examples/post_training_quantization/openvino/mobilenet_v2/mobilenet_v2_*
-examples/post_training_quantization/tensorflow/mobilenet_v2/mobilenet_v2_*
-examples/post_training_quantization/torch/mobilenet_v2/mobilenet_v2_*
-examples/post_training_quantization/torch/ssd300_vgg16/ssd300_vgg16_*
-examples/post_training_quantization/openvino/anomaly_stfpm_quantize_with_accuracy_control/stfpm_*
-examples/post_training_quantization/openvino/yolov8/yolov8n*
-examples/post_training_quantization/openvino/yolov8_quantize_with_accuracy_control/yolov8n*
-examples/**/runs/**
-examples/**/results/**
-examples/llm_compression/openvino/tiny_llama_find_hyperparams/statistics
-compressed_graph.dot
-original_graph.dot
+examples/**/*.xml
+examples/**/*.bin
+examples/**/*.pt
+examples/**/*.onnx
+examples/**/statistics
+examples/**/runs
+examples/**/results
+examples/**/metrics.json
 datasets/**
 
 # Tests
 tests/**/runs/**
 tests/**/tmp*/**
 open_model_zoo/
 nncf-tests.xml
+compressed_graph.dot
+original_graph.dot

examples/post_training_quantization/onnx/mobilenet_v2/main.py

+21 -16

@@ -12,36 +12,37 @@
 import re
 import subprocess
 from pathlib import Path
-from typing import List, Optional
+from typing import List
 
 import numpy as np
 import onnx
 import openvino as ov
 import torch
 from fastdownload import FastDownload
 from fastdownload import download_url
+from rich.progress import track
 from sklearn.metrics import accuracy_score
 from torchvision import datasets
 from torchvision import transforms
-from tqdm import tqdm
 
 import nncf
 
 ROOT = Path(__file__).parent.resolve()
 MODEL_URL = "https://huggingface.co/alexsu52/mobilenet_v2_imagenette/resolve/main/mobilenet_v2_imagenette.onnx"
 DATASET_URL = "https://s3.amazonaws.com/fast-ai-imageclas/imagenette2-320.tgz"
-DATASET_PATH = "~/.cache/nncf/datasets"
-MODEL_PATH = "~/.cache/nncf/models"
+DATASET_PATH = Path().home() / ".cache" / "nncf" / "datasets"
+MODEL_PATH = Path().home() / ".cache" / "nncf" / "models"
 DATASET_CLASSES = 10
 
 
 def download_dataset() -> Path:
-    downloader = FastDownload(base=DATASET_PATH, archive="downloaded", data="extracted")
+    downloader = FastDownload(base=DATASET_PATH.as_posix(), archive="downloaded", data="extracted")
     return downloader.get(DATASET_URL)
 
 
 def download_model() -> Path:
-    return download_url(MODEL_URL, Path(MODEL_PATH).resolve())
+    MODEL_PATH.mkdir(exist_ok=True, parents=True)
+    return download_url(MODEL_URL, MODEL_PATH.resolve())
 
 
 def validate(path_to_model: Path, validation_loader: torch.utils.data.DataLoader) -> float:
@@ -51,7 +52,7 @@ def validate(path_to_model: Path, validation_loader: torch.utils.data.DataLoader
     compiled_model = ov.compile_model(path_to_model, device_name="CPU")
     output = compiled_model.outputs[0]
 
-    for images, target in tqdm(validation_loader):
+    for images, target in track(validation_loader, description="Validating"):
         pred = compiled_model(images)[output]
         predictions.append(np.argmax(pred, axis=1))
         references.append(target)
@@ -61,13 +62,17 @@ def validate(path_to_model: Path, validation_loader: torch.utils.data.DataLoader
     return accuracy_score(predictions, references)
 
 
-def run_benchmark(path_to_model: Path, shape: Optional[List[int]] = None, verbose: bool = True) -> float:
-    command = f"benchmark_app -m {path_to_model} -d CPU -api async -t 15"
-    if shape is not None:
-        command += f' -shape [{",".join(str(x) for x in shape)}]'
-    cmd_output = subprocess.check_output(command, shell=True)  # nosec
-    if verbose:
-        print(*str(cmd_output).split("\\n")[-9:-1], sep="\n")
+def run_benchmark(path_to_model: Path, shape: List[int]) -> float:
+    command = [
+        "benchmark_app",
+        "-m", path_to_model.as_posix(),
+        "-d", "CPU",
+        "-api", "async",
+        "-t", "15",
+        "-shape", str(shape),
+    ]  # fmt: skip
+    cmd_output = subprocess.check_output(command, text=True)
+    print(*cmd_output.splitlines()[-8:], sep="\n")
     match = re.search(r"Throughput\: (.+?) FPS", str(cmd_output))
     return float(match.group(1))
 
@@ -136,9 +141,9 @@ def transform_fn(data_item):
     print(f"[2/7] Save INT8 model: {int8_model_path}")
 
     print("[3/7] Benchmark FP32 model:")
-    fp32_fps = run_benchmark(fp32_model_path, shape=[1, 3, 224, 224], verbose=True)
+    fp32_fps = run_benchmark(fp32_model_path, shape=[1, 3, 224, 224])
     print("[4/7] Benchmark INT8 model:")
-    int8_fps = run_benchmark(int8_model_path, shape=[1, 3, 224, 224], verbose=True)
+    int8_fps = run_benchmark(int8_model_path, shape=[1, 3, 224, 224])
 
     print("[5/7] Validate ONNX FP32 model in OpenVINO:")
     fp32_top1 = validate(fp32_model_path, val_loader)
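The DATASET_PATH/MODEL_PATH change above is what fixes the stray ../nncf/~/.cache/datasets directory mentioned in the commit message: a literal "~/..." string is never expanded by pathlib, so a folder named "~" ended up being created relative to the working directory. A minimal sketch of the difference, using only the standard library:

```python
# Minimal sketch: why Path.home() is used instead of a "~/..." string.
from pathlib import Path

bad = Path("~/.cache/nncf/datasets")             # "~" is kept as a literal directory name
good = Path.home() / ".cache" / "nncf" / "datasets"

print(bad)    # ~/.cache/nncf/datasets -> would create a real "~" folder if mkdir'ed
print(good)   # e.g. /home/<user>/.cache/nncf/datasets
good.mkdir(parents=True, exist_ok=True)          # mirrors MODEL_PATH.mkdir(exist_ok=True, parents=True)
```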

examples/post_training_quantization/onnx/yolov8_quantize_with_accuracy_control/deploy.py

+17 -12

@@ -16,10 +16,10 @@
 
 import openvino as ov
 import torch
-from tqdm import tqdm
+from rich.progress import track
 from ultralytics.cfg import get_cfg
-from ultralytics.engine.validator import BaseValidator as Validator
 from ultralytics.models.yolo import YOLO
+from ultralytics.models.yolo.segment.val import SegmentationValidator
 from ultralytics.utils import DEFAULT_CFG
 from ultralytics.utils.metrics import ConfusionMatrix
 
@@ -37,7 +37,7 @@
 def validate_ov_model(
     ov_model: ov.Model,
     data_loader: torch.utils.data.DataLoader,
-    validator: Validator,
+    validator: SegmentationValidator,
     num_samples: Optional[int] = None,
 ) -> Tuple[Dict, int, int]:
     validator.seen = 0
@@ -47,7 +47,7 @@ def validate_ov_model(
     validator.confusion_matrix = ConfusionMatrix(nc=validator.nc)
     compiled_model = ov.compile_model(ov_model, device_name="CPU")
     num_outputs = len(compiled_model.outputs)
-    for batch_i, batch in enumerate(data_loader):
+    for batch_i, batch in enumerate(track(data_loader, description="Validating")):
         if num_samples is not None and batch_i == num_samples:
             break
         batch = validator.preprocess(batch)
@@ -65,12 +65,17 @@ def validate_ov_model(
     return stats, validator.seen, validator.nt_per_class.sum()
 
 
-def run_benchmark(model_path: str, config) -> float:
-    command = f"benchmark_app -m {model_path} -d CPU -api async -t 30"
-    command += f' -shape "[1,3,{config.imgsz},{config.imgsz}]"'
-    cmd_output = subprocess.check_output(command, shell=True)  # nosec
-
-    match = re.search(r"Throughput\: (.+?) FPS", str(cmd_output))
+def run_benchmark(model_path: Path, config) -> float:
+    command = [
+        "benchmark_app",
+        "-m", model_path.as_posix(),
+        "-d", "CPU",
+        "-api", "async",
+        "-t", "30",
+        "-shape", str([1, 3, config.imgsz, config.imgsz]),
+    ]  # fmt: skip
+    cmd_output = subprocess.check_output(command, text=True)
+    match = re.search(r"Throughput\: (.+?) FPS", cmd_output)
     return float(match.group(1))
 
 
@@ -96,11 +101,11 @@ def run_benchmark(model_path: str, config) -> float:
     validator, data_loader = prepare_validation(YOLO(ROOT / f"{MODEL_NAME}.pt"), args)
 
     print("[5/7] Validate OpenVINO FP32 model:")
-    fp32_stats, total_images, total_objects = validate_ov_model(fp32_ov_model, tqdm(data_loader), validator)
+    fp32_stats, total_images, total_objects = validate_ov_model(fp32_ov_model, data_loader, validator)
     print_statistics(fp32_stats, total_images, total_objects)
 
     print("[6/7] Validate OpenVINO INT8 model:")
-    int8_stats, total_images, total_objects = validate_ov_model(int8_ov_model, tqdm(data_loader), validator)
+    int8_stats, total_images, total_objects = validate_ov_model(int8_ov_model, data_loader, validator)
     print_statistics(int8_stats, total_images, total_objects)
 
     print("[7/7] Report:")

examples/post_training_quantization/onnx/yolov8_quantize_with_accuracy_control/main.py

+34 -37

@@ -13,28 +13,32 @@
 from pathlib import Path
 from typing import Any, Dict, Tuple
 
-import numpy as np
 import onnx
 import onnxruntime
 import torch
-from tqdm import tqdm
+from rich.progress import track
 from ultralytics.cfg import get_cfg
 from ultralytics.data.converter import coco80_to_coco91_class
 from ultralytics.data.utils import check_det_dataset
-from ultralytics.engine.validator import BaseValidator as Validator
 from ultralytics.models.yolo import YOLO
+from ultralytics.models.yolo.segment.val import SegmentationValidator
 from ultralytics.utils import DATASETS_DIR
 from ultralytics.utils import DEFAULT_CFG
 from ultralytics.utils import ops
 from ultralytics.utils.metrics import ConfusionMatrix
 
 import nncf
 
+MODEL_NAME = "yolov8n-seg"
+
 ROOT = Path(__file__).parent.resolve()
 
 
 def validate(
-    model: onnx.ModelProto, data_loader: torch.utils.data.DataLoader, validator: Validator, num_samples: int = None
+    model: onnx.ModelProto,
+    data_loader: torch.utils.data.DataLoader,
+    validator: SegmentationValidator,
+    num_samples: int = None,
 ) -> Tuple[Dict, int, int]:
     validator.seen = 0
     validator.jdict = []
@@ -49,7 +53,7 @@ def validate(
     output_names = [output.name for output in session.get_outputs()]
     num_outputs = len(output_names)
 
-    for batch_i, batch in enumerate(data_loader):
+    for batch_i, batch in enumerate(track(data_loader, description="Validating")):
         if num_samples is not None and batch_i == num_samples:
             break
         batch = validator.preprocess(batch)
@@ -71,7 +75,7 @@ def validate(
     return stats, validator.seen, validator.nt_per_class.sum()
 
 
-def print_statistics(stats: np.ndarray, total_images: int, total_objects: int) -> None:
+def print_statistics(stats: Dict[str, float], total_images: int, total_objects: int) -> None:
     print("Metrics(Box):")
     mp, mr, map50, mean_ap = (
         stats["metrics/precision(B)"],
@@ -84,38 +88,35 @@ def print_statistics(stats: np.ndarray, total_images: int, total_objects: int) -
     pf = "%20s" + "%12i" * 2 + "%12.3g" * 4  # print format
     print(pf % ("all", total_images, total_objects, mp, mr, map50, mean_ap))
 
-    # print the mask metrics for segmentation
-    if "metrics/precision(M)" in stats:
-        print("Metrics(Mask):")
-        s_mp, s_mr, s_map50, s_mean_ap = (
-            stats["metrics/precision(M)"],
-            stats["metrics/recall(M)"],
-            stats["metrics/mAP50(M)"],
-            stats["metrics/mAP50-95(M)"],
-        )
-        # Print results
-        s = ("%20s" + "%12s" * 6) % ("Class", "Images", "Labels", "Precision", "Recall", "mAP@.5", "mAP@.5:.95")
-        print(s)
-        pf = "%20s" + "%12i" * 2 + "%12.3g" * 4  # print format
-        print(pf % ("all", total_images, total_objects, s_mp, s_mr, s_map50, s_mean_ap))
-
-
-def prepare_validation(model: YOLO, args: Any) -> Tuple[Validator, torch.utils.data.DataLoader]:
-    validator = model.task_map[model.task]["validator"](args=args)
-    validator.data = check_det_dataset(args.data)
-    validator.stride = 32
+    print("Metrics(Mask):")
+    s_mp, s_mr, s_map50, s_mean_ap = (
+        stats["metrics/precision(M)"],
+        stats["metrics/recall(M)"],
+        stats["metrics/mAP50(M)"],
+        stats["metrics/mAP50-95(M)"],
+    )
+    # Print results
+    s = ("%20s" + "%12s" * 6) % ("Class", "Images", "Labels", "Precision", "Recall", "mAP@.5", "mAP@.5:.95")
+    print(s)
+    pf = "%20s" + "%12i" * 2 + "%12.3g" * 4  # print format
+    print(pf % ("all", total_images, total_objects, s_mp, s_mr, s_map50, s_mean_ap))
 
-    data_loader = validator.get_dataloader(f"{DATASETS_DIR}/coco128-seg", 1)
 
+def prepare_validation(model: YOLO, args: Any) -> Tuple[SegmentationValidator, torch.utils.data.DataLoader]:
+    validator: SegmentationValidator = model.task_map[model.task]["validator"](args=args)
+    validator.data = check_det_dataset(args.data)
+    validator.stride = 32
     validator.is_coco = True
     validator.class_map = coco80_to_coco91_class()
     validator.names = model.model.names
     validator.metrics.names = validator.names
     validator.nc = model.model.model[-1].nc
-    validator.nm = 32
     validator.process = ops.process_mask
     validator.plot_masks = []
 
+    coco_data_path = DATASETS_DIR / "coco128-seg"
+    data_loader = validator.get_dataloader(coco_data_path.as_posix(), 1)
+
     return validator, data_loader
 
 
@@ -129,7 +130,7 @@ def prepare_onnx_model(model: YOLO, model_name: str) -> Tuple[onnx.ModelProto, P
 
 
 def quantize_ac(
-    model: onnx.ModelProto, data_loader: torch.utils.data.DataLoader, validator_ac: Validator
+    model: onnx.ModelProto, data_loader: torch.utils.data.DataLoader, validator_ac: SegmentationValidator
 ) -> onnx.ModelProto:
     input_name = model.graph.input[0].name
 
@@ -140,7 +141,7 @@ def transform_fn(data_item: Dict):
     def validation_ac(
         val_model: onnx.ModelProto,
         validation_loader: torch.utils.data.DataLoader,
-        validator: Validator,
+        validator: SegmentationValidator,
         num_samples: int = None,
     ) -> float:
         validator.seen = 0
@@ -155,7 +156,6 @@ def validation_ac(
         output_names = [output.name for output in session.get_outputs()]
        num_outputs = len(output_names)
 
-        counter = 0
        for batch_i, batch in enumerate(validation_loader):
            if num_samples is not None and batch_i == num_samples:
                break
@@ -172,13 +172,12 @@ def validation_ac(
            ]
            preds = validator.postprocess(preds)
            validator.update_metrics(preds, batch)
-            counter += 1
+
        stats = validator.get_stats()
        if num_outputs == 1:
            stats_metrics = stats["metrics/mAP50-95(B)"]
        else:
            stats_metrics = stats["metrics/mAP50-95(M)"]
-        print(f"Validate: dataset length = {counter}, metric value = {stats_metrics:.3f}")
        return stats_metrics, None
 
     quantization_dataset = nncf.Dataset(data_loader, transform_fn)
@@ -213,8 +212,6 @@ def validation_ac(
 
 
 def run_example():
-    MODEL_NAME = "yolov8n-seg"
-
     model = YOLO(ROOT / f"{MODEL_NAME}.pt")
     args = get_cfg(cfg=DEFAULT_CFG)
     args.data = "coco128-seg.yaml"
@@ -231,11 +228,11 @@ def run_example():
     print(f"[2/5] Save INT8 model: {int8_model_path}")
 
     print("[3/5] Validate ONNX FP32 model:")
-    fp_stats, total_images, total_objects = validate(fp32_model, tqdm(data_loader), validator)
+    fp_stats, total_images, total_objects = validate(fp32_model, data_loader, validator)
     print_statistics(fp_stats, total_images, total_objects)
 
     print("[4/5] Validate ONNX INT8 model:")
-    q_stats, total_images, total_objects = validate(int8_model, tqdm(data_loader), validator)
+    q_stats, total_images, total_objects = validate(int8_model, data_loader, validator)
     print_statistics(q_stats, total_images, total_objects)
 
     print("[5/5] Report:")
@@ -1,3 +1,4 @@
 ultralytics==8.3.22
 onnx==1.17.0
+onnxruntime==1.19.2
 openvino==2024.5
