Commit 033be9b

[TESTS] Make a common entry point for conformance tests (#3265)
### Changes

Refactors the conformance test suite to enhance maintainability and reduce potential bugs:

1) Unified test entry point
2) Centralized Fail/XFail logic

### Reason for changes

1) Reduce code complexity
2) Reduce the potential number of bugs

### Tests

- WC run - https://github.com/openvinotoolkit/nncf/actions/runs/13239414168 - passed
- ptq run - job/manual/job/post_training_quantization/608/ - in progress
- WC CI - job/manual/job/post_training_weight_compression/317 - passed
1 parent 93192d6 commit 033be9b

11 files changed, +542 −563 lines

tests/post_training/README.md (+11)
@@ -160,3 +160,14 @@ To mark a test as expected to fail (xfail) when a number of compression operations
   ...
   num_compressed_xfail_reason: "Issue-<jira ticket number>"
 ```
+
+To mark a test as expected to fail (xfail) during the compression process with an exception:
+
+```yml
+<Name from model scopes>_backend_<BACKEND>:
+  ...
+  exception_xfail_reason:
+    type: "<ExceptionType>", e.g. TypeError
+    error_message: "<Error message from Exception>"
+    message: "Issue-<jira ticket number>"
+```
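
For context, the centralized XFail logic can consume such an entry roughly as follows. This is a minimal sketch, assuming a hypothetical `maybe_xfail_on_exception` helper and a plain dict loaded from the reference data yaml; the suite's actual helper names may differ.

```python
import pytest


def maybe_xfail_on_exception(test_ref_data: dict, exc: Exception) -> None:
    """Turn an expected exception into xfail, based on `exception_xfail_reason`.

    `test_ref_data` is the per-test-case dict loaded from the reference data
    yaml; the helper name and dict shape here are assumptions for illustration.
    """
    reason = test_ref_data.get("exception_xfail_reason")
    if (
        reason is not None
        and type(exc).__name__ == reason["type"]
        and reason["error_message"] in str(exc)
    ):
        pytest.xfail(reason["message"])
    raise exc  # anything else is a genuine failure
```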

tests/post_training/conftest.py (+67)
@@ -9,6 +9,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from pathlib import Path
+
+import pytest
+
 
 def pytest_addoption(parser):
     parser.addoption("--data", action="store", help="Data directory")
@@ -35,3 +39,66 @@ def pytest_addoption(parser):
         help="Report memory using MemoryMonitor from tools/memory_monitor.py. "
         "Warning: currently, reported memory values are not always reproducible.",
     )
+
+
+@pytest.fixture(scope="session", name="data_dir")
+def fixture_data(pytestconfig):
+    if pytestconfig.getoption("data") is None:
+        msg = "This test requires the --data argument to be specified."
+        raise ValueError(msg)
+    return Path(pytestconfig.getoption("data"))
+
+
+@pytest.fixture(scope="session", name="output_dir")
+def fixture_output(pytestconfig):
+    return Path(pytestconfig.getoption("output"))
+
+
+@pytest.fixture(scope="session", name="no_eval")
+def fixture_no_eval(pytestconfig):
+    return pytestconfig.getoption("no_eval")
+
+
+@pytest.fixture(scope="session", name="batch_size")
+def fixture_batch_size(pytestconfig):
+    return pytestconfig.getoption("batch_size")
+
+
+@pytest.fixture(scope="session", name="subset_size")
+def fixture_subset_size(pytestconfig):
+    return pytestconfig.getoption("subset_size")
+
+
+@pytest.fixture(scope="session", name="run_fp32_backend")
+def fixture_run_fp32_backend(pytestconfig):
+    return pytestconfig.getoption("fp32")
+
+
+@pytest.fixture(scope="session", name="run_torch_cuda_backend")
+def fixture_run_torch_cuda_backend(pytestconfig):
+    return pytestconfig.getoption("cuda")
+
+
+@pytest.fixture(scope="session", name="run_benchmark_app")
+def fixture_run_benchmark_app(pytestconfig):
+    return pytestconfig.getoption("benchmark")
+
+
+@pytest.fixture(scope="session", name="torch_compile_validation")
+def fixture_torch_compile_validation(pytestconfig):
+    return pytestconfig.getoption("torch_compile_validation")
+
+
+@pytest.fixture(scope="session", name="extra_columns")
+def fixture_extra_columns(pytestconfig):
+    return pytestconfig.getoption("extra_columns")
+
+
+@pytest.fixture(scope="session", name="memory_monitor")
+def fixture_memory_monitor(pytestconfig):
+    return pytestconfig.getoption("memory_monitor")
+
+
+@pytest.fixture(scope="session", name="forked")
+def fixture_forked(pytestconfig):
+    return pytestconfig.getoption("forked")
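
Tests can then request these session-scoped values as ordinary fixture arguments instead of touching `pytestconfig` directly. A minimal sketch of a hypothetical consumer (the test name and body are illustrative, not part of the suite):

```python
from pathlib import Path


def test_example(data_dir: Path, output_dir: Path, batch_size, run_benchmark_app):
    # pytest injects the session-scoped option values; the data_dir fixture
    # has already validated that --data was passed on the command line.
    assert data_dir.exists()
    output_dir.mkdir(parents=True, exist_ok=True)
    if run_benchmark_app:
        ...  # e.g. run benchmark_app against the exported model
```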

tests/post_training/data/ptq_reference_data.yaml (+4 −1)
@@ -18,7 +18,10 @@ hf/hf-internal-testing/tiny-random-GPTNeoXForCausalLM_statefull_backend_OPTIMUM:
   metric_value: null
 hf/hf-internal-testing/tiny-random-GPTNeoXForCausalLM_stateless_backend_OPTIMUM:
   metric_value: null
-  xfail_reason: "Issue-161969"
+  exception_xfail_reason:
+    type: "TypeError"
+    error_message: "cannot pickle 'openvino._pyopenvino.Tensor' object"
+    message: "Issue-161969"
 hf/hf-internal-testing/tiny-random-gpt2_backend_FP32:
   metric_value: null
 hf/hf-internal-testing/tiny-random-gpt2_backend_OPTIMUM:
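
A minimal sketch of reading this entry back, assuming PyYAML and a checkout where the path below exists; the key names match the yaml above:

```python
from pathlib import Path

import yaml

# Load the reference data and look up the per-test-case entry.
ref_path = Path("tests/post_training/data/ptq_reference_data.yaml")
ref_data = yaml.safe_load(ref_path.read_text())

case = ref_data["hf/hf-internal-testing/tiny-random-GPTNeoXForCausalLM_stateless_backend_OPTIMUM"]
xfail = case.get("exception_xfail_reason")
if xfail is not None:
    print(xfail["type"], "->", xfail["message"])  # TypeError -> Issue-161969
```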

tests/post_training/experimental/sparsify_activations/model_scope.py (+5 −34)
@@ -9,14 +9,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy
-from typing import Dict, List
-
-import nncf
 from nncf.experimental.torch.sparsify_activations import TargetScope
 from nncf.parameters import CompressWeightsMode
 from tests.post_training.experimental.sparsify_activations.pipelines import ImageClassificationTimmSparsifyActivations
 from tests.post_training.experimental.sparsify_activations.pipelines import LMSparsifyActivations
+from tests.post_training.model_scope import generate_tests_scope
 from tests.post_training.pipelines.base import BackendType
 
 SPARSIFY_ACTIVATIONS_MODELS = [
@@ -30,6 +27,7 @@
     {
         "reported_name": "tinyllama_ffn_sparse20",
         "model_id": "tinyllama/tinyllama-1.1b-step-50k-105b",
+        "model_name": "tinyllama",
         "pipeline_cls": LMSparsifyActivations,
         "compression_params": {
             "compress_weights": None,
@@ -45,6 +43,7 @@
     {
         "reported_name": "tinyllama_int8_asym_data_free_ffn_sparse20",
         "model_id": "tinyllama/tinyllama-1.1b-step-50k-105b",
+        "model_name": "tinyllama",
         "pipeline_cls": LMSparsifyActivations,
         "compression_params": {
             "compress_weights": {
@@ -62,6 +61,7 @@
     {
         "reported_name": "timm/deit3_small_patch16_224",
         "model_id": "deit3_small_patch16_224",
+        "model_name": "timm/deit3_small_patch16_224",
         "pipeline_cls": ImageClassificationTimmSparsifyActivations,
         "compression_params": {},
         "backends": [BackendType.FP32],
@@ -70,6 +70,7 @@
     {
         "reported_name": "timm/deit3_small_patch16_224_qkv_sparse20_fc1_sparse20_fc2_sparse30",
         "model_id": "deit3_small_patch16_224",
+        "model_name": "timm/deit3_small_patch16_224",
         "pipeline_cls": ImageClassificationTimmSparsifyActivations,
         "compression_params": {
             "sparsify_activations": {
@@ -85,34 +86,4 @@
 ]
 
 
-def generate_tests_scope(models_list: List[Dict]) -> Dict[str, Dict]:
-    """
-    Generate tests by names "{reported_name}_backend_{backend}"
-    """
-    tests_scope = {}
-    fp32_models = set()
-    for test_model_param in models_list:
-        model_id = test_model_param["model_id"]
-        reported_name = test_model_param["reported_name"]
-
-        for backend in test_model_param["backends"]:
-            model_param = copy.deepcopy(test_model_param)
-            if "is_batch_size_supported" not in model_param:  # Set default value of is_batch_size_supported.
-                model_param["is_batch_size_supported"] = True
-            test_case_name = f"{reported_name}_backend_{backend.value}"
-            model_param["backend"] = backend
-            model_param.pop("backends")
-            if backend == BackendType.FP32:
-                if model_id in fp32_models:
-                    msg = f"Duplicate test case for {model_id} with FP32 backend"
-                    raise nncf.ValidationError(msg)
-                fp32_models.add(model_id)
-            if test_case_name in tests_scope:
-                msg = f"{test_case_name} already in tests_scope"
-                raise nncf.ValidationError(msg)
-            tests_scope[test_case_name] = model_param
-
-    return tests_scope
-
-
 SPARSIFY_ACTIVATIONS_TEST_CASES = generate_tests_scope(SPARSIFY_ACTIVATIONS_MODELS)
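
With `generate_tests_scope` now imported from the shared `tests.post_training.model_scope` module, both suites build their test scopes the same way. A sketch of how a unified entry point might parametrize over the generated scope (the test function here is hypothetical, not the suite's actual entry point):

```python
import pytest

from tests.post_training.experimental.sparsify_activations.model_scope import SPARSIFY_ACTIVATIONS_TEST_CASES


@pytest.mark.parametrize("test_case_name", SPARSIFY_ACTIVATIONS_TEST_CASES.keys())
def test_sparsify_activations(test_case_name):
    # Each case name is "{reported_name}_backend_{backend}", as produced by
    # generate_tests_scope(); the value carries pipeline_cls, backend, etc.
    model_param = SPARSIFY_ACTIVATIONS_TEST_CASES[test_case_name]
    assert "backend" in model_param and "pipeline_cls" in model_param
```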
