Commit 4cbdd2d

t
1 parent 7c9728d commit 4cbdd2d

3 files changed: +21 -14 lines changed


.github/scripts/pytest_md_summary.py

+6-2
@@ -44,8 +44,12 @@ def parse_xml_report(xml_file) -> None:
         elif testcase.find("error") is not None:
             status = "$${\color{red}Error}$$"
         elif testcase.find("skipped") is not None:
-            status = "$${\color{orange}Skipped}$$"
-            message = testcase.find("skipped").get("message", "")
+            if "xfail" in testcase.find("skipped").get("type", ""):
+                status = "$${\color{orange}xfail}$$"
+                message = testcase.find("skipped").get("message", "")
+            else:
+                status = "$${\color{yellow}Skipped}$$"
+                message = testcase.find("skipped").get("message", "")
         else:
             status = "$${\color{green}Ok}$$"
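
The change above makes the Markdown summary distinguish expected failures (xfail) from ordinary skips by checking the type attribute of the <skipped> element in pytest's JUnit XML report. The sketch below reproduces that branching on a hand-written sample report; the sample XML, the simplified status strings (no color markup), and the printing are illustrative additions, not part of the actual script.

```python
# Minimal sketch of the new skipped/xfail branching, standard library only.
# The sample XML is illustrative; it is not taken from a real report.
import xml.etree.ElementTree as ET

SAMPLE = """<testsuite>
  <testcase name="test_ok"/>
  <testcase name="test_xfail">
    <skipped type="pytest.xfail" message="expected failure"/>
  </testcase>
  <testcase name="test_skip">
    <skipped type="pytest.skip" message="not relevant"/>
  </testcase>
</testsuite>"""

root = ET.fromstring(SAMPLE)
for testcase in root.iter("testcase"):
    status, message = "Ok", ""
    skipped = testcase.find("skipped")
    if skipped is not None:
        # The commit splits the old single "Skipped" branch in two:
        # pytest's junitxml plugin typically records xfail-ed tests with
        # type="pytest.xfail", so skips whose type contains "xfail" are
        # reported separately from ordinary skips.
        if "xfail" in skipped.get("type", ""):
            status = "xfail"
        else:
            status = "Skipped"
        message = skipped.get("message", "")
    print(f"{testcase.get('name')}: {status} {message}".rstrip())
```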

.github/workflows/conformance_weight_compression.yml

+2-1
@@ -15,8 +15,9 @@ on:
 
 jobs:
   examples-cpu:
-    name: Weight compression [${{ matrix.group }}/2]
+    name: Weight compression [${{ matrix.group }}/3]
     runs-on: ubuntu-22.04-16-cores
+    timeout-minutes: 40
     strategy:
       fail-fast: false
       matrix:
(third changed file, path not captured in this view)

@@ -1,13 +1,15 @@
 {
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_awq_backup_mode_none_backend_OV]": 269.111,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_OV]": 421.082,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_stateful_backend_OV]": 374.507,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_stateful_backend_OV]": 243.878,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_OV]": 190.344,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_gptq_backend_OV]": 1261.154,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_lora_stateful_backend_OV]": 483.905,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_OV]": 196.905,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int8_data_free_backend_TORCH]": 154.724,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_NF4_scale_estimation_stateful_per_channel_backend_OV]": 256.375,
-    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_OV]": 258.622
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_awq_backup_mode_none_backend_OV]": 269,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_backend_OV]": 421,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_scale_estimation_stateful_backend_OV]": 374,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_awq_stateful_backend_OV]": 243,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_backend_OV]": 190,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_gptq_scale_estimation_stateful_backend_OV]": 1463,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_aware_lora_stateful_backend_OV]": 483,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_FP32]": 0,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_data_free_backend_OV]": 196,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int4_data_free_backend_TORCH]": 133,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_int8_data_free_backend_TORCH]": 154,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_NF4_scale_estimation_stateful_per_channel_backend_OV]": 256,
+    "tests/post_training/test_quantize_conformance.py::test_weight_compression[tinyllama_scale_estimation_per_channel_backend_OV]": 258
 }
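
The remaining change (its file path is not captured in this view) refreshes the recorded per-test durations: values are rounded to whole seconds, two new entries appear (tinyllama_data_free_backend_FP32 and tinyllama_int4_data_free_backend_TORCH), and the GPTQ entry now carries the scale_estimation_stateful suffix with a longer runtime. This looks like the data behind the workflow change above, where the matrix grows from 2 to 3 groups and a 40-minute timeout is added. As a rough sanity check, the sketch below splits the listed durations into three balanced buckets with a greedy longest-first assignment; the local file name test_durations.json and the splitting strategy are assumptions for illustration only.

```python
# Illustrative only: greedily split recorded test durations into three groups
# and report the estimated wall time per group. The file name below is an
# assumption; the commit does not show where the durations JSON lives.
import json

N_GROUPS = 3

with open("test_durations.json") as f:  # hypothetical local copy of the JSON above
    durations = json.load(f)

groups = [[] for _ in range(N_GROUPS)]
totals = [0.0] * N_GROUPS

# Longest-first greedy assignment: always add the next-longest test to the
# currently lightest group, which keeps the groups roughly balanced.
for test, seconds in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
    idx = totals.index(min(totals))
    groups[idx].append(test)
    totals[idx] += seconds

for idx, total in enumerate(totals, start=1):
    print(f"group {idx}: {len(groups[idx])} tests, ~{total / 60:.1f} min")
```

With the values from this commit, each bucket comes out around 24 to 26 minutes of recorded test time, which fits inside the new 40-minute limit; how the job actually assigns tests to groups is not visible here.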
