
Commit 228dd37

add more strict check on exception for xfail
1 parent a1d92d5 commit 228dd37

3 files changed: +47 -12 lines changed

tests/post_training/README.md (+4 -1)

@@ -166,5 +166,8 @@ To mark a test as expected to fail (xfail) during the compression process with a
 ```yml
 <Name from model scopes>_backend_<BACKEND>:
     ...
-    exception_xfail_reason: "Issue-<jira ticket number>"
+    exception_xfail_reason:
+      type: "<ExceptionType>", e.g. TypeError
+      error_message: "<Error message from Exception>"
+      message: "Issue-<jira ticket number>"
 ```
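The two new fields map directly onto the raised exception: `type` is the exception class name and `error_message` is its message text. A minimal sketch of that mapping (illustration only, not part of the commit; the sample values are taken from the reference-data change below):

```python
# Illustration: where `type` and `error_message` come from for an xfail entry.
try:
    raise TypeError("cannot pickle 'openvino._pyopenvino.Tensor' object")
except Exception as e:
    exc_type = type(e).__name__  # "TypeError" -> the `type` field
    exc_msg = str(e)             # "cannot pickle 'openvino._pyopenvino.Tensor' object" -> the `error_message` field
```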

tests/post_training/data/ptq_reference_data.yaml (+4 -1)

@@ -18,7 +18,10 @@ hf/hf-internal-testing/tiny-random-GPTNeoXForCausalLM_statefull_backend_OPTIMUM:
   metric_value: null
 hf/hf-internal-testing/tiny-random-GPTNeoXForCausalLM_stateless_backend_OPTIMUM:
   metric_value: null
-  exception_xfail_reason: "Issue-161969"
+  exception_xfail_reason:
+    type: "TypeError"
+    error_message: "cannot pickle 'openvino._pyopenvino.Tensor' object"
+    message: "Issue-161969"
 hf/hf-internal-testing/tiny-random-gpt2_backend_FP32:
   metric_value: null
 hf/hf-internal-testing/tiny-random-gpt2_backend_OPTIMUM:

tests/post_training/test_quantize_conformance.py (+39 -10)

@@ -271,22 +271,53 @@ def create_pipeline_kwargs(test_model_param, subset_size, test_case_name, refere
     }
 
 
-def _update_status(pipeline: BaseTestPipeline, errors: List[ErrorReason]) -> List[str]:
+def _get_exception_type_name(report: ErrorReport) -> str:
+    return report.msg.split("|")[0].replace("Exception Type: ", "")
+
+
+def _get_exception_error_message(report: ErrorReport) -> str:
+    return report.msg.split("|")[1]
+
+
+def _are_exceptions_matched(report: ErrorReport, reference_exception: Dict[str, str]) -> bool:
+    return reference_exception["error_message"] == _get_exception_error_message(report) and reference_exception[
+        "type"
+    ] == _get_exception_type_name(report)
+
+
+def _is_error_xfailed(report: ErrorReport, xfail_reason: str, reference_data: Dict[str, Dict[str, str]]) -> bool:
+    if xfail_reason not in reference_data:
+        return False
+
+    if report.reason == ErrorReason.EXCEPTION:
+        return _are_exceptions_matched(report, reference_data[xfail_reason])
+    return True
+
+
+def _get_xfail_message(report: ErrorReport, xfail_reason: str, reference_data: Dict[str, Dict[str, str]]) -> str:
+    if report.reason == ErrorReason.EXCEPTION:
+        return f"XFAIL: {reference_data[xfail_reason]['message']} - {report.msg}"
+    return f"XFAIL: {xfail_reason} - {report.msg}"
+
+
+def _update_status(pipeline: BaseTestPipeline, error_reports: List[ErrorReport]) -> List[str]:
     """
     Updates status of the pipeline based on the errors encountered during the run.
 
     :param pipeline: The pipeline object containing run information.
-    :param errors: List of errors encountered during the run.
+    :param error_reports: List of errors encountered during the run.
     :return: List of unexpected errors.
     """
     pipeline.run_info.status = ""  # Successful status
     xfails, unexpected_errors = [], []
-    for report in errors:
+
+    for report in error_reports:
         xfail_reason = report.reason.value + XFAIL_SUFFIX
-        if xfail_reason in pipeline.reference_data:
-            xfails.append(f"XFAIL: {pipeline.reference_data[xfail_reason]} - {report.msg}")
+        if _is_error_xfailed(report, xfail_reason, pipeline.reference_data):
+            xfails.append(_get_xfail_message(report, xfail_reason, pipeline.reference_data))
         else:
             unexpected_errors.append(report.msg)
+
     if xfails:
         pipeline.run_info.status = "\n".join(xfails)
     if unexpected_errors:

@@ -408,10 +439,8 @@ def run_pipeline(
     try:
         pipeline.run()
     except Exception as e:
-        err_msg = str(e)
-        if not err_msg:
-            err_msg = "Unknown exception"
-        exception_report = ErrorReport(ErrorReason.EXCEPTION, err_msg)
+        message = f"Exception Type: {type(e).__name__}|{str(e)}"
+        exception_report = ErrorReport(ErrorReason.EXCEPTION, message)
         traceback.print_exc()
     finally:
         if pipeline is not None:

@@ -424,7 +453,7 @@ def run_pipeline(
             if extra_columns:
                 pipeline.collect_data_from_stdout(captured.out)
         else:
-            run_info = create_short_run_info(test_model_param, err_msg, test_case_name)
+            run_info = create_short_run_info(test_model_param, message, test_case_name)
             run_info.time_total = time.perf_counter() - start_time
 
     errors = _collect_errors(pipeline, exception_report)
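Taken together, a run is now marked xfail only when both the exception type and its message match the reference entry; any other exception is still reported as an unexpected error. A self-contained sketch of that flow (ErrorReport and ErrorReason are simplified stand-ins for the test-suite classes, and make_report/matches are hypothetical helper names used only for this illustration):

```python
from dataclasses import dataclass
from enum import Enum
from typing import Dict


class ErrorReason(Enum):
    EXCEPTION = "exception"


@dataclass
class ErrorReport:
    reason: ErrorReason
    msg: str


def make_report(exc: Exception) -> ErrorReport:
    # Mirrors the new formatting in run_pipeline: "Exception Type: <name>|<message>".
    return ErrorReport(ErrorReason.EXCEPTION, f"Exception Type: {type(exc).__name__}|{exc}")


def matches(report: ErrorReport, reference_exception: Dict[str, str]) -> bool:
    # Mirrors _are_exceptions_matched: both the type and the message must match.
    # Like the commit's helpers, this assumes the exception message itself contains no "|".
    exc_type = report.msg.split("|")[0].replace("Exception Type: ", "")
    exc_msg = report.msg.split("|")[1]
    return reference_exception["type"] == exc_type and reference_exception["error_message"] == exc_msg


reference = {
    "type": "TypeError",
    "error_message": "cannot pickle 'openvino._pyopenvino.Tensor' object",
    "message": "Issue-161969",
}

# Same type and message as the reference entry -> treated as xfail.
assert matches(make_report(TypeError("cannot pickle 'openvino._pyopenvino.Tensor' object")), reference)
# A different exception is no longer swallowed by the xfail entry.
assert not matches(make_report(ValueError("some other error")), reference)
```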
