@@ -178,6 +178,8 @@ def fixture_ptq_report_data(output_dir, run_benchmark_app, pytestconfig):
         if not run_benchmark_app:
             df = df.drop(columns=["FPS"])

+        df = df.drop(columns=["Num sparse activations"])
+
         output_dir.mkdir(parents=True, exist_ok=True)
         output_file = output_dir / "results.csv"

@@ -202,6 +204,7 @@ def fixture_wc_report_data(output_dir, run_benchmark_app, pytestconfig):
             df = df.drop(columns=["FPS"])

         df = df.drop(columns=["Num FQ"])
+        df = df.drop(columns=["Num sparse activations"])

         output_dir.mkdir(parents=True, exist_ok=True)
         output_file = output_dir / "results.csv"
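Note on the two hunks above: DataFrame.drop(columns=...) raises a KeyError if a listed column is absent, so the unconditional drop assumes every report row carries a "Num sparse activations" field. A minimal pandas sketch of that behavior (column names and values here are illustrative only, not taken from the test suite):

import pandas as pd

# Illustrative report frame; only the dropped column name matches the diff.
df = pd.DataFrame([{"Model": "some_model", "Num sparse activations": 0, "FPS": 42.0}])

df = df.drop(columns=["Num sparse activations"])                    # column exists: dropped
df = df.drop(columns=["Num sparse activations"], errors="ignore")   # already gone: no-op instead of KeyError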
@@ -266,7 +269,7 @@ def create_pipeline_kwargs(test_model_param, subset_size, test_case_name, refere
     print(f"PTQ params: {test_model_param['compression_params']}")

     # Get target fp32 metric value
-    model_name = test_case_name.split("_backend_")[0]
+    model_name = test_model_param.get("model_name", test_case_name.split("_backend_")[0])
     test_reference = reference_data[test_case_name]
     test_reference["metric_value_fp32"] = reference_data[f"{model_name}_backend_FP32"]["metric_value"]

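For context, the replacement line prefers an explicit "model_name" entry and only falls back to parsing the test case name. A minimal sketch assuming test_model_param behaves like a plain dict (the sample names below are made up):

test_case_name = "some_model_backend_OV"

# No "model_name" key: fall back to the prefix before "_backend_".
assert {"reported_name": "some_model"}.get("model_name", test_case_name.split("_backend_")[0]) == "some_model"

# An explicit "model_name" key wins over the parsed prefix.
assert {"model_name": "other_model"}.get("model_name", test_case_name.split("_backend_")[0]) == "other_model"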
@@ -297,11 +300,14 @@ def _update_status(pipeline: BaseTestPipeline, errors: List[ErrorReason]) -> Lis
     return unexpected_errors


-def _collect_errors(err_msg: str, pipeline: BaseTestPipeline) -> List[ErrorReason]:
+def _collect_errors(
+    pipeline: BaseTestPipeline,
+    exception_report: Optional[ErrorReport] = None,
+) -> List[ErrorReport]:
     errors = []

-    if err_msg:
-        errors.append(ErrorReport(ErrorReason.EXCEPTION, err_msg))
+    if exception_report:
+        errors.append(exception_report)
         return errors

     run_info = pipeline.run_info
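The hunk above changes _collect_errors to accept a pre-built ErrorReport instead of a raw error string, so the helper no longer needs to know about ErrorReason.EXCEPTION itself. A self-contained sketch of that contract, with stand-in ErrorReason/ErrorReport definitions and without the pipeline argument (the real types live elsewhere in this test suite and may differ):

from dataclasses import dataclass
from enum import Enum
from typing import List, Optional


class ErrorReason(Enum):          # stand-in; only the member used here
    EXCEPTION = "exception"


@dataclass
class ErrorReport:                # stand-in; field names are assumptions
    reason: ErrorReason
    msg: str


def collect_errors(exception_report: Optional[ErrorReport] = None) -> List[ErrorReport]:
    errors: List[ErrorReport] = []
    if exception_report:
        # Early return, as in the diff: an exception short-circuits further collection.
        errors.append(exception_report)
        return errors
    # ... otherwise errors would be gathered from the pipeline's run_info ...
    return errors


assert collect_errors(ErrorReport(ErrorReason.EXCEPTION, "boom"))[0].msg == "boom"
assert collect_errors() == []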
@@ -372,9 +378,7 @@ def run_pipeline(
     memory_monitor: bool,
     use_avx2: Optional[bool] = None,
 ):
-    pipeline = None
-    err_msg = None
-    test_model_param = None
+    pipeline, exception_report, test_model_param = None, None, None
     start_time = time.perf_counter()
     if test_case_name not in reference_data:
         msg = f"{test_case_name} does not exist in 'reference_data.yaml'"
@@ -409,6 +413,7 @@ def run_pipeline(
         err_msg = str(e)
         if not err_msg:
             err_msg = "Unknown exception"
+        exception_report = ErrorReport(ErrorReason.EXCEPTION, err_msg)
         traceback.print_exc()
     finally:
         if pipeline is not None:
@@ -424,7 +429,7 @@ def run_pipeline(
             run_info = create_short_run_info(test_model_param, err_msg, test_case_name)
         run_info.time_total = time.perf_counter() - start_time

-        errors = _collect_errors(err_msg, pipeline)
+        errors = _collect_errors(pipeline, exception_report)
         unexpected_errors = _update_status(pipeline, errors)
         result_data[test_case_name] = run_info

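Taken together with the hunks above, exception_report is initialized to None before the try block, filled in the except block, and consumed in the finally block. A condensed stand-alone sketch of that flow (plain tuples stand in for ErrorReport):

def run_once(fail: bool):
    exception_report = None
    errors = []
    try:
        if fail:
            raise RuntimeError("pipeline failed")
    except Exception as e:
        err_msg = str(e) or "Unknown exception"
        exception_report = ("EXCEPTION", err_msg)   # stand-in for ErrorReport(ErrorReason.EXCEPTION, err_msg)
    finally:
        # Mirrors the diff: the finally block hands whatever except produced to the error collector.
        errors = [exception_report] if exception_report else []
    return errors


assert run_once(True) == [("EXCEPTION", "pipeline failed")]
assert run_once(False) == []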
@@ -495,7 +500,7 @@ def test_weight_compression(
         WC_TEST_CASES,
         wc_result_data,
         output_dir,
-        None,
+        None,  # data_dir is not used in WC
         no_eval,
         batch_size,
         run_fp32_backend,