diff --git a/tests/post_training/data/ptq_reference_data.yaml b/tests/post_training/data/ptq_reference_data.yaml index 54b03f8c2c7..338b1ebb0c8 100644 --- a/tests/post_training/data/ptq_reference_data.yaml +++ b/tests/post_training/data/ptq_reference_data.yaml @@ -42,16 +42,8 @@ torchvision/resnet18_backend_CUDA_TORCH: metric_value: 0.69152 torchvision/resnet18_backend_FX_TORCH: metric_value: 0.6946 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted. Failed operations detailed log:\n-- torch::None with a message:\nNone constant cannot be converted to OpenVINO opset and should be removed by consuming operation.\nSummary:\n-- No conversion rule found for operations: aten.adaptive_avg_pool2d.default, aten.conv2d.default, aten.linear.default, aten.max_pool2d.default\n-- Conversion is failed for: torch::None\n" - message: "Issue-162009" torchvision/resnet18_backend_CUDA_FX_TORCH: metric_value: 0.6946 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted. 
Failed operations detailed log:\n-- torch::None with a message:\nNone constant cannot be converted to OpenVINO opset and should be removed by consuming operation.\nSummary:\n-- No conversion rule found for operations: aten.adaptive_avg_pool2d.default, aten.conv2d.default, aten.linear.default, aten.max_pool2d.default\n-- Conversion is failed for: torch::None\n" - message: "Issue-162009" torchvision/mobilenet_v3_small_BC_backend_FP32: metric_value: 0.6766 torchvision/mobilenet_v3_small_BC_backend_OV: @@ -60,48 +52,24 @@ torchvision/mobilenet_v3_small_BC_backend_ONNX: metric_value: 0.6679 torchvision/mobilenet_v3_small_BC_backend_FX_TORCH: metric_value: 0.6679 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted. Failed operations detailed log:\n-- torch::None with a message:\nNone constant cannot be converted to OpenVINO opset and should be removed by consuming operation.\nSummary:\n-- No conversion rule found for operations: aten.adaptive_avg_pool2d.default, aten.conv2d.default, aten.linear.default\n-- Conversion is failed for: torch::None\n" - message: "Issue-162009" torchvision/mobilenet_v3_small_BC_backend_CUDA_FX_TORCH: metric_value: 0.6664 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted. 
Failed operations detailed log:\n-- torch::None with a message:\nNone constant cannot be converted to OpenVINO opset and should be removed by consuming operation.\nSummary:\n-- No conversion rule found for operations: aten.adaptive_avg_pool2d.default, aten.conv2d.default, aten.linear.default\n-- Conversion is failed for: torch::None\n" - message: "Issue-162009" torchvision/vit_b_16_backend_FP32: metric_value: 0.8107 torchvision/vit_b_16_backend_OV: metric_value: 0.80948 torchvision/vit_b_16_backend_FX_TORCH: metric_value: 0.80922 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted.\nSummary:\n-- No conversion rule found for operations: aten.conv2d.default, aten.layer_norm.default, aten.linear.default, aten.scaled_dot_product_attention.default\n" - message: "Issue-162009" torchvision/vit_b_16_backend_CUDA_FX_TORCH: metric_value: 0.80922 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted.\nSummary:\n-- No conversion rule found for operations: aten.conv2d.default, aten.layer_norm.default, aten.linear.default, aten.scaled_dot_product_attention.default\n" - message: "Issue-162009" torchvision/swin_v2_s_backend_FP32: metric_value: 0.83712 torchvision/swin_v2_s_backend_OV: metric_value: 0.83638 torchvision/swin_v2_s_backend_FX_TORCH: metric_value: 0.8360 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted.\nSummary:\n-- No conversion rule found for operations: aten.adaptive_avg_pool2d.default, aten.conv2d.default, 
aten.layer_norm.default, aten.linear.default, aten.matmul.default, aten.pad.default, aten.softmax.int, aten.where.ScalarSelf\n" - message: "Issue-162009" torchvision/swin_v2_s_backend_CUDA_FX_TORCH: metric_value: 0.8360 - exception_xfail_reason: - type: "OpConversionFailure" - error_message: "Check 'is_conversion_successful' failed at src/frontends/pytorch/src/frontend.cpp:171:\nFrontEnd API failed with OpConversionFailure:\nModel wasn't fully converted.\nSummary:\n-- No conversion rule found for operations: aten.adaptive_avg_pool2d.default, aten.conv2d.default, aten.layer_norm.default, aten.linear.default, aten.matmul.default, aten.pad.default, aten.softmax.int, aten.where.ScalarSelf\n" - message: "Issue-162009" timm/crossvit_9_240_backend_CUDA_TORCH: metric_value: 0.7275 timm/crossvit_9_240_backend_FP32: diff --git a/tests/post_training/pipelines/base.py b/tests/post_training/pipelines/base.py index b187da32224..fc8a9b695fb 100644 --- a/tests/post_training/pipelines/base.py +++ b/tests/post_training/pipelines/base.py @@ -485,6 +485,25 @@ def compress(self) -> None: self.run_info.compression_memory_usage = memory_usage(self._compress, max_usage=True) self.run_info.time_compression = time.perf_counter() - start_time + def _rename_files(self, folder_path, new_name): + model_folder = folder_path / "model" + bin_file = None + xml_file = None + for file in os.listdir(model_folder): + if file.endswith(".bin"): + bin_file = file + elif file.endswith(".xml"): + xml_file = file + if bin_file is None or xml_file is None: + return + bin_new_path = folder_path / f'{new_name}.bin' + xml_new_path = folder_path / f'{new_name}.xml' + + os.rename(os.path.join(model_folder, bin_file), bin_new_path) + os.rename(os.path.join(model_folder, xml_file), xml_new_path) + + os.rmdir(model_folder) + def save_compressed_model(self) -> None: """ Save compressed model to IR. 
@@ -500,9 +519,15 @@ def save_compressed_model(self) -> None: ov.serialize(ov_model, self.path_compressed_ir) elif self.backend in FX_BACKENDS: exported_model = torch.export.export(self.compressed_model.cpu(), (self.dummy_tensor.cpu(),)) - ov_model = ov.convert_model(exported_model, example_input=self.dummy_tensor.cpu(), input=self.input_size) - ov_model.reshape(self.input_size) - ov.serialize(ov_model, self.path_compressed_ir) + # TODO Uncomment these lines after Issue - 162009 + # ov_model = ov.convert_model(exported_model, example_input=self.dummy_tensor.cpu(), input=self.input_size) + # ov_model.reshape(self.input_size) + # ov.serialize(ov_model, self.path_compressed_ir) + # TODO Remove after Issue - 162009 + torch.export.save(exported_model, self.output_model_dir / "model.pt2") + mod = torch.compile(exported_model.module(), backend="openvino", options={"model_caching": True, "cache_dir": str(self.output_model_dir)}) + mod(self.dummy_tensor) + self._rename_files(self.output_model_dir, "model") if self.backend == BackendType.CUDA_FX_TORCH: self.model = self.model.cuda() diff --git a/tests/post_training/pipelines/image_classification_base.py b/tests/post_training/pipelines/image_classification_base.py index b3ba58f8a36..b37e0a3ec6b 100644 --- a/tests/post_training/pipelines/image_classification_base.py +++ b/tests/post_training/pipelines/image_classification_base.py @@ -94,7 +94,7 @@ def _validate(self) -> None: predictions = np.zeros(dataset_size) references = -1 * np.ones(dataset_size) - if self.backend in FX_BACKENDS and self.torch_compile_validation: + if self.backend in FX_BACKENDS: predictions, references = self._validate_torch_compile(val_loader, predictions, references) else: predictions, references = self._validate_ov(val_loader, predictions, references, dataset_size) diff --git a/tests/post_training/pipelines/image_classification_torchvision.py b/tests/post_training/pipelines/image_classification_torchvision.py index 386ff834b86..54b2d00b75f 100644 
--- a/tests/post_training/pipelines/image_classification_torchvision.py +++ b/tests/post_training/pipelines/image_classification_torchvision.py @@ -130,8 +130,11 @@ def _dump_model_fp32(self) -> None: if self.backend in FX_BACKENDS: exported_model = torch.export.export(self.model.cpu(), (self.dummy_tensor.cpu(),)) - ov_model = ov.convert_model(exported_model, example_input=self.dummy_tensor, input=self.input_size) - ov.serialize(ov_model, self.fp32_model_dir / "fx_model_fp32.xml") + # TODO Uncomment these lines after Issue - 162009 + # ov_model = ov.convert_model(exported_model, example_input=self.dummy_tensor, input=self.input_size) + # ov.serialize(ov_model, self.fp32_model_dir / "fx_model_fp32.xml") + # TODO Remove after Issue - 162009 + torch.export.save(exported_model, self.fp32_model_dir / "fx_model_fp32.pt2") if self.backend is BackendType.CUDA_FX_TORCH: self.model = self.model.cuda()