From 9b1593e6e554ac0f85954cbc39933e5b723d51e4 Mon Sep 17 00:00:00 2001
From: Nikita Savelyev
Date: Thu, 27 Feb 2025 15:49:14 +0100
Subject: [PATCH] Add kwargs to quantize calls

---
 nncf/quantization/quantize_model.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/nncf/quantization/quantize_model.py b/nncf/quantization/quantize_model.py
index 7c2126b720b..ba3b036e3c8 100644
--- a/nncf/quantization/quantize_model.py
+++ b/nncf/quantization/quantize_model.py
@@ -137,6 +137,7 @@ def quantize(
     model_type: Optional[ModelType] = None,
     ignored_scope: Optional[IgnoredScope] = None,
     advanced_parameters: Optional[AdvancedQuantizationParameters] = None,
+    **kwargs,
 ) -> TModel:
     """
     Applies post-training quantization to the provided model.
@@ -311,6 +312,7 @@ def quantize_with_accuracy_control(
     ignored_scope: Optional[IgnoredScope] = None,
     advanced_quantization_parameters: Optional[AdvancedQuantizationParameters] = None,
     advanced_accuracy_restorer_parameters: Optional[AdvancedAccuracyRestorerParameters] = None,
+    **kwargs,
 ) -> TModel:
     """
     Applies post-training quantization algorithm with accuracy control to provided model.
@@ -441,6 +443,7 @@ def compress_weights(
     lora_correction: Optional[bool] = None,
     backup_mode: Optional[BackupMode] = None,
     advanced_parameters: Optional[AdvancedCompressionParameters] = None,
+    **kwargs,
 ) -> TModel:
     """
     Compress model weights.
@@ -658,6 +661,7 @@ def quantize_with_tune_hyperparams(
     model_type: Optional[ModelType] = None,
     ignored_scope: Optional[IgnoredScope] = None,
     advanced_quantization_parameters: Optional[AdvancedQuantizationParameters] = None,
+    **kwargs,
 ) -> TModel:
     """
     Applies post-training quantization algorithm with tune hyperparameters to provided model.
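
Usage sketch (illustrative, not part of the patch): with **kwargs added to the public signatures, a caller can pass extra keyword arguments to nncf.quantize() without hitting a TypeError at the API boundary. The toy model, the calibration data, and the example_backend_option keyword below are assumptions made for illustration; the patch itself does not show how such arguments are consumed downstream.

    # Minimal sketch assuming the PyTorch backend of NNCF.
    import torch
    import nncf

    model = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU())
    calibration_data = [torch.randn(1, 8) for _ in range(10)]
    calibration_dataset = nncf.Dataset(calibration_data, transform_func=lambda x: x)

    quantized_model = nncf.quantize(
        model,
        calibration_dataset,
        subset_size=10,
        # Hypothetical extra keyword: accepted by the patched signature via **kwargs;
        # whether it is forwarded to the backend implementation is not shown in this patch.
        example_backend_option=True,
    )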