Skip to content

Commit ce84be1

Browse files
committed
Update code
1 parent 8a9dbb9 commit ce84be1

File tree

2 files changed: +3 −4 lines changed

optimum/intel/neural_compressor/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
 # limitations under the License.

 from ..utils.import_utils import is_diffusers_available
-from .configuration import INCConfig, WeightOnlyQuantConfig
+from .configuration import INCConfig
 from .modeling_base import (
     INCModel,
     INCModelForMaskedLM,

optimum/intel/neural_compressor/configuration.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@

 from typing import Dict, Optional, Union

-from intel_extension_for_transformers.transformers.utils import WeightOnlyQuantConfig
 from neural_compressor.config import DistillationConfig, WeightPruningConfig, _BaseQuantizationConfig

 from optimum.configuration_utils import BaseConfig
@@ -36,7 +35,7 @@ class INCConfig(BaseConfig):

     def __init__(
         self,
-        quantization: Optional[Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]] = None,
+        quantization=None,
         pruning: Optional[Union[Dict, _BaseQuantizationConfig]] = None,
         distillation: Optional[Union[Dict, _BaseQuantizationConfig]] = None,
         save_onnx_model: bool = False,
@@ -51,7 +50,7 @@ def __init__(
         self.save_onnx_model = save_onnx_model

     @staticmethod
-    def _create_quantization_config(config: Union[Dict, _BaseQuantizationConfig, WeightOnlyQuantConfig]):
+    def _create_quantization_config(config):
         # TODO : add activations_dtype and weights_dtype
         if isinstance(config, _BaseQuantizationConfig):
             approach = _quantization_model[config.approach]

0 commit comments

Comments
 (0)