Skip to content

Commit 0ece48b

Browse files
authored
Remove useless transformers version check (#556)
* Remove useless transformers version check * update setup
1 parent 2be2e75 commit 0ece48b

7 files changed

+11
-38
lines changed

optimum/intel/openvino/modeling_base.py

+4-13
Original file line numberDiff line numberDiff line change
@@ -22,22 +22,18 @@
2222
from huggingface_hub import hf_hub_download
2323
from openvino import Core, convert_model
2424
from openvino._offline_transformations import apply_moc_transformations, compress_model_transformation
25-
from transformers import PretrainedConfig
25+
from transformers import GenerationConfig, PretrainedConfig
2626
from transformers.file_utils import add_start_docstrings
27+
from transformers.generation import GenerationMixin
2728

2829
from optimum.exporters.onnx import OnnxConfig
2930
from optimum.modeling_base import OptimizedModel
3031

3132
from ...exporters.openvino import export, main_export
32-
from ..utils.import_utils import is_nncf_available, is_transformers_version
33+
from ..utils.import_utils import is_nncf_available
3334
from .utils import ONNX_WEIGHTS_NAME, OV_XML_FILE_NAME, _print_compiled_model_properties
3435

3536

36-
if is_transformers_version("<", "4.25.0"):
37-
from transformers.generation_utils import GenerationMixin
38-
else:
39-
from transformers.generation import GenerationMixin
40-
4137
core = Core()
4238

4339
logger = logging.getLogger(__name__)
@@ -92,12 +88,7 @@ def __init__(
9288
if enable_compilation:
9389
self.compile()
9490

95-
if is_transformers_version("<=", "4.25.1"):
96-
self.generation_config = None
97-
else:
98-
from transformers import GenerationConfig
99-
100-
self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
91+
self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
10192

10293
@staticmethod
10394
def load_model(file_name: Union[str, Path], load_in_8bit: bool = False):

optimum/intel/openvino/modeling_base_seq2seq.py

+2-9
Original file line numberDiff line numberDiff line change
@@ -21,11 +21,10 @@
2121
import openvino
2222
from huggingface_hub import hf_hub_download
2323
from openvino._offline_transformations import apply_moc_transformations, compress_model_transformation
24-
from transformers import PretrainedConfig
24+
from transformers import GenerationConfig, PretrainedConfig
2525
from transformers.file_utils import add_start_docstrings
2626

2727
from ...exporters.openvino import main_export
28-
from ..utils.import_utils import is_transformers_version
2928
from .modeling_base import OVBaseModel
3029
from .utils import (
3130
ONNX_DECODER_NAME,
@@ -75,13 +74,7 @@ def __init__(
7574
self.encoder_model = encoder
7675
self.decoder_model = decoder
7776
self.decoder_with_past_model = decoder_with_past
78-
79-
if is_transformers_version("<=", "4.25.1"):
80-
self.generation_config = None
81-
else:
82-
from transformers import GenerationConfig
83-
84-
self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
77+
self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
8578

8679
def _save_pretrained(self, save_directory: Union[str, Path]):
8780
"""

optimum/intel/openvino/modeling_decoder.py

+1-7
Original file line numberDiff line numberDiff line change
@@ -25,25 +25,19 @@
2525
from openvino.runtime import Core, Tensor, Type
2626
from transformers import AutoModelForCausalLM, PretrainedConfig
2727
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
28+
from transformers.generation import GenerationMixin
2829
from transformers.modeling_outputs import CausalLMOutputWithPast
2930

3031
from optimum.utils.normalized_config import NormalizedConfigManager
3132

3233
from ...exporters.openvino import ensure_stateful_is_available, main_export, patch_stateful
3334
from ...exporters.openvino.stateful import model_has_state
34-
from ..utils.import_utils import is_transformers_version
3535
from ..utils.modeling_utils import MULTI_QUERY_ATTN_MODELS
3636
from .modeling import _TOKENIZER_FOR_DOC, INPUTS_DOCSTRING, MODEL_START_DOCSTRING, OVModel
3737
from .utils import ONNX_WEIGHTS_NAME, OV_XML_FILE_NAME, STR_TO_OV_TYPE
3838
from .weight_quantization import OVWeightQuantizationConfig, compress_decoder_weights
3939

4040

41-
if is_transformers_version("<", "4.25.0"):
42-
from transformers.generation_utils import GenerationMixin
43-
else:
44-
from transformers.generation import GenerationMixin
45-
46-
4741
logger = logging.getLogger(__name__)
4842

4943
core = Core()

optimum/intel/openvino/modeling_seq2seq.py

+1-6
Original file line numberDiff line numberDiff line change
@@ -32,20 +32,15 @@
3232
WhisperForConditionalGeneration,
3333
)
3434
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
35+
from transformers.generation import GenerationMixin
3536
from transformers.generation.logits_process import WhisperTimeStampLogitsProcessor
3637
from transformers.modeling_outputs import BaseModelOutput, Seq2SeqLMOutput
3738
from transformers.models.whisper.tokenization_whisper import TASK_IDS, TO_LANGUAGE_CODE
3839

39-
from ..utils.import_utils import is_transformers_version
4040
from .modeling_base_seq2seq import OVBaseModelForSeq2SeqLM
4141
from .utils import _print_compiled_model_properties
4242

4343

44-
if is_transformers_version("<", "4.25.0"):
45-
from transformers.generation_utils import GenerationMixin
46-
else:
47-
from transformers.generation import GenerationMixin
48-
4944
if TYPE_CHECKING:
5045
from transformers import PretrainedConfig
5146

setup.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@
1414
INSTALL_REQUIRE = [
1515
"torch>=1.11",
1616
"optimum>=1.14.0",
17-
"transformers>=4.20.0",
17+
"transformers>=4.26.0",
1818
"datasets>=1.4.0",
1919
"sentencepiece",
2020
"scipy",

tests/ipex/test_inference.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ class IPEXIntegrationTest(unittest.TestCase):
6464
"gptj",
6565
"gpt2",
6666
"gpt_neo",
67-
"gpt_bigcode",
67+
# "gpt_bigcode",
6868
"llama",
6969
"opt",
7070
"mpt",

tests/ipex/test_modeling.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -213,7 +213,7 @@ class IPEXModelForCausalLMTest(unittest.TestCase):
213213
"opt",
214214
)
215215
GENERATION_LENGTH = 100
216-
SPEEDUP_CACHE = 1.1
216+
SPEEDUP_CACHE = 1.0
217217

218218
@parameterized.expand(SUPPORTED_ARCHITECTURES)
219219
def test_compare_to_transformers(self, model_arch):

0 commit comments

Comments
 (0)