
Commit 84dbb20

Adapt ipex xpu transformers version (#2134)

Signed-off-by: Kaihui-intel <kaihui.tang@intel.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

1 parent b6e5aea commit 84dbb20

File tree

1 file changed: +12 -3 lines changed


neural_compressor/transformers/models/modeling_auto.py (+12 -3)
@@ -758,12 +758,21 @@ class AutoModelForSeq2SeqLM(_BaseINCAutoModelClass):
 
 
 class Qwen2VLForConditionalGeneration(_BaseINCAutoModelClass):
-    ORIG_MODEL = transformers.Qwen2VLForConditionalGeneration
+    if transformers.__version__ >= "4.46":
+        ORIG_MODEL = transformers.Qwen2VLForConditionalGeneration
+    else:
+        logger.warning("please install transformers>=4.46 for quantizing Qwen2VLForConditionalGeneration.")
 
 
 class MllamaForConditionalGeneration(_BaseINCAutoModelClass):
-    ORIG_MODEL = transformers.MllamaForConditionalGeneration
+    if transformers.__version__ >= "4.46":
+        ORIG_MODEL = transformers.MllamaForConditionalGeneration
+    else:
+        logger.warning("please install transformers>=4.46 for quantizing MllamaForConditionalGeneration.")
 
 
 class LlavaForConditionalGeneration(_BaseINCAutoModelClass):
-    ORIG_MODEL = transformers.LlavaForConditionalGeneration
+    if transformers.__version__ >= "4.46":
+        ORIG_MODEL = transformers.LlavaForConditionalGeneration
+    else:
+        logger.warning("please install transformers>=4.46 for quantizing LlavaForConditionalGeneration.")
