
Commit 15ddb8f

fix ipex version check
Signed-off-by: jiqing-feng <jiqing.feng@intel.com>
Parent: 1b0dc0d

File tree: 2 files changed (+12, -1 lines)

optimum/intel/ipex/modeling_base.py (+11 lines)

@@ -71,6 +71,17 @@
 _COMPILE_NOT_READY_MODEL_TYPES = ("llama", "falcon", "gpt2", "qwen2")
 
 
+try:
+    import intel_extension_for_pytorch as ipex
+
+    if hasattr(torch, "xpu") and torch.xpu.is_available() and not ipex._C._has_xpu():
+        logger.warning(
+            "Detect you have XPU device but the ipex do not support XPU, please install a xpu version ipex by checking https://pytorch-extension.intel.com/installation?platform=gpu"
+        )
+except ImportError:
+    logger.warning("No intel_extension_for_pytorch found, please `pip install intel_extension_for_pytorch`")
+
+
 def _is_patched_with_ipex(model, task, use_cache: bool = True):
     if is_ipex_version("<", _IPEX_MINIMUM_VERSION_FOR_PATCHING):
         return False

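For context, the new guard runs once when modeling_base.py is imported. The same check can be reproduced in a standalone script to inspect an environment; the following is a minimal sketch based on the hunk above (ipex._C._has_xpu() is the same private IPEX attribute the commit relies on, so it is wrapped in getattr in case a given build lacks it):

import torch

try:
    import intel_extension_for_pytorch as ipex

    print(f"torch {torch.__version__} / ipex {ipex.__version__}")

    # torch.xpu only exists on XPU-enabled PyTorch builds; is_available()
    # additionally requires a working XPU driver and device.
    has_xpu_device = hasattr(torch, "xpu") and torch.xpu.is_available()

    # Private IPEX attribute used by the commit; guard with getattr since
    # it may not be present in every IPEX build.
    ipex_supports_xpu = getattr(ipex._C, "_has_xpu", lambda: False)()

    if has_xpu_device and not ipex_supports_xpu:
        print(
            "XPU device detected but this IPEX build has no XPU support; "
            "see https://pytorch-extension.intel.com/installation?platform=gpu"
        )
except ImportError:
    print("intel_extension_for_pytorch is not installed")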
setup.py (+1, -1 line)

@@ -67,7 +67,7 @@
     "nncf": ["nncf>=2.14.0"],
     "openvino": ["nncf>=2.14.0", "openvino>=2024.5.0", "openvino-tokenizers>=2024.5.0"],
     "neural-compressor": ["neural-compressor[pt]>3.0", "accelerate", "transformers<4.46"],
-    "ipex": ["intel-extension-for-pytorch>=2.4", "transformers>4.48,<4.50", "accelerate"],
+    "ipex": ["intel-extension-for-pytorch>=2.6", "transformers>4.48,<4.50", "accelerate"],
     "diffusers": ["diffusers"],
     "quality": QUALITY_REQUIRE,
     "tests": TESTS_REQUIRE,

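The setup.py change only raises the install-time floor for the ipex extra to 2.6. A sketch of an equivalent runtime assertion, assuming only the packaging library and an installed intel-extension-for-pytorch, could look like:

from packaging import version

import intel_extension_for_pytorch as ipex

# Mirror the new install-time floor (>= 2.6) at runtime before relying on
# IPEX-only code paths; packaging handles local tags such as "2.6.0+cpu".
if version.parse(ipex.__version__) < version.parse("2.6"):
    raise RuntimeError(
        f"intel-extension-for-pytorch {ipex.__version__} is installed, but >= 2.6 is required; "
        "please upgrade intel-extension-for-pytorch."
    )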