Skip to content

Commit 3ff8dc1

Browse files
Update ipex CI to torch 2.6 (#1176)
* update ipex CI Signed-off-by: jiqing-feng <jiqing.feng@intel.com> * update modeling base on torch2.6 Signed-off-by: jiqing-feng <jiqing.feng@intel.com> * fix format Signed-off-by: jiqing-feng <jiqing.feng@intel.com> * ipex model cannot use torch.compile for now Signed-off-by: jiqing-feng <jiqing.feng@intel.com> * add comments Signed-off-by: jiqing-feng <jiqing.feng@intel.com> * fix format Signed-off-by: jiqing-feng <jiqing.feng@intel.com> * add torch version check for compile Signed-off-by: jiqing-feng <jiqing.feng@intel.com> * add missing import --------- Signed-off-by: jiqing-feng <jiqing.feng@intel.com> Co-authored-by: Ella Charlaix <80481427+echarlaix@users.noreply.github.com> Co-authored-by: Ella Charlaix <ella@huggingface.co>
1 parent 4f79e05 commit 3ff8dc1

File tree

2 files changed

+8
-5
lines changed

2 files changed

+8
-5
lines changed

.github/workflows/test_ipex.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,8 @@ jobs:
1818
strategy:
1919
fail-fast: false
2020
matrix:
21-
transformers-version: ["4.47.0", "4.47.1"]
22-
torch-version: ["2.4.0", "2.5.*"]
21+
transformers-version: ["4.47.*"]
22+
torch-version: ["2.6.0"]
2323

2424
runs-on: ubuntu-22.04
2525

optimum/intel/ipex/modeling_base.py

+6-3
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@
5454
_patch_model,
5555
)
5656
from ..utils.constant import _TASK_ALIASES
57-
from ..utils.import_utils import is_ipex_version, is_transformers_version
57+
from ..utils.import_utils import is_ipex_version, is_torch_version, is_transformers_version
5858
from ..utils.modeling_utils import recursive_to_device
5959

6060

@@ -64,8 +64,11 @@
6464
_IPEX_SUPPORT_MODEL_TYPES = ("llama", "bert", "vit", "falcon", "gpt2", "qwen2")
6565
_IPEX_EXPORTED_GENERATION_METHODS = ("sample", "greedy_search", "beam_sample", "beam_search", "assisted_generation")
6666
_IPEX_MINIMUM_VERSION_FOR_COMPILE = "2.5.0"
67-
# TODO: Some models are already fixed in torch 2.6, will enable them when torch upgrading to 2.6
68-
_COMPILE_NOT_READY_MODEL_TYPES = ("electra", "roformer", "gpt_neox", "beit", "llama", "falcon", "gpt2", "qwen2")
67+
# Page attention model cannot use torch.compile for now.
68+
if is_torch_version("<", "2.6"):
69+
_COMPILE_NOT_READY_MODEL_TYPES = ("electra", "roformer", "gpt_neox", "beit", "llama", "falcon", "gpt2", "qwen2")
70+
else:
71+
_COMPILE_NOT_READY_MODEL_TYPES = ("llama", "falcon", "gpt2", "qwen2")
6972

7073

7174
def _is_patched_with_ipex(model, task, use_cache: bool = True):

0 commit comments

Comments (0)