
Commit b2378e4

Add Test for Optimum Intel (#2674)
1 parent 4183b44 commit b2378e4

2 files changed (+22 -9 lines changed)


.github/workflows/test.yml (+20)
@@ -30,6 +30,25 @@ jobs:
       - run: helm-run --run-entries simple1:model=simple/model1 --max-eval-instances 10 --suite test
       - run: helm-summarize --suite test
       - run: helm-server --help
+
+  install_openvino:
+    # Tests that the Optimum Intel command works when only installing required dependencies
+    name: Run Optimum Intel with minimal dependencies only
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.8", "3.9", "3.10"]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: pip
+      - run: python3 -m pip install --upgrade build
+      - run: python3 -m build
+      - run: python3 -m pip install "$(ls dist/crfm_helm-*.whl)[openvino]"
+      - run: helm-run --run-entries boolq:model=hf-internal-testing/tiny-random-MistralForCausalLM --enable-huggingface-models hf-internal-testing/tiny-random-MistralForCausalLM --suite v1 --max-eval-instances 10 --openvino

   test:
     name: Run all tests
@@ -59,3 +78,4 @@ jobs:
       - name: Run entire pipeline quickly without any data
         # Checking RunSpecs with openai/davinci should be comprehensive enough
         run: source venv/bin/activate && helm-run --suite test -m 100 --skip-instances --models-to-run openai/davinci --exit-on-error
+
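
For reference, the new job's final step can be reproduced locally after installing the built wheel with the [openvino] extra, as in the workflow's pip install step. The sketch below is illustrative only: it shells out to the same helm-run invocation used in CI and assumes helm-run is on the PATH.

# Illustrative local reproduction of the CI smoke test. Assumes the package was
# installed with the [openvino] extra, e.g. via the workflow's
#   python3 -m pip install "$(ls dist/crfm_helm-*.whl)[openvino]"
import subprocess

MODEL = "hf-internal-testing/tiny-random-MistralForCausalLM"

subprocess.run(
    [
        "helm-run",
        "--run-entries", f"boolq:model={MODEL}",
        "--enable-huggingface-models", MODEL,
        "--suite", "v1",
        "--max-eval-instances", "10",
        "--openvino",
    ],
    check=True,  # fail loudly on a non-zero exit code, mirroring a failed CI step
)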

src/helm/clients/huggingface_client.py (+2 -9)
@@ -74,31 +74,24 @@ def __init__(
             OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \
             Intel® architectures using OpenVINO™ runtime.
             """
-            from pathlib import Path
             from helm.common.optional_dependencies import handle_module_not_found_error

             try:
                 from optimum.intel.openvino import OVModelForCausalLM
             except ModuleNotFoundError as e:
                 handle_module_not_found_error(e, ["openvino"])

-            model_file = Path(pretrained_model_name_or_path) / "openvino_model.xml"
-            if model_file.exists():
-                export = False
-            else:
-                export = True
-
             self.device = "cpu"
             # Security issue: currently we trust remote code by default.
             # We retain this temporarily to maintain reverse compatibility.
             # TODO: Delete if-else and don't set trust_remote_code=True
             if "trust_remote_code" in kwargs:
                 self.model = OVModelForCausalLM.from_pretrained(
-                    pretrained_model_name_or_path, export=export, **kwargs
+                    pretrained_model_name_or_path, export=True, **kwargs
                 ).to(self.device)
             else:
                 self.model = OVModelForCausalLM.from_pretrained(
-                    pretrained_model_name_or_path, export=export, trust_remote_code=True, **kwargs
+                    pretrained_model_name_or_path, export=True, trust_remote_code=True, **kwargs
                 ).to(self.device)
         else:
             # Security issue: currently we trust remote code by default.
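
The simplification above removes the on-disk check for a pre-exported openvino_model.xml and always passes export=True, so Optimum Intel converts the Hugging Face checkpoint to OpenVINO IR at load time. Below is a minimal standalone sketch of that loading path, using the same tiny test model as the new CI job; the tokenizer and generate call are illustrative and not part of this diff.

# Minimal sketch, assuming optimum-intel and transformers are installed (the
# [openvino] extra): export the checkpoint to OpenVINO IR at load time and run
# a short CPU generation. No pre-exported openvino_model.xml is needed.
from optimum.intel.openvino import OVModelForCausalLM
from transformers import AutoTokenizer

model_name = "hf-internal-testing/tiny-random-MistralForCausalLM"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = OVModelForCausalLM.from_pretrained(model_name, export=True).to("cpu")

inputs = tokenizer("Hello, OpenVINO", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=8)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))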
