diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 1212655f080..ff31eda7de9 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -30,6 +30,25 @@ jobs:
       - run: helm-run --run-entries simple1:model=simple/model1 --max-eval-instances 10 --suite test
       - run: helm-summarize --suite test
       - run: helm-server --help
+
+  install_openvino:
+    # Tests that the Optimum Intel command works when only installing required dependencies
+    name: Run Optimum Intel with minimal dependencies only
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.8", "3.9", "3.10"]
+    steps:
+      - uses: actions/checkout@v4
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: pip
+      - run: python3 -m pip install --upgrade build
+      - run: python3 -m build
+      - run: python3 -m pip install "$(ls dist/crfm_helm-*.whl)[openvino]"
+      - run: helm-run --run-entries boolq:model=hf-internal-testing/tiny-random-MistralForCausalLM --enable-huggingface-models hf-internal-testing/tiny-random-MistralForCausalLM --suite v1 --max-eval-instances 10 --openvino
 
   test:
     name: Run all tests
@@ -59,3 +78,4 @@ jobs:
       - name: Run entire pipeline quickly without any data
         # Checking RunSpecs with openai/davinci should be comprehensive enough
         run: source venv/bin/activate && helm-run --suite test -m 100 --skip-instances --models-to-run openai/davinci --exit-on-error
+
\ No newline at end of file
diff --git a/src/helm/clients/huggingface_client.py b/src/helm/clients/huggingface_client.py
index 3099b9ca14f..57035856c63 100644
--- a/src/helm/clients/huggingface_client.py
+++ b/src/helm/clients/huggingface_client.py
@@ -74,7 +74,6 @@ def __init__(
             OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \
             Intel® architectures using OpenVINO™ runtime.
             """
-            from pathlib import Path
             from helm.common.optional_dependencies import handle_module_not_found_error
 
             try:
@@ -82,23 +81,17 @@ def __init__(
             except ModuleNotFoundError as e:
                 handle_module_not_found_error(e, ["openvino"])
 
-            model_file = Path(pretrained_model_name_or_path) / "openvino_model.xml"
-            if model_file.exists():
-                export = False
-            else:
-                export = True
-
             self.device = "cpu"
             # Security issue: currently we trust remote code by default.
             # We retain this temporarily to maintain reverse compatibility.
             # TODO: Delete if-else and don't set trust_remote_code=True
             if "trust_remote_code" in kwargs:
                 self.model = OVModelForCausalLM.from_pretrained(
-                    pretrained_model_name_or_path, export=export, **kwargs
+                    pretrained_model_name_or_path, export=True, **kwargs
                 ).to(self.device)
             else:
                 self.model = OVModelForCausalLM.from_pretrained(
-                    pretrained_model_name_or_path, export=export, trust_remote_code=True, **kwargs
+                    pretrained_model_name_or_path, export=True, trust_remote_code=True, **kwargs
                 ).to(self.device)
         else:
             # Security issue: currently we trust remote code by default.