diff --git a/.github/workflows/llama_cpp_plugin_build_and_test.yml b/.github/workflows/llama_cpp_plugin_build_and_test.yml
index ae239085d..cf2ffe36e 100644
--- a/.github/workflows/llama_cpp_plugin_build_and_test.yml
+++ b/.github/workflows/llama_cpp_plugin_build_and_test.yml
@@ -4,6 +4,7 @@ on:
   pull_request:
     paths:
       - 'modules/llama_cpp_plugin/**'
+      - '.github/workflows/llama_cpp_plugin_build_and_test.yml'
 
 permissions: read-all
 
@@ -58,12 +59,16 @@ jobs:
           repository: ggerganov/llama.cpp
           path: llama.cpp
 
+      - uses: actions/setup-python@v4
+        with:
+          python-version: '3.10'
+
       - name: Prepare test data - convert test model files
         run: |
-          pip install -r llama.cpp/requirements/requirements-convert-hf-to-gguf.txt
+          python3 -m pip install -r llama.cpp/requirements/requirements-convert_hf_to_gguf.txt
           huggingface-cli download gpt2 model.safetensors tokenizer.json tokenizer_config.json vocab.json config.json merges.txt --local-dir hf_gpt2
           mkdir -p ${{ github.workspace }}/test_data
-          python3 llama.cpp/convert-hf-to-gguf.py hf_gpt2 --outtype f32 --outfile ${{ github.workspace }}/test_data/gpt2.gguf
+          python3 llama.cpp/convert_hf_to_gguf.py hf_gpt2 --outtype f32 --outfile ${{ github.workspace }}/test_data/gpt2.gguf
 
       - name: Install libtbb2
         run: |