@@ -38,11 +38,16 @@ jobs:
38
38
- name : CMake - build
39
39
run : cmake --build build -j`nproc` -- llama_cpp_plugin llama_cpp_e2e_tests
40
40
41
+
42
+      - name: Change dir to build folder
43
+        run: cd ${{ github.workspace }}/openvino/bin/intel64/Release/  # NOTE(review): cd in one step does not persist into later steps — confirm this step is actually needed
44
+
45
+
41
46
- name : Upload build artifacts
42
47
uses : alehechka/upload-tartifact@v2
43
48
with :
44
49
name : build_artifacts
45
- path : ${{ github.workspace }}/openvino/bin/intel64/Release /
50
+          path: ${{ github.workspace }}/openvino/bin/intel64/Release/
46
51
47
52
test_ubuntu20 :
48
53
needs : build_ubuntu20
@@ -52,17 +57,20 @@ jobs:
52
57
uses : alehechka/download-tartifact@v2
53
58
with :
54
59
name : build_artifacts
55
- path : binaries
60
+          path: ${{ github.workspace }}/binaries
56
61
57
- - name : Prepare test data
62
+ - name : Prepare test data - checkout llama.cpp repo
58
63
uses : actions/checkout@v3
59
64
with :
60
65
repository : ggerganov/llama.cpp
61
66
path : llama.cpp
62
- run : pip install llama.cpp/requirements/requirements-convert-hf-to-gguf.txt
63
- run : huggingface-cli huggingface-cli download gpt2 model.safetensors tokenizer.json tokenizer_config.json vocab.json config.json merges.txt --local-dir hf_gpt2
64
- run : mkdir -p ${{ github.workspace }}/test_data
65
- run : python3 llama.cpp/convert-hf-to-gguf.py hf_gpt2 --outtype f32 --outfile ${{ github.workspace }}/test_data/gpt2.gguf
67
+
68
+ - name : Prepare test data - convert test model files
69
+ run : |
70
+ pip install -r llama.cpp/requirements/requirements-convert-hf-to-gguf.txt
71
+ huggingface-cli download gpt2 model.safetensors tokenizer.json tokenizer_config.json vocab.json config.json merges.txt --local-dir hf_gpt2
72
+ mkdir -p ${{ github.workspace }}/test_data
73
+ python3 llama.cpp/convert-hf-to-gguf.py hf_gpt2 --outtype f32 --outfile ${{ github.workspace }}/test_data/gpt2.gguf
66
74
67
75
- name : Run E2E tests
68
76
run : ${{ github.workspace }}/binaries/llama_cpp_e2e_tests
0 commit comments