name: llama_cpp_plugin_build_and_test

on:
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
    paths:
      - 'modules/llama_cpp_plugin/**'

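# Two jobs: build_ubuntu20 compiles the plugin and its end-to-end test binary against a
# fresh checkout of openvinotoolkit/openvino, and test_ubuntu20 downloads the resulting
# build artifacts and runs those tests.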
jobs:
  build_ubuntu20:
    runs-on: ubuntu-20.04
    steps:
      - name: Setup cmake
        uses: jwlawson/actions-setup-cmake@v1.14
        with:
          cmake-version: '3.24.x'

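      # The plugin sources live in this repository (openvino_contrib); the core OpenVINO
      # repository is checked out next to them so the plugin can be built as an extra
      # module of the OpenVINO CMake tree.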
      - name: Checkout openvino_contrib
        uses: actions/checkout@v3
        with:
          submodules: recursive
          path: openvino_contrib

      - name: Checkout openvino
        uses: actions/checkout@v3
        with:
          submodules: recursive
          repository: openvinotoolkit/openvino
          path: openvino

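      # Configure an OpenVINO build with the llama_cpp_plugin module pulled in through
      # OPENVINO_EXTRA_MODULES; ENABLE_PLUGINS_XML and ENABLE_LLAMA_CPP_PLUGIN_REGISTRATION
      # make the build register the plugin in plugins.xml so it can be discovered at runtime.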
      - name: CMake - configure
        run: cmake -B build -DCMAKE_BUILD_TYPE=Release -DOPENVINO_EXTRA_MODULES=${{ github.workspace }}/openvino_contrib/modules/llama_cpp_plugin -DENABLE_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON -DENABLE_PLUGINS_XML=ON -DENABLE_LLAMA_CPP_PLUGIN_REGISTRATION=ON openvino

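      # Build only the plugin and the E2E test targets (plus their dependencies) instead of
      # the full OpenVINO target set.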
      - name: CMake - build
        run: cmake --build build -j`nproc` -- llama_cpp_plugin llama_cpp_e2e_tests
@@ -41,40 +41,40 @@ jobs:
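      # The build outputs are handed to the test job through a workflow artifact.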
      - name: Upload build artifacts
        uses: actions/upload-artifact@v4
        with:
          name: build_artifacts
          path: ${{ github.workspace }}/openvino/bin/intel64/Release/

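  # The test job starts from a clean runner: it only needs the prebuilt binaries from
  # build_ubuntu20 plus the GGUF test model prepared below.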
  test_ubuntu20:
    needs: build_ubuntu20
    runs-on: ubuntu-20.04
    steps:
      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          name: build_artifacts
          path: ${{ github.workspace }}/binaries

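      # llama.cpp is checked out only for its HF-to-GGUF conversion script, used in the
      # next step to produce the test model.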
      - name: Prepare test data - checkout llama.cpp repo
        uses: actions/checkout@v3
        with:
          repository: ggerganov/llama.cpp
          path: llama.cpp

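      # Download the GPT-2 weights from the Hugging Face Hub and convert them to an FP32
      # GGUF file under test_data/, which the E2E tests are expected to load.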
      - name: Prepare test data - convert test model files
        run: |
          pip install -r llama.cpp/requirements/requirements-convert-hf-to-gguf.txt
          huggingface-cli download gpt2 model.safetensors tokenizer.json tokenizer_config.json vocab.json config.json merges.txt --local-dir hf_gpt2
          mkdir -p ${{ github.workspace }}/test_data
          python3 llama.cpp/convert-hf-to-gguf.py hf_gpt2 --outtype f32 --outfile ${{ github.workspace }}/test_data/gpt2.gguf

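      # The test binary needs oneTBB at runtime and it is not part of the uploaded artifact,
      # so fetch the archive OpenVINO uses and point LD_LIBRARY_PATH at it in the last step.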
      - name: Install libtbb2
        run: |
          wget https://storage.openvinotoolkit.org/dependencies/thirdparty/linux/oneapi-tbb-2021.2.4-lin.tgz
          mkdir -p tbb
          tar xvzf oneapi-tbb-2021.2.4-lin.tgz

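      # Artifacts do not keep the execute bit, so restore it before launching the test
      # binary with the plugin libraries and oneTBB on LD_LIBRARY_PATH.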
      - name: Run E2E tests
        run: |
          chmod +x ${{ github.workspace }}/binaries/llama_cpp_e2e_tests
          export LD_LIBRARY_PATH=${{ github.workspace }}/binaries:${{ github.workspace }}/tbb/lib
          ${{ github.workspace }}/binaries/llama_cpp_e2e_tests