
Commit 2ba8532

Authored Mar 29, 2024
Revert "Revert "LLAMA_CPP plugin - basic version with direct file loading (#8…" (#897)
This reverts commit 3eeb232.
1 parent 3eeb232 · commit 2ba8532

15 files changed: +860 -0 lines changed
 
@@ -0,0 +1,76 @@
name: llama_cpp_plugin_build_and_test

on:
  pull_request:
    paths:
      - 'modules/llama_cpp_plugin/**'

jobs:
  build_ubuntu20:
    runs-on: ubuntu-20.04-8-cores
    steps:
      - name: Setup cmake
        uses: jwlawson/actions-setup-cmake@v1.14
        with:
          cmake-version: '3.24.x'

      - name: Checkout openvino_contrib
        uses: actions/checkout@v4
        with:
          submodules: recursive
          path: openvino_contrib

      - name: Checkout openvino
        uses: actions/checkout@v4
        with:
          submodules: recursive
          repository: openvinotoolkit/openvino
          path: openvino

      - name: CMake - configure
        run: cmake -B build -DCMAKE_BUILD_TYPE=Release -DOPENVINO_EXTRA_MODULES=${{ github.workspace }}/openvino_contrib/modules/llama_cpp_plugin -DENABLE_TESTS=ON -DENABLE_FUNCTIONAL_TESTS=ON -DENABLE_PLUGINS_XML=ON -DENABLE_LLAMA_CPP_PLUGIN_REGISTRATION=ON openvino

      - name: CMake - build
        run: cmake --build build -j`nproc` -- llama_cpp_plugin llama_cpp_e2e_tests

      - name: Upload build artifacts
        uses: actions/upload-artifact@v4
        with:
          name: build_artifacts
          path: ${{ github.workspace }}/openvino/bin/intel64/Release/

  test_ubuntu20:
    needs: build_ubuntu20
    runs-on: ubuntu-20.04
    steps:
      - name: Download build artifacts
        uses: actions/download-artifact@v4
        with:
          name: build_artifacts
          path: ${{ github.workspace }}/binaries

      - name: Prepare test data - checkout llama.cpp repo
        uses: actions/checkout@v4
        with:
          repository: ggerganov/llama.cpp
          path: llama.cpp

      - name: Prepare test data - convert test model files
        run: |
          pip install -r llama.cpp/requirements/requirements-convert-hf-to-gguf.txt
          huggingface-cli download gpt2 model.safetensors tokenizer.json tokenizer_config.json vocab.json config.json merges.txt --local-dir hf_gpt2
          mkdir -p ${{ github.workspace }}/test_data
          python3 llama.cpp/convert-hf-to-gguf.py hf_gpt2 --outtype f32 --outfile ${{ github.workspace }}/test_data/gpt2.gguf

      - name: Install libtbb2
        run: |
          wget https://storage.openvinotoolkit.org/dependencies/thirdparty/linux/oneapi-tbb-2021.2.4-lin.tgz
          mkdir -p tbb
          tar xvzf oneapi-tbb-2021.2.4-lin.tgz

      - name: Run E2E tests
        run: |
          chmod +x ${{ github.workspace }}/binaries/llama_cpp_e2e_tests
          export LD_LIBRARY_PATH=${{ github.workspace }}/binaries:${{ github.workspace }}/tbb/lib
          ${{ github.workspace }}/binaries/llama_cpp_e2e_tests
+28
@@ -0,0 +1,28 @@
BasedOnStyle: Google
IndentWidth: 4
UseTab: Never
ColumnLimit: 120

Language: Cpp
Standard: Cpp11

AccessModifierOffset: -4
AlignConsecutiveMacros: true
AllowAllArgumentsOnNextLine: false
AllowAllConstructorInitializersOnNextLine: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortFunctionsOnASingleLine: Empty
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: Empty
AllowShortLoopsOnASingleLine: false
AlwaysBreakBeforeMultilineStrings: false
BinPackArguments: false
BinPackParameters: false
CommentPragmas: '^#'
DerivePointerAlignment: false
FixNamespaceComments: true
IndentCaseLabels: false
IndentPPDirectives: AfterHash
ForEachMacros:
  - foreach
  - FOREACH_CHILD
+32
@@ -0,0 +1,32 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.13)

project(LlamaCppPlugin)

find_package(OpenVINODeveloperPackage REQUIRED)

ov_option(ENABLE_LLAMA_CPP_PLUGIN_REGISTRATION "Enables registration of LLAMA_CPP plugin" OFF)

add_subdirectory(src)

FetchContent_Declare(
    llama_cpp
    GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
    GIT_TAG b2417
)

FetchContent_MakeAvailable(llama_cpp)

if(ENABLE_TESTS)
    include(CTest)
    enable_testing()
    add_subdirectory(tests/e2e)
endif()

# install

if(OpenVINODeveloperPackage_FOUND)
    ov_cpack(LlamaCppPlugin)
endif()

modules/llama_cpp_plugin/README.md

+52
@@ -0,0 +1,52 @@
### Build instructions

This plugin should be built in the same fashion as the rest of the modules:

1. Check out the OpenVINO repository proper (https://github.com/openvinotoolkit/openvino).
2. Configure the CMake build of the OpenVINO repository, making sure to point the corresponding CMake option to the location of the `openvino_contrib` repository. The command below, executed in the `openvino` repo root, will configure the build so that the modules other than the `llama_cpp_plugin` module will not be built, to save build time - adjust the `-DBUILD_*` options if you need the other modules as well.

```bash
cmake -B build -DCMAKE_BUILD_TYPE=Release -DOPENVINO_EXTRA_MODULES=<PATH_TO_YOUR_CHECKED_OUT_OPENVINO_CONTRIB>/modules/llama_cpp_plugin -DENABLE_PLUGINS_XML=ON -DENABLE_LLAMA_CPP_PLUGIN_REGISTRATION=ON .
```

3. Build the plugin either as part of the complete openvino build by executing:

```bash
cmake --build build --parallel
```

or separately by specifying only the `llama_cpp_plugin` target:

```bash
cmake --build build --parallel -- llama_cpp_plugin
```

4. Now you can utilize the built `libllama_cpp_plugin.so` as a regular OV plugin with the device name `"LLAMA_CPP"` to directly load GGUF files and infer them using the OV API with llama.cpp execution under the hood. Make sure that the plugin is discoverable by the OV runtime, e.g. by putting the built `libllama_cpp_plugin.so`, `libllama.so` and the autogenerated `plugins.xml` from the build location into your OV binaries location, or by setting `LD_LIBRARY_PATH` appropriately (an explicit registration alternative is sketched right after this list).
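
If copying the shared libraries and `plugins.xml` around is inconvenient, the plugin library can also be registered with the runtime explicitly via the stock `ov::Core::register_plugin` call. This is only a minimal sketch - the library path below is a placeholder and must point at the `libllama_cpp_plugin.so` produced by your build:

```C++
#include <openvino/openvino.hpp>

int main() {
    ov::Core core;
    // Placeholder path - point this at the libllama_cpp_plugin.so from your build tree.
    core.register_plugin("/path/to/openvino/bin/intel64/Release/libllama_cpp_plugin.so", "LLAMA_CPP");
    // After registration the device name can be used as usual.
    auto compiled_model = core.compile_model("model.gguf", "LLAMA_CPP");
    return 0;
}
```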

#### Example of LLM inference code

```C++
ov::Core core;
auto model = core.compile_model("model.gguf", "LLAMA_CPP");
auto input_ids = ov::Tensor(ov::element::Type_t::i64, {1, 128});
auto position_ids = ov::Tensor(ov::element::Type_t::i64, {1, 128});
std::iota(position_ids.data<int64_t>(), position_ids.data<int64_t>() + position_ids.get_size(), 0);

auto infer_request = model.create_infer_request();
infer_request.set_tensor("input_ids", input_ids);
infer_request.set_tensor("position_ids", position_ids);
infer_request.infer();

size_t vocab_size = infer_request.get_tensor("logits").get_shape().back();
float* logits = infer_request.get_tensor("logits").data<float>() + (input_ids.get_size() - 1) * vocab_size;
int64_t out_token = std::max_element(logits, logits + vocab_size) - logits;
```

The models obtained by the `.compile_model` call with the `LLAMA_CPP` plugin expose two inputs (`input_ids` and `position_ids`) and a single output (`logits`) with equivalent meaning to the corresponding arguments of the LLM model representations in the huggingface `transformers` repository. The `attention_mask` and `beam_idx` inputs may be set as well, but will have no effect on the execution.

Only a batch size of 1 is currently supported.
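
As a quick sanity check of the exposed ports described above, the compiled model's inputs and outputs can be enumerated with the regular OpenVINO API; this is only a minimal sketch, and the GGUF path is a placeholder:

```C++
#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    ov::Core core;
    auto model = core.compile_model("model.gguf", "LLAMA_CPP");  // placeholder path
    for (const auto& input : model.inputs()) {
        // Expected names: input_ids, attention_mask, position_ids, beam_idx
        std::cout << "input:  " << input.get_any_name() << std::endl;
    }
    for (const auto& output : model.outputs()) {
        // Expected name: logits
        std::cout << "output: " << output.get_any_name() << std::endl;
    }
    return 0;
}
```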
@@ -0,0 +1,78 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#ifndef LLAMA_CPP_COMPILED_MODEL_HPP
#define LLAMA_CPP_COMPILED_MODEL_HPP

#include "llama.h"
#include "openvino/runtime/icompiled_model.hpp"
#include "openvino/runtime/isync_infer_request.hpp"

namespace ov {
namespace llama_cpp_plugin {
class LlamaCppSyncInferRequest;
class LlamaCppPlugin;
class LlamaCppState;
class LlamaCppModel : public ICompiledModel {
public:
    LlamaCppModel(const std::string& gguf_fname, const std::shared_ptr<const IPlugin>& plugin);
    /**
     * @brief Export compiled model to stream
     *
     * @param model output stream
     */
    virtual void export_model(std::ostream& model) const override;

    /**
     * @brief Returns runtime model
     *
     * @return OpenVINO Model which represents runtime graph
     */
    virtual std::shared_ptr<const ov::Model> get_runtime_model() const override;

    /**
     * @brief Allows to set property
     *
     * @param properties new plugin properties
     */
    virtual void set_property(const ov::AnyMap& properties) override;

    /**
     * @brief Returns property
     *
     * @param name Property name
     *
     * @return Property value
     */
    virtual ov::Any get_property(const std::string& name) const override;
    virtual const std::vector<ov::Output<const ov::Node>>& inputs() const override;
    virtual const std::vector<ov::Output<const ov::Node>>& outputs() const override;
    virtual ~LlamaCppModel();

protected:
    /**
     * @brief Method creates infer request implementation
     *
     * @return Sync infer request
     */
    virtual std::shared_ptr<ov::ISyncInferRequest> create_sync_infer_request() const override;

private:
    gguf_context* m_gguf_ctx = nullptr;
    std::string m_gguf_fname;

    llama_model* m_llama_model_ptr = nullptr;
    llama_context* m_llama_ctx = nullptr;
    std::shared_ptr<ov::Model> m_fake_model;

    std::vector<ov::Output<const ov::Node>> m_fake_inputs;
    std::vector<ov::Output<const ov::Node>> m_fake_outputs;

    friend class ov::llama_cpp_plugin::LlamaCppSyncInferRequest;
    friend class ov::llama_cpp_plugin::LlamaCppState;
};
}  // namespace llama_cpp_plugin
}  // namespace ov

#endif  // LLAMA_CPP_COMPILED_MODEL_HPP
@@ -0,0 +1,32 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#ifndef LLAMA_CPP_INFER_REQUEST_HPP
#define LLAMA_CPP_INFER_REQUEST_HPP

#include "compiled_model.hpp"
#include "openvino/openvino.hpp"

namespace ov {
namespace llama_cpp_plugin {

class LlamaCppSyncInferRequest : public ISyncInferRequest {
public:
    explicit LlamaCppSyncInferRequest(const std::shared_ptr<const LlamaCppModel>& compiled_model);
    virtual ~LlamaCppSyncInferRequest(){};

    virtual void set_tensors_impl(const ov::Output<const ov::Node> port,
                                  const std::vector<ov::SoPtr<ov::ITensor>>& tensors) override;

    virtual void infer() override;
    virtual std::vector<ov::ProfilingInfo> get_profiling_info() const override;
    virtual std::vector<ov::SoPtr<ov::IVariableState>> query_state() const override;

private:
    std::shared_ptr<const LlamaCppModel> m_compiled_model_ptr;
};
}  // namespace llama_cpp_plugin
}  // namespace ov

#endif /* LLAMA_CPP_INFER_REQUEST_HPP */
@@ -0,0 +1,46 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#ifndef LLAMA_CPP_PLUGIN_HPP
#define LLAMA_CPP_PLUGIN_HPP

#include "openvino/runtime/iplugin.hpp"

namespace ov {
namespace llama_cpp_plugin {
class LlamaCppPlugin : public IPlugin {
public:
    LlamaCppPlugin();
    virtual std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                              const ov::AnyMap& properties) const override;

    virtual std::shared_ptr<ov::ICompiledModel> compile_model(
        const std::shared_ptr<const ov::Model>& model,
        const ov::AnyMap& properties,
        const ov::SoPtr<ov::IRemoteContext>& context) const override;

    virtual void set_property(const ov::AnyMap& properties) override;

    virtual ov::Any get_property(const std::string& name, const ov::AnyMap& arguments) const override;

    virtual ov::SoPtr<ov::IRemoteContext> create_context(const ov::AnyMap& remote_properties) const override;

    virtual ov::SoPtr<ov::IRemoteContext> get_default_context(const ov::AnyMap& remote_properties) const override;

    virtual std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                             const ov::AnyMap& properties) const override;

    virtual std::shared_ptr<ov::ICompiledModel> compile_model(const std::string& fname,
                                                              const ov::AnyMap& properties) const override;

    virtual std::shared_ptr<ov::ICompiledModel> import_model(std::istream& model,
                                                             const ov::SoPtr<ov::IRemoteContext>& context,
                                                             const ov::AnyMap& properties) const override;

    virtual ov::SupportedOpsMap query_model(const std::shared_ptr<const ov::Model>& model,
                                            const ov::AnyMap& properties) const override;
};
}  // namespace llama_cpp_plugin
}  // namespace ov

#endif  // LLAMA_CPP_PLUGIN_HPP
+27
@@ -0,0 +1,27 @@
// Copyright (C) 2018-2023 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#ifndef LLAMA_CPP_STATE_HPP
#define LLAMA_CPP_STATE_HPP

#include "compiled_model.hpp"
#include "openvino/runtime/ivariable_state.hpp"

namespace ov {
namespace llama_cpp_plugin {
class LlamaCppState : public IVariableState {
public:
    LlamaCppState() = delete;
    LlamaCppState(const std::shared_ptr<const LlamaCppModel>& model_ptr)
        : m_model_ptr(model_ptr),
          IVariableState("llama_cpp_state") {}
    void reset() override {
        llama_kv_cache_clear(m_model_ptr->m_llama_ctx);
    }

private:
    const std::shared_ptr<const LlamaCppModel>& m_model_ptr;
};
}  // namespace llama_cpp_plugin
}  // namespace ov
#endif  // LLAMA_CPP_STATE_HPP
@@ -0,0 +1,53 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set( PLUGIN_LIBRARY_NAME CACHE STRING "Library name for the generated plugin" ${TARGET_NAME})
if(NOT PLUGIN_LIBRARY_NAME)
    set( PLUGIN_LIBRARY_NAME "llama_cpp_plugin" )
endif()

set( PLUGIN_DEVICE_NAME CACHE STRING "Device name for the resulting plugin")
if(NOT PLUGIN_DEVICE_NAME)
    set( PLUGIN_DEVICE_NAME "LLAMA_CPP" )
endif()

set(TARGET_NAME ${PLUGIN_LIBRARY_NAME})

file(GLOB_RECURSE SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
file(GLOB_RECURSE HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp)

if (NOT ENABLE_LLAMA_CPP_PLUGIN_REGISTRATION)
    # Skip install and registration of template component
    set(skip_plugin SKIP_INSTALL SKIP_REGISTRATION)
endif()

# adds a shared library with plugin
ov_add_plugin(NAME ${TARGET_NAME}
              DEVICE_NAME ${PLUGIN_DEVICE_NAME}
              SOURCES ${SOURCES} ${HEADERS}
              ${skip_plugin}
              VERSION_DEFINES_FOR plugin.cpp
              ADD_CLANG_FORMAT)

target_include_directories(${TARGET_NAME} PRIVATE
    "${CMAKE_CURRENT_SOURCE_DIR}"
    "${LlamaCppPlugin_SOURCE_DIR}/include")

set( LLAMA_TARGET_NAME CACHE STRING "Exact target exposed by llama.cpp to link against as the main llama.cpp library")
if(NOT LLAMA_TARGET_NAME)
    set( LLAMA_TARGET_NAME "llama" )
endif()

# include and link llama.cpp and ggml code
target_link_libraries(${TARGET_NAME} PRIVATE ${LLAMA_TARGET_NAME})
target_link_libraries(${TARGET_NAME} PRIVATE ggml)

set_target_properties(${TARGET_NAME} PROPERTIES INTERPROCEDURAL_OPTIMIZATION_RELEASE ${ENABLE_LTO})

if (ENABLE_LLAMA_CPP_PLUGIN_REGISTRATION)
    # Update the plugins.xml file
    ov_register_plugins(MAIN_TARGET ${TARGET_NAME})
endif()
@@ -0,0 +1,106 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "compiled_model.hpp"

#include <fstream>
#include <memory>
#include <openvino/op/constant.hpp>
#include <openvino/opsets/opset13.hpp>
#include <openvino/runtime/properties.hpp>
#include <openvino/util/log.hpp>
#include <thread>

#include "infer_request.hpp"
#include "plugin.hpp"

namespace ov {
namespace llama_cpp_plugin {

LlamaCppModel::~LlamaCppModel() {
    llama_free(m_llama_ctx);
    llama_free_model(m_llama_model_ptr);
    llama_backend_free();
}

LlamaCppModel::LlamaCppModel(const std::string& gguf_fname, const std::shared_ptr<const IPlugin>& plugin)
    : ICompiledModel(nullptr, plugin),
      m_gguf_fname(gguf_fname) {
    OPENVINO_DEBUG << "llama_cpp_plugin: loading llama model directly from GGUF... " << std::endl;
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 99;
    m_llama_model_ptr = llama_load_model_from_file(gguf_fname.c_str(), mparams);
    llama_context_params cparams = llama_context_default_params();
    cparams.n_threads =
        std::thread::hardware_concurrency();  // TODO (vshampor): reuse equivalent setting defined by OV API
    cparams.n_ctx = 0;  // this means that the actual n_ctx will be taken equal to the model's train-time value
    m_llama_ctx = llama_new_context_with_model(m_llama_model_ptr, cparams);
    OPENVINO_DEBUG << "llama_cpp_plugin: llama model loaded successfully from GGUF..." << std::endl;

    auto input_ids = std::make_shared<ov::opset13::Parameter>(ov::element::Type_t::i64, ov::PartialShape({-1, -1}));
    auto fake_convert = std::make_shared<ov::opset13::Convert>(input_ids->output(0), ov::element::Type_t::f32);
    auto logits = std::make_shared<ov::opset13::Result>(fake_convert->output(0));

    ov::ParameterVector inputs{input_ids};

    std::vector<std::tuple<std::string, ov::element::Type_t, ov::PartialShape>> additional_inputs_in_order = {
        {"attention_mask", ov::element::Type_t::i64, {-1, -1}},
        {"position_ids", ov::element::Type_t::i64, {-1, -1}},
        {"beam_idx", ov::element::Type_t::i32, {-1, -1}}};

    for (const auto& descr : additional_inputs_in_order) {
        auto unused_inp = std::make_shared<ov::opset13::Parameter>(std::get<1>(descr), std::get<2>(descr));
        inputs.push_back(unused_inp);
    }

    m_fake_model = std::make_shared<ov::Model>(logits, inputs, "fake_ov_model_for_io_specification");

    m_fake_model->inputs()[0].set_names({"input_ids"});
    for (size_t i = 0; i < additional_inputs_in_order.size(); i++) {
        m_fake_model->inputs()[i + 1].set_names({std::get<0>(additional_inputs_in_order[i])});
    }

    m_fake_model->outputs()[0].set_names({"logits"});

    for (auto input : m_fake_model->inputs()) {
        m_fake_inputs.emplace_back(input);
    }
    for (auto output : m_fake_model->outputs()) {
        m_fake_outputs.emplace_back(output);
    }
}

std::shared_ptr<const ov::Model> LlamaCppModel::get_runtime_model() const {
    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: Not Implemented");
}

void LlamaCppModel::set_property(const ov::AnyMap& properties) {
    OPENVINO_DEBUG << "llama_cpp_plugin: attempted to set_property (did nothing)";
}

ov::Any LlamaCppModel::get_property(const std::string& name) const {
    if (ov::supported_properties == name) {
        return decltype(ov::supported_properties)::value_type(std::vector<PropertyName>());
    }
    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: Not Implemented");
}

std::shared_ptr<ov::ISyncInferRequest> LlamaCppModel::create_sync_infer_request() const {
    return std::make_shared<LlamaCppSyncInferRequest>(
        std::static_pointer_cast<const LlamaCppModel>(shared_from_this()));
}

const std::vector<ov::Output<const ov::Node>>& LlamaCppModel::inputs() const {
    return m_fake_inputs;
};
const std::vector<ov::Output<const ov::Node>>& LlamaCppModel::outputs() const {
    return m_fake_outputs;
};

void LlamaCppModel::export_model(std::ostream& output_stream) const {
    std::ifstream in(m_gguf_fname, std::ios::binary);
    output_stream << in.rdbuf();
}

}  // namespace llama_cpp_plugin
}  // namespace ov
@@ -0,0 +1,129 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "infer_request.hpp"

#include <memory>
#include <openvino/runtime/ivariable_state.hpp>

#include "llama.h"
#include "openvino/runtime/make_tensor.hpp"
#include "openvino/util/log.hpp"
#include "state.hpp"

namespace ov {
namespace llama_cpp_plugin {

void allocate_tensor_impl(ov::SoPtr<ov::ITensor>& tensor,
                          const ov::element::Type& element_type,
                          const ov::Shape& shape) {
    if (!tensor || tensor->get_element_type() != element_type) {
        tensor = ov::make_tensor(element_type, shape);
    } else {
        tensor->set_shape(shape);
    }
}

LlamaCppSyncInferRequest::LlamaCppSyncInferRequest(const std::shared_ptr<const LlamaCppModel>& compiled_model)
    : ov::ISyncInferRequest(compiled_model) {
    OPENVINO_DEBUG << "llama_cpp_plugin: infer request ctor called\n";
    m_compiled_model_ptr = compiled_model;
    for (const auto& input : get_inputs()) {
        allocate_tensor(input, [input](ov::SoPtr<ov::ITensor>& tensor) {
            allocate_tensor_impl(tensor,
                                 input.get_element_type(),
                                 input.get_partial_shape().is_dynamic() ? ov::Shape{0} : input.get_shape());
        });
    }
    for (const auto& output : get_outputs()) {
        allocate_tensor(output, [output](ov::SoPtr<ov::ITensor>& tensor) {
            allocate_tensor_impl(tensor,
                                 output.get_element_type(),
                                 output.get_partial_shape().is_dynamic() ? ov::Shape{0} : output.get_shape());
        });
    }
}
void LlamaCppSyncInferRequest::set_tensors_impl(const ov::Output<const ov::Node> port,
                                                const std::vector<ov::SoPtr<ov::ITensor>>& tensors) {
    OPENVINO_DEBUG << "llama_cpp_plugin: set_tensors_impl called\n";
}

void llama_batch_add_reimpl(struct llama_batch& batch,
                            llama_token id,
                            llama_pos pos,
                            const std::vector<llama_seq_id>& seq_ids,
                            bool logits) {
    batch.token[batch.n_tokens] = id;
    batch.pos[batch.n_tokens] = pos;
    batch.n_seq_id[batch.n_tokens] = seq_ids.size();
    for (size_t i = 0; i < seq_ids.size(); ++i) {
        batch.seq_id[batch.n_tokens][i] = seq_ids[i];
    }
    batch.logits[batch.n_tokens] = logits;

    batch.n_tokens++;
}

void LlamaCppSyncInferRequest::infer() {
    auto input_ids_tensor_ptr = get_tensor(get_inputs()[0]);     // TODO (vshampor): correctly identify input_ids
                                                                 // among all inputs without hardcode
    auto position_ids_tensor_ptr = get_tensor(get_inputs()[2]);  // TODO (vshampor): correctly identify position_ids
                                                                 // among all inputs without hardcode
    OPENVINO_ASSERT(input_ids_tensor_ptr->get_element_type() == ov::element::Type_t::i64);
    OPENVINO_ASSERT(input_ids_tensor_ptr->get_shape().size() == 2);
    size_t sequence_length = input_ids_tensor_ptr->get_shape()[1];

    // llama_batch actually contains one sequence
    llama_batch batch = llama_batch_init(sequence_length, /* embd = */ 0, /* n_seq_max = */ 1);
    const int64_t* data_ptr = input_ids_tensor_ptr->data<int64_t>();

    const int64_t* sequence_start_ptr = data_ptr /* + seq_idx */;

    const int64_t* position_idx_ptr = position_ids_tensor_ptr->data<int64_t>();

    for (size_t tok_idx = 0; tok_idx < sequence_length; ++tok_idx) {
        const int64_t token_id = sequence_start_ptr[tok_idx];
        const int64_t position_id = position_idx_ptr[tok_idx];
        llama_batch_add_reimpl(batch,
                               token_id,
                               position_id,
                               {0},
                               true);  // the last `true` here is a marker that the logits for this
                                       // token should be computed and returned
    }

    llama_context* ctx = m_compiled_model_ptr->m_llama_ctx;
    int32_t sts = llama_decode(ctx, batch);

    if (sts != 0) {
        OPENVINO_THROW("llama_decode failed with code ", sts);
    }

    size_t n_vocab = llama_n_vocab(m_compiled_model_ptr->m_llama_model_ptr);

    ov::Tensor output_tensor{ov::element::Type_t::f32, {1, sequence_length, n_vocab}};
    float* output_tensor_data_ptr = output_tensor.data<float>();

    for (size_t pos = 0; pos < sequence_length; pos++) {
        float* logits_from_llama = llama_get_logits_ith(ctx, pos);
        std::copy(logits_from_llama, logits_from_llama + n_vocab, output_tensor_data_ptr + pos * n_vocab);
    }

    auto& logit_output = get_outputs()[0];
    allocate_tensor(logit_output, [&output_tensor](ov::SoPtr<ov::ITensor>& tensor) {
        allocate_tensor_impl(tensor, output_tensor.get_element_type(), output_tensor.get_shape());
        output_tensor.copy_to(ov::make_tensor(tensor));
    });
};
std::vector<ov::ProfilingInfo> LlamaCppSyncInferRequest::get_profiling_info() const {
    OPENVINO_DEBUG << "llama_cpp_plugin: get_profiling_info() called\n";
    return std::vector<ov::ProfilingInfo>{};
};

std::vector<ov::SoPtr<ov::IVariableState>> LlamaCppSyncInferRequest::query_state() const {
    OPENVINO_DEBUG << "llama_cpp_plugin: query_state() called\n";
    return {std::static_pointer_cast<ov::IVariableState>(std::make_shared<LlamaCppState>(m_compiled_model_ptr))};
}
}  // namespace llama_cpp_plugin
}  // namespace ov
+97
@@ -0,0 +1,97 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include "plugin.hpp"

#include <openvino/runtime/properties.hpp>

#include "compiled_model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/runtime/internal_properties.hpp"
#include "openvino/util/log.hpp"

namespace {
static constexpr const char* wait_executor_name = "LlamaCppWaitExecutor";
static constexpr const char* stream_executor_name = "LlamaCppStreamsExecutor";
static constexpr const char* template_exclusive_executor = "LlamaCppExecutor";
}  // namespace

namespace ov {
namespace llama_cpp_plugin {
LlamaCppPlugin::LlamaCppPlugin() : IPlugin() {
    set_device_name("LLAMA_CPP");
}
std::shared_ptr<ov::ICompiledModel> LlamaCppPlugin::compile_model(const std::shared_ptr<const ov::Model>& model,
                                                                  const ov::AnyMap& properties) const {
    OPENVINO_THROW_NOT_IMPLEMENTED("Currently only direct GGUF file loading is "
                                   "supported for the LLAMA_CPP* plugins");
}

std::shared_ptr<ov::ICompiledModel> LlamaCppPlugin::compile_model(const std::shared_ptr<const ov::Model>& model,
                                                                  const ov::AnyMap& properties,
                                                                  const ov::SoPtr<ov::IRemoteContext>& context) const {
    OPENVINO_THROW_NOT_IMPLEMENTED("Currently only direct GGUF file loading is "
                                   "supported for the LLAMA_CPP* plugins");
}
std::shared_ptr<ov::ICompiledModel> LlamaCppPlugin::compile_model(const std::string& fname,
                                                                  const ov::AnyMap& properties) const {
    return std::make_shared<LlamaCppModel>(fname, shared_from_this());
}

void LlamaCppPlugin::set_property(const ov::AnyMap& properties) {
    for (const auto& map_entry : properties) {
        OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: setting property ", map_entry.first, " not implemented");
    }
}

ov::Any LlamaCppPlugin::get_property(const std::string& name, const ov::AnyMap& arguments) const {
    if (ov::supported_properties == name) {
        return decltype(ov::supported_properties)::value_type(
            std::vector<PropertyName>({ov::device::capabilities, ov::device::full_name}));
    }
    if (ov::device::capabilities == name) {
        return decltype(ov::device::capabilities)::value_type(
            std::vector<std::string>({ov::device::capability::EXPORT_IMPORT}));
    }
    if (ov::internal::supported_properties == name) {
        return decltype(ov::internal::supported_properties)::value_type(
            std::vector<PropertyName>({ov::internal::caching_properties}));
    }

    if (ov::internal::caching_properties == name) {
        return std::vector<ov::PropertyName>{ov::device::full_name};
    }

    if (ov::device::full_name == name) {
        return std::string("LLAMA_CPP");
    }

    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: getting property ", name, " not implemented");
}

ov::SoPtr<ov::IRemoteContext> LlamaCppPlugin::create_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: Not Implemented");
}
ov::SoPtr<ov::IRemoteContext> LlamaCppPlugin::get_default_context(const ov::AnyMap& remote_properties) const {
    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: Not Implemented");
}
std::shared_ptr<ov::ICompiledModel> LlamaCppPlugin::import_model(std::istream& model_file_stream,
                                                                 const ov::AnyMap& properties) const {
    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: model importing not implemented");
}

std::shared_ptr<ov::ICompiledModel> LlamaCppPlugin::import_model(std::istream& model,
                                                                 const ov::SoPtr<ov::IRemoteContext>& context,
                                                                 const ov::AnyMap& properties) const {
    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: model importing not implemented");
}

ov::SupportedOpsMap LlamaCppPlugin::query_model(const std::shared_ptr<const ov::Model>& model,
                                                const ov::AnyMap& properties) const {
    OPENVINO_THROW_NOT_IMPLEMENTED("llama_cpp_plugin: query_model not implemented");
}
}  // namespace llama_cpp_plugin
}  // namespace ov

static const ov::Version version = {CI_BUILD_NUMBER, "llama_cpp_plugin"};
OV_DEFINE_PLUGIN_CREATE_FUNCTION(ov::llama_cpp_plugin::LlamaCppPlugin, version)
@@ -0,0 +1,20 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

set(TARGET_NAME llama_cpp_e2e_tests)

ov_add_test_target(
    NAME ${TARGET_NAME}
    ROOT ${CMAKE_CURRENT_SOURCE_DIR}
    DEPENDENCIES
        llama_cpp_plugin
    LINK_LIBRARIES
        openvino::runtime::dev
        openvino::funcSharedTests
    INCLUDES
        "${LlamaCppPlugin_SOURCE_DIR}/include"
    ADD_CLANG_FORMAT
    LABELS
        OV UNIT TEMPLATE
)
@@ -0,0 +1,69 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include <gtest/gtest.h>

#include "common_test_utils/file_utils.hpp"
#include "openvino/openvino.hpp"

const std::string TEST_FILES_DIR = "test_data";

// "Why is the Sun yellow?"
const std::vector<int64_t> GPT2_PROMPT_TOKEN_IDS = {5195, 318, 262, 3825, 7872, 30};
// "The Sun is a bright red, which means it is a bright red. The Sun is a bright
// red because it is a bright red."
const std::vector<int64_t> GPT2_REFERENCE_RESPONSE_TOKEN_IDS = {
    198, 464, 3825, 318, 257, 6016, 2266, 11, 543, 1724, 340, 318, 257, 6016, 2266, 13,
    383, 3825, 318, 257, 6016, 2266, 780, 340, 318, 257, 6016, 2266, 13, 198, 198, 464};

const auto SEP = ov::util::FileTraits<char>::file_separator;

TEST(PromptResponseTest, TestGPT2) {
    const std::string plugin_name = "LLAMA_CPP";
    ov::Core core;

    const std::string model_file_name = "gpt2.gguf";
    const std::string model_file =
        ov::test::utils::getCurrentWorkingDir() + SEP + TEST_FILES_DIR + SEP + model_file_name;
    ov::InferRequest lm = core.compile_model(model_file, plugin_name).create_infer_request();
    auto input_ids_tensor = ov::Tensor(ov::element::Type_t::i64, {1, GPT2_PROMPT_TOKEN_IDS.size()});
    std::copy(GPT2_PROMPT_TOKEN_IDS.begin(), GPT2_PROMPT_TOKEN_IDS.end(), input_ids_tensor.data<int64_t>());
    lm.set_tensor("input_ids", input_ids_tensor);
    lm.set_tensor("attention_mask", ov::Tensor(ov::element::Type_t::i64, {1, GPT2_PROMPT_TOKEN_IDS.size()}));
    ov::Tensor position_ids = lm.get_tensor("position_ids");
    position_ids.set_shape(input_ids_tensor.get_shape());
    std::iota(position_ids.data<int64_t>(), position_ids.data<int64_t>() + position_ids.get_size(), 0);

    constexpr size_t BATCH_SIZE = 1;
    lm.get_tensor("beam_idx").set_shape({BATCH_SIZE});
    lm.get_tensor("beam_idx").data<int32_t>()[0] = 0;

    lm.infer();

    size_t vocab_size = lm.get_tensor("logits").get_shape().back();
    float* logits = lm.get_tensor("logits").data<float>() + (input_ids_tensor.get_size() - 1) * vocab_size;
    int64_t out_token = std::max_element(logits, logits + vocab_size) - logits;

    lm.get_tensor("input_ids").set_shape({BATCH_SIZE, 1});
    position_ids.set_shape({BATCH_SIZE, 1});

    size_t cnt = 0;
    std::vector<int64_t> out_token_ids;

    while (cnt < GPT2_REFERENCE_RESPONSE_TOKEN_IDS.size()) {
        lm.get_tensor("input_ids").data<int64_t>()[0] = out_token;
        lm.get_tensor("attention_mask").set_shape({BATCH_SIZE, lm.get_tensor("attention_mask").get_shape().at(1) + 1});
        std::fill_n(lm.get_tensor("attention_mask").data<int64_t>(), lm.get_tensor("attention_mask").get_size(), 1);
        position_ids.data<int64_t>()[0] = int64_t(lm.get_tensor("attention_mask").get_size() - 2);
        lm.start_async();
        lm.wait();
        logits = lm.get_tensor("logits").data<float>();
        out_token = std::max_element(logits, logits + vocab_size) - logits;
        out_token_ids.push_back(out_token);
        cnt++;
    }

    lm.reset_state();

    ASSERT_EQ(out_token_ids, GPT2_REFERENCE_RESPONSE_TOKEN_IDS);
}
@@ -0,0 +1,15 @@
// Copyright (C) 2024 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#include <stdexcept>
#include <string>

namespace ov {
namespace test {
void set_device_suffix(const std::string& suffix) {
    if (!suffix.empty()) {
        throw std::runtime_error("The suffix can't be used for LLAMA_CPP device!");
    }
}
}  // namespace test
}  // namespace ov
