Skip to content

Commit 52da9bc

Browse files
committed
Reverted unneeded changes.
1 parent 4c18a34 commit 52da9bc

File tree

3 files changed

+16
-9
lines changed

3 files changed

+16
-9
lines changed

.github/workflows/causal_lm_cpp.yml

+3-2
Original file line number | Diff line number | Diff line change
@@ -15,6 +15,7 @@ concurrency:
1515
env:
1616
l_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/l_openvino_toolkit_ubuntu20_2024.5.0.dev20240830_x86_64.tgz
1717
l_u22_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/l_openvino_toolkit_ubuntu22_2024.5.0.dev20240830_x86_64.tgz
18+
l_u24_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/l_openvino_toolkit_ubuntu24_2024.5.0.dev20240830_x86_64.tgz
1819
m_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/m_openvino_toolkit_macos_12_6_2024.5.0.dev20240830_x86_64.tgz
1920
w_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/w_openvino_toolkit_windows_2024.5.0.dev20240830_x86_64.zip
2021
jobs:
@@ -631,7 +632,7 @@ jobs:
631632
- name: Install OpenVINO
632633
run: |
633634
mkdir ./ov/
634-
curl ${{ env.l_ov_link }} | tar --directory ./ov/ --strip-components 1 -xz
635+
curl ${{ env.l_u24_ov_link }} | tar --directory ./ov/ --strip-components 1 -xz
635636
sudo ./ov/install_dependencies/install_openvino_dependencies.sh
636637
- name: Build app
637638
run: |
@@ -717,7 +718,7 @@ jobs:
717718
source ./ov/setupvars.sh
718719
export PYTHONPATH=./build/:$PYTHONPATH
719720
printf 'What is on the image?\n' > ./input.txt
720-
timeout 600s python ./samples/python/vlm_chat_sample/vlm_chat_sample.py ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11 < input.txt > ./pred.txt
721+
timeout --preserve-status 300s python ./samples/python/vlm_chat_sample/vlm_chat_sample.py ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11 < input.txt > ./pred.txt
721722
722723
cpp-continuous-batching-ubuntu:
723724
runs-on: ubuntu-20.04-8-cores

samples/python/vlm_chat_sample/vlm_chat_sample.py

+1-1
Original file line number | Diff line number | Diff line change
@@ -59,7 +59,7 @@ def main():
5959
prompt = input('question:\n')
6060
except EOFError:
6161
break
62-
pipe(prompt, [image], config, streamer)
62+
pipe(prompt, image=image, generation_config=config, streamer=streamer)
6363
print('\n----------')
6464
pipe.finish_chat()
6565

src/python/py_vlm_pipeline.cpp

+12-6
Original file line number | Diff line number | Diff line change
@@ -104,40 +104,46 @@ void init_vlm_pipeline(py::module_& m) {
104104
.def(py::init([](
105105
const std::string& model_path,
106106
const std::string& device,
107-
const std::map<std::string, py::object>& config
107+
const std::map<std::string, py::object>& config,
108+
const ov::Core& core
108109
) {
109110
ScopedVar env_manager(utils::ov_tokenizers_module_path());
110-
return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config));
111+
return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config), core);
111112
}),
112113
py::arg("model_path"), "folder with openvino_model.xml and openvino_tokenizer[detokenizer].xml files",
113114
py::arg("device") = "CPU", "device on which inference will be done",
114-
py::arg("config") = ov::AnyMap({}), "openvino.properties map"
115+
py::arg("config") = ov::AnyMap({}), "openvino.properties map",
116+
py::arg("core") = ov::Core(), "openvino.Core object",
115117
R"(
116118
VLMPipeline class constructor.
117119
model_path (str): Path to the model file.
118120
device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
119121
Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
122+
core (openvino.Core): openvino.Core instance.
120123
)")
121124

122125
.def(py::init([](
123126
const std::string& model_path,
124127
const ov::genai::Tokenizer& tokenizer,
125128
const std::string& device,
126-
const std::map<std::string, py::object>& config
129+
const std::map<std::string, py::object>& config,
130+
const ov::Core& core
127131
) {
128132
ScopedVar env_manager(utils::ov_tokenizers_module_path());
129-
return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config));
133+
return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config), core);
130134
}),
131135
py::arg("model_path"),
132136
py::arg("tokenizer"),
133137
py::arg("device") = "CPU",
134-
py::arg("config") = ov::AnyMap({}), "openvino.properties map"
138+
py::arg("config") = ov::AnyMap({}), "openvino.properties map",
139+
py::arg("core") = ov::Core(), "openvino.Core object",
135140
R"(
136141
VLMPipeline class constructor for manually created openvino_genai.Tokenizer.
137142
model_path (str): Path to the model file.
138143
tokenizer (openvino_genai.Tokenizer): tokenizer object.
139144
device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
140145
Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
146+
core (openvino.Core): openvino.Core instance.
141147
)")
142148
.def("start_chat", &ov::genai::VLMPipeline::start_chat)
143149
.def("finish_chat", &ov::genai::VLMPipeline::finish_chat)

0 commit comments

Comments
 (0)