Skip to content

Commit a015411

Browse files
committed
Error fixed.
1 parent 52da9bc commit a015411

File tree

2 files changed

+10
-16
lines changed

2 files changed

+10
-16
lines changed

.github/workflows/causal_lm_cpp.yml

+4-4
Original file line number | Diff line number | Diff line change
@@ -14,8 +14,7 @@ concurrency:
1414

1515
env:
1616
l_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/l_openvino_toolkit_ubuntu20_2024.5.0.dev20240830_x86_64.tgz
17-
l_u22_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/l_openvino_toolkit_ubuntu22_2024.5.0.dev20240830_x86_64.tgz
18-
l_u24_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/l_openvino_toolkit_ubuntu24_2024.5.0.dev20240830_x86_64.tgz
17+
l_u22_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/l_openvino_toolkit_ubuntu22_2024.5.0.dev20240830_x86_64.tgz
1918
m_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/m_openvino_toolkit_macos_12_6_2024.5.0.dev20240830_x86_64.tgz
2019
w_ov_link: https://storage.openvinotoolkit.org/repositories/openvino/packages/nightly/2024.5.0-16570-19eb02fe60b/w_openvino_toolkit_windows_2024.5.0.dev20240830_x86_64.zip
2120
jobs:
@@ -632,7 +631,7 @@ jobs:
632631
- name: Install OpenVINO
633632
run: |
634633
mkdir ./ov/
635-
curl ${{ env.l_u24_ov_link }} | tar --directory ./ov/ --strip-components 1 -xz
634+
curl ${{ env.l_ov_link }} | tar --directory ./ov/ --strip-components 1 -xz
636635
sudo ./ov/install_dependencies/install_openvino_dependencies.sh
637636
- name: Build app
638637
run: |
@@ -718,7 +717,8 @@ jobs:
718717
source ./ov/setupvars.sh
719718
export PYTHONPATH=./build/:$PYTHONPATH
720719
printf 'What is on the image?\n' > ./input.txt
721-
timeout 300s --preserve-status 0 python ./samples/python/vlm_chat_sample/vlm_chat_sample.py ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11 < input.txt > ./pred.txt
720+
timeout 300s python ./samples/python/vlm_chat_sample/vlm_chat_sample.py ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11 < input.txt > ./pred.txt || ( [[ $? -eq 124 ]] && \
721+
echo "Timeout reached, but it's expected." )
722722
723723
cpp-continuous-batching-ubuntu:
724724
runs-on: ubuntu-20.04-8-cores

src/python/py_vlm_pipeline.cpp

+6-12
Original file line number | Diff line number | Diff line change
@@ -104,46 +104,40 @@ void init_vlm_pipeline(py::module_& m) {
104104
.def(py::init([](
105105
const std::string& model_path,
106106
const std::string& device,
107-
const std::map<std::string, py::object>& config,
108-
const ov::Core& core
107+
const std::map<std::string, py::object>& config
109108
) {
110109
ScopedVar env_manager(utils::ov_tokenizers_module_path());
111-
return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config), core);
110+
return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config));
112111
}),
113112
py::arg("model_path"), "folder with openvino_model.xml and openvino_tokenizer[detokenizer].xml files",
114113
py::arg("device") = "CPU", "device on which inference will be done",
115-
py::arg("config") = ov::AnyMap({}), "openvino.properties map",
116-
py::arg("core") = ov::Core(), "openvino.Core object",
114+
py::arg("config") = ov::AnyMap({}), "openvino.properties map"
117115
R"(
118116
VLMPipeline class constructor.
119117
model_path (str): Path to the model file.
120118
device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
121119
Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
122-
core (openvino.Core): openvino.Core instance.
123120
)")
124121

125122
.def(py::init([](
126123
const std::string& model_path,
127124
const ov::genai::Tokenizer& tokenizer,
128125
const std::string& device,
129-
const std::map<std::string, py::object>& config,
130-
const ov::Core& core
126+
const std::map<std::string, py::object>& config
131127
) {
132128
ScopedVar env_manager(utils::ov_tokenizers_module_path());
133-
return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config), core);
129+
return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config));
134130
}),
135131
py::arg("model_path"),
136132
py::arg("tokenizer"),
137133
py::arg("device") = "CPU",
138-
py::arg("config") = ov::AnyMap({}), "openvino.properties map",
139-
py::arg("core") = ov::Core(), "openvino.Core object",
134+
py::arg("config") = ov::AnyMap({}), "openvino.properties map"
140135
R"(
141136
VLMPipeline class constructor for manually created openvino_genai.Tokenizer.
142137
model_path (str): Path to the model file.
143138
tokenizer (openvino_genai.Tokenizer): tokenizer object.
144139
device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
145140
Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
146-
core (openvino.Core): openvino.Core instance.
147141
)")
148142
.def("start_chat", &ov::genai::VLMPipeline::start_chat)
149143
.def("finish_chat", &ov::genai::VLMPipeline::finish_chat)

0 commit comments

Comments
 (0)