Skip to content

Commit b332eb6

Browse files
committed
Attempt to fix ci.
1 parent 3f6f426 commit b332eb6

File tree

1 file changed

+6
-12
lines changed

1 file changed

+6
-12
lines changed

src/python/py_vlm_pipeline.cpp

+6-12
Original file line numberDiff line numberDiff line change
@@ -104,46 +104,40 @@ void init_vlm_pipeline(py::module_& m) {
104104
.def(py::init([](
105105
const std::string& model_path,
106106
const std::string& device,
107-
const std::map<std::string, py::object>& config,
108-
const ov::Core& core
107+
const std::map<std::string, py::object>& config
109108
) {
110109
ScopedVar env_manager(utils::ov_tokenizers_module_path());
111-
return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config), core);
110+
return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config));
112111
}),
113112
py::arg("model_path"), "folder with openvino_model.xml and openvino_tokenizer[detokenizer].xml files",
114113
py::arg("device") = "CPU", "device on which inference will be done",
115-
py::arg("config") = ov::AnyMap({}), "openvino.properties map",
116-
py::arg("core") = ov::Core(), "openvino.Core object",
114+
py::arg("config") = ov::AnyMap({}), "openvino.properties map"
117115
R"(
118116
VLMPipeline class constructor.
119117
model_path (str): Path to the model file.
120118
device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
121119
Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
122-
core (openvino.Core): openvino.Core instance.
123120
)")
124121

125122
.def(py::init([](
126123
const std::string& model_path,
127124
const ov::genai::Tokenizer& tokenizer,
128125
const std::string& device,
129-
const std::map<std::string, py::object>& config,
130-
const ov::Core& core
126+
const std::map<std::string, py::object>& config
131127
) {
132128
ScopedVar env_manager(utils::ov_tokenizers_module_path());
133-
return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config), core);
129+
return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config));
134130
}),
135131
py::arg("model_path"),
136132
py::arg("tokenizer"),
137133
py::arg("device") = "CPU",
138-
py::arg("config") = ov::AnyMap({}), "openvino.properties map",
139-
py::arg("core") = ov::Core(), "openvino.Core object",
134+
py::arg("config") = ov::AnyMap({}), "openvino.properties map"
140135
R"(
141136
VLMPipeline class constructor for manually created openvino_genai.Tokenizer.
142137
model_path (str): Path to the model file.
143138
tokenizer (openvino_genai.Tokenizer): tokenizer object.
144139
device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
145140
Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
146-
core (openvino.Core): openvino.Core instance.
147141
)")
148142
.def("start_chat", &ov::genai::VLMPipeline::start_chat)
149143
.def("finish_chat", &ov::genai::VLMPipeline::finish_chat)

0 commit comments

Comments (0)