@@ -104,40 +104,46 @@ void init_vlm_pipeline(py::module_& m) {
         .def(py::init([](
             const std::string& model_path,
             const std::string& device,
-            const std::map<std::string, py::object>& config
+            const std::map<std::string, py::object>& config,
+            const ov::Core& core
         ) {
             ScopedVar env_manager(utils::ov_tokenizers_module_path());
-            return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config));
+            return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config), core);
         }),
         py::arg("model_path"), "folder with openvino_model.xml and openvino_tokenizer[detokenizer].xml files",
         py::arg("device") = "CPU", "device on which inference will be done",
-        py::arg("config") = ov::AnyMap({}), "openvino.properties map"
+        py::arg("config") = ov::AnyMap({}), "openvino.properties map",
+        py::arg("core") = ov::Core(), "openvino.Core object",
         R"(
             VLMPipeline class constructor.
             model_path (str): Path to the model file.
             device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
             Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
+            core (openvino.Core): openvino.Core instance.
         )")
 
         .def(py::init([](
             const std::string& model_path,
             const ov::genai::Tokenizer& tokenizer,
             const std::string& device,
-            const std::map<std::string, py::object>& config
+            const std::map<std::string, py::object>& config,
+            const ov::Core& core
         ) {
             ScopedVar env_manager(utils::ov_tokenizers_module_path());
-            return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config));
+            return std::make_unique<ov::genai::VLMPipeline>(model_path, tokenizer, device, utils::properties_to_any_map(config), core);
         }),
         py::arg("model_path"),
         py::arg("tokenizer"),
         py::arg("device") = "CPU",
-        py::arg("config") = ov::AnyMap({}), "openvino.properties map"
+        py::arg("config") = ov::AnyMap({}), "openvino.properties map",
+        py::arg("core") = ov::Core(), "openvino.Core object",
         R"(
             VLMPipeline class constructor for manually created openvino_genai.Tokenizer.
             model_path (str): Path to the model file.
             tokenizer (openvino_genai.Tokenizer): tokenizer object.
             device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
             Add {"scheduler_config": ov_genai.SchedulerConfig} to config properties to create continuous batching pipeline.
+            core (openvino.Core): openvino.Core instance.
         )")
         .def("start_chat", &ov::genai::VLMPipeline::start_chat)
         .def("finish_chat", &ov::genai::VLMPipeline::finish_chat)