Skip to content

Commit ee31d0a

Browse files
committed
Errors fixed.
1 parent 07184ae commit ee31d0a

File tree

4 files changed

+27
-11
lines changed

4 files changed

+27
-11
lines changed

.github/workflows/causal_lm_cpp.yml

+5-7
Original file line numberDiff line numberDiff line change
@@ -704,21 +704,19 @@ jobs:
704704
source ./ov/setupvars.sh
705705
cmake -DCMAKE_BUILD_TYPE=Release -S ./ -B ./build/
706706
cmake --build ./build/ --config Release --target visual_language_chat -j
707-
- name: Download and convert and model
707+
- name: Download and convert a model and an image
708708
run: |
709709
source ./ov/setupvars.sh
710-
python -m pip install --upgrade-strategy eager -r ./samples/requirements.txt --pre --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly
711710
python -m pip install ./thirdparty/openvino_tokenizers/[transformers] --pre --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly
711+
python -m pip install --upgrade-strategy eager -r ./samples/requirements.txt --pre --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly
712712
python ./samples/cpp/visual_language_chat/export_MiniCPM-V-2_6.py ./miniCPM-V-2_6/
713713
wget https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11
714714
715-
- name: Run chat sample
715+
- name: Run chat sample
716716
run: |
717717
source ./ov/setupvars.sh
718-
export PYTHONPATH=./build/:$PYTHONPATH
719-
printf 'What is on the image?\nWhat is special on the image?\n' > ./input.txt
720-
timeout 120s python ./samples/python/vlm_chat_sample/vlm_chat_sample.py ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11 < input.txt > ./pred.txt || ( [[ $? -eq 124 ]] && \
721-
echo "Timeout reached, but it's excpected." )
718+
&& timeout 120s ./build/samples/cpp/visual_language_chat/visual_language_chat ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11
719+
<<< $'What is on the image?\nWhat is special on the image?'
722720
723721
cpp-continuous-batching-ubuntu:
724722
runs-on: ubuntu-20.04-8-cores

src/cpp/include/openvino/genai/vlm_pipeline.hpp

+1
Original file line numberDiff line numberDiff line change
@@ -163,6 +163,7 @@ class OPENVINO_GENAI_EXPORTS VLMPipeline {
163163
class VLMPipelineImpl;
164164
std::unique_ptr<VLMPipelineImpl> m_pimpl;
165165
};
166+
166167
/*
167168
* utils that allow to use generate() in the following way:
168169
* pipe.generate(prompt, ov::genai::image(std::move(image_tensor))).

src/cpp/src/vlm_pipeline.cpp

+19-2
Original file line numberDiff line numberDiff line change
@@ -533,8 +533,25 @@ DecodedResults VLMPipeline::generate(
533533
variable.reset();
534534
}
535535
m_language.get_tensor("attention_mask").set_shape({1, 0});
536-
}
537-
return {{m_tokenizer.decode(generated)}};
536+
}
537+
DecodedResults results;
538+
results.texts = {m_tokenizer.decode(generated)};
539+
540+
// TODO: implement performance metrics
541+
results.perf_metrics = ov::genai::PerfMetrics();
542+
results.perf_metrics.m_evaluated = false;
543+
results.perf_metrics.generate_duration = {0, 0};
544+
results.perf_metrics.inference_duration = {0, 0};
545+
results.perf_metrics.tokenization_duration = {0, 0};
546+
results.perf_metrics.detokenization_duration = {0, 0};
547+
results.perf_metrics.ttft = {0, 0};
548+
results.perf_metrics.tpot = {0, 0};
549+
results.perf_metrics.ipot = {0, 0};
550+
results.perf_metrics.throughput = {0, 0};
551+
results.perf_metrics.num_generated_tokens = generated.size();
552+
results.perf_metrics.num_input_tokens = 0;
553+
554+
return results;
538555
}
539556

540557
DecodedResults VLMPipeline::generate(

src/python/py_vlm_pipeline.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -137,9 +137,9 @@ void init_vlm_pipeline(py::module_& m) {
137137
tokenizer (openvino_genai.Tokenizer): tokenizer object.
138138
device (str): Device to run the model on (e.g., CPU, GPU). Default is 'CPU'.
139139
)")
140-
.def("start_chat", &ov::genai::VLMPipeline::start_chat)
140+
.def("start_chat", &ov::genai::VLMPipeline::start_chat, py::arg("system_message") = "")
141141
.def("finish_chat", &ov::genai::VLMPipeline::finish_chat)
142-
.def("get_generation_config", static_cast<const ov::genai::GenerationConfig& (ov::genai::VLMPipeline::*)() const>(&ov::genai::VLMPipeline::get_generation_config))
142+
.def("get_generation_config", &ov::genai::VLMPipeline::get_generation_config)
143143
.def(
144144
"generate",
145145
[](ov::genai::VLMPipeline& pipe,

0 commit comments

Comments
 (0)