
Commit 1b9a14c

Fixed conflicts.

2 parents: ad53a80 + c67ddd5

4 files changed: +21 -2 lines changed

.github/workflows/causal_lm_cpp.yml (+18)
@@ -682,7 +682,11 @@ jobs:
         diff pred2.txt ref.txt
         echo "Chat sample python" passed
 
+<<<<<<< HEAD
   visual_chat_sample-ubuntu:
+=======
+  py-vlm_chat_sample-ubuntu:
+>>>>>>> c67ddd58652c6ac5aab1b3c9fff0036fb9f4a692
     runs-on: ubuntu-22.04-16-cores
     defaults:
       run:
@@ -703,19 +707,33 @@ jobs:
       run: |
         source ./ov/setupvars.sh
         cmake -DCMAKE_BUILD_TYPE=Release -S ./ -B ./build/
+<<<<<<< HEAD
         cmake --build ./build/ --config Release --target visual_language_chat -j
+=======
+        cmake --build ./build/ --config Release -j
+>>>>>>> c67ddd58652c6ac5aab1b3c9fff0036fb9f4a692
     - name: Download and convert and model
       run: |
         source ./ov/setupvars.sh
         python -m pip install --upgrade-strategy eager -r ./samples/requirements.txt --pre --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly
         python -m pip install ./thirdparty/openvino_tokenizers/[transformers] --pre --extra-index-url https://storage.openvinotoolkit.org/simple/wheels/nightly
         python ./samples/cpp/visual_language_chat/export_MiniCPM-V-2_6.py ./miniCPM-V-2_6/
         wget https://github.com/openvinotoolkit/openvino_notebooks/assets/29454499/d5fbbd1a-d484-415c-88cb-9986625b7b11
+<<<<<<< HEAD
 
     - name: Run chat sample
         source ./ov/setupvars.sh
         printf 'What is on the image?\nWhat is special on the image?\n' > ./input.txt
         timeout 120s python ./samples/python/vlm_chat_sample/vlm_chat_sample.py ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11 < input.txt > ./pred.txt || ( [[ $? -eq 124 ]] && \
+=======
+
+    - name: Run chat sample
+      run: |
+        source ./ov/setupvars.sh
+        export PYTHONPATH=./build/:$PYTHONPATH
+        printf 'What is on the image?\n' > ./input.txt
+        timeout 300s python ./samples/python/vlm_chat_sample/vlm_chat_sample.py ./miniCPM-V-2_6/ d5fbbd1a-d484-415c-88cb-9986625b7b11 < input.txt > ./pred.txt || ( [[ $? -eq 124 ]] && \
+>>>>>>> c67ddd58652c6ac5aab1b3c9fff0036fb9f4a692
         echo "Timeout reached, but it's excpected." )
 
   cpp-continuous-batching-ubuntu:
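
On both sides of the conflict above, the "Run chat sample" step follows the same pattern: prompts are written to ./input.txt, the sample is run under timeout with its output redirected to ./pred.txt, and exit code 124 (GNU timeout's "time limit reached" status) is treated as expected. A rough, illustrative Python equivalent of that control flow is below; the paths, prompt, and 300-second limit come from the incoming branch's step, while driving the sample through subprocess instead of the shell is an assumption made only for this sketch.

# Illustrative sketch of the "Run chat sample" step's logic, not part of the commit.
# The command, prompt, timeout, and exit-code-124 handling mirror the diff above;
# running it through subprocess rather than the shell is an assumption.
import subprocess

with open("input.txt", "w") as f:
    f.write("What is on the image?\n")

cmd = [
    "timeout", "300s", "python",
    "./samples/python/vlm_chat_sample/vlm_chat_sample.py",
    "./miniCPM-V-2_6/",
    "d5fbbd1a-d484-415c-88cb-9986625b7b11",
]
with open("input.txt") as stdin, open("pred.txt", "w") as stdout:
    result = subprocess.run(cmd, stdin=stdin, stdout=stdout)

# GNU timeout exits with 124 when the limit is reached; the workflow prints
# "Timeout reached, but it's excpected." and continues in that case.
if result.returncode not in (0, 124):
    raise SystemExit(result.returncode)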

samples/requirements.txt (+1)

@@ -5,3 +5,4 @@ transformers_stream_generator==0.0.5 # For Qwen
 diffusers==0.30.3
 pillow
 torchvision # needed for mini-CPM export script. Need to remove when we switch to exporting with optimum-intel.
+
src/python/openvino_genai/__init__.py (+1 -1)

@@ -17,7 +17,7 @@
     EncodedResults,
     GenerationConfig,
     GenerationResult,
-    LLMPipeline,
+    LLMPipeline,
     VLMPipeline,
     PerfMetrics,
     RawPerfMetrics,

(The removed and added lines render identically here; the actual change is presumably whitespace only, which this view does not preserve.)

src/python/py_vlm_pipeline.cpp (+1 -1)

@@ -109,7 +109,7 @@ void init_vlm_pipeline(py::module_& m) {
         ScopedVar env_manager(utils::ov_tokenizers_module_path());
         return std::make_unique<ov::genai::VLMPipeline>(model_path, device, utils::properties_to_any_map(config));
     }),
-    py::arg("model_path"), "folder with exported model files",
+    py::arg("model_path"), "folder with exported model files",
    py::arg("device") = "CPU", "device on which inference will be done",
    py::arg("config") = ov::AnyMap({}), "openvino.properties map"
    R"(

(As above, the changed line appears identical here; the difference is presumably whitespace only.)
