@@ -3,6 +3,7 @@
 
 #include "load_image.hpp"
 #include <openvino/genai/visual_language/pipeline.hpp>
+#include <filesystem>
 #include <openvino/runtime/intel_gpu/properties.hpp>
 
 bool print_subword(std::string&& subword) {
@@ -11,9 +12,14 @@ bool print_subword(std::string&& subword) {
 
 int main(int argc, char* argv[]) try {
     if (3 != argc) {
-        throw std::runtime_error(std::string{"Usage "} + argv[0] + " <MODEL_DIR> <IMAGE_FILE>");
+        throw std::runtime_error(std::string{"Usage "} + argv[0] + " <MODEL_DIR> <IMAGE_FILE OR DIR_WITH_IMAGES>");
     }
-    ov::Tensor image = utils::load_image(argv[2]);
+
+    std::vector<ov::Tensor> images = utils::load_images(argv[2]);
+
+    ov::genai::GenerationConfig generation_config;
+    generation_config.max_new_tokens = 200;
+
     std::string device = "CPU";  // GPU can be used as well
     ov::AnyMap enable_compile_cache;
     if ("GPU" == device) {
@@ -26,16 +32,18 @@ int main(int argc, char* argv[]) try {
 
     pipe.start_chat();
     std::cout << "question:\n";
+
     std::getline(std::cin, prompt);
-    pipe.generate(
-        prompt,
-        ov::genai::image(image),
-        ov::genai::streamer(print_subword)
-    );
+    pipe.generate(prompt,
+                  ov::genai::images(images),
+                  ov::genai::generation_config(generation_config),
+                  ov::genai::streamer(print_subword));
     std::cout << "\n----------\n"
         "question:\n";
     while (std::getline(std::cin, prompt)) {
-        pipe.generate(prompt, ov::genai::streamer(print_subword));
+        pipe.generate(prompt,
+                      ov::genai::generation_config(generation_config),
+                      ov::genai::streamer(print_subword));
         std::cout << "\n----------\n"
             "question:\n";
     }
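For context, the sample now calls utils::load_images(argv[2]) so that the second argument can name either a single image file or a directory of images, matching the updated usage string. The helper itself lives in load_image.hpp, which this commit does not touch, so the following is only a minimal sketch of how a directory-aware loader could be layered on top of a single-image load_image(); the load_image() signature shown is an assumption for illustration, not taken from this diff.

// Sketch only: directory-aware image loading built on an assumed
// single-image loader. Not the repository's actual implementation.
#include <openvino/runtime/tensor.hpp>

#include <filesystem>
#include <vector>

namespace utils {

// Assumed to be declared in load_image.hpp: decodes one image file into an ov::Tensor.
ov::Tensor load_image(const std::filesystem::path& image_path);

// Accepts either one image file or a directory of images.
std::vector<ov::Tensor> load_images(const std::filesystem::path& input_path) {
    std::vector<ov::Tensor> images;
    if (std::filesystem::is_directory(input_path)) {
        // Load every regular file in the directory as an image.
        for (const auto& entry : std::filesystem::directory_iterator{input_path}) {
            if (entry.is_regular_file()) {
                images.push_back(load_image(entry.path()));
            }
        }
    } else {
        // Single file: behaves like the previous load_image() path.
        images.push_back(load_image(input_path));
    }
    return images;
}

}  // namespace utils

This kind of path handling is consistent with the #include <filesystem> added in the first hunk, and the new generation_config.max_new_tokens = 200 simply bounds the length of each reply in the chat loop.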