forked from openvinotoolkit/openvino.genai
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: chat_sample_c.c
67 lines (57 loc) · 2.43 KB
/
chat_sample_c.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
// Copyright (C) 2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "openvino/genai/c/llm_pipeline.h"
#define MAX_PROMPT_LENGTH 64
// Check an ov_genai C API status code; on failure, log it and jump to the
// caller's `err` cleanup label (caller must declare one).
// The argument is evaluated exactly once (it is usually a function call),
// and the do/while(0) wrapper makes the macro a single statement, safe
// inside unbraced if/else bodies.
#define CHECK_STATUS(return_status)                                                    \
    do {                                                                               \
        int status_ = (return_status);                                                 \
        if (status_ != OK) {                                                           \
            fprintf(stderr, "[ERROR] return status %d, line %d\n", status_, __LINE__); \
            goto err;                                                                  \
        }                                                                              \
    } while (0)
// Streaming callback: echo each generated chunk to stdout as it arrives.
// `args` is the opaque user-data pointer (unused by this sample).
// Returns RUNNING to keep generation going, STOP on a NULL chunk.
ov_genai_streamming_status_e print_callback(const char* str, void* args) {
    (void)args;  // user data is not needed here
    if (str == NULL) {
        printf("Callback executed with NULL message!\n");
        return OV_GENAI_STREAMMING_STATUS_STOP;
    }
    fprintf(stdout, "%s", str);
    fflush(stdout);  // show partial output immediately while streaming
    return OV_GENAI_STREAMMING_STATUS_RUNNING;
}
// Interactive chat loop: read a prompt per line from stdin, stream the
// model's reply via print_callback, repeat until EOF.
// Usage: chat_sample_c <MODEL_DIR>
// Exits EXIT_FAILURE if any ov_genai call fails (previously this returned
// EXIT_SUCCESS even on the error path — that was a bug).
int main(int argc, char* argv[]) {
    if (argc != 2) {
        fprintf(stderr, "Usage: %s <MODEL_DIR>\n", argv[0]);
        return EXIT_FAILURE;
    }
    const char* models_path = argv[1];
    const char* device = "CPU";  // GPU, NPU can be used as well
    int exit_code = EXIT_FAILURE;  // pessimistic: flipped to success only after finish_chat
    ov_genai_generation_config* config = NULL;
    ov_genai_llm_pipeline* pipeline = NULL;
    streamer_callback streamer;
    streamer.callback_func = print_callback;
    // NOTE(review): if streamer_callback carries a user-data field (passed as
    // `args` to the callback), it is left uninitialized here — confirm against
    // the struct definition in llm_pipeline.h and zero it if present.
    char prompt[MAX_PROMPT_LENGTH];
    CHECK_STATUS(ov_genai_llm_pipeline_create(models_path, device, &pipeline));
    CHECK_STATUS(ov_genai_generation_config_create(&config));
    CHECK_STATUS(ov_genai_generation_config_set_max_new_tokens(config, 100));
    CHECK_STATUS(ov_genai_llm_pipeline_start_chat(pipeline));
    printf("question:\n");
    while (fgets(prompt, sizeof prompt, stdin)) {
        prompt[strcspn(prompt, "\n")] = 0;  // strip the trailing newline fgets keeps
        CHECK_STATUS(ov_genai_llm_pipeline_generate(pipeline,
                                                    prompt,
                                                    config,
                                                    &streamer,
                                                    NULL));  // Only the streamer functionality is used here.
        printf("\n----------\nquestion:\n");
    }
    CHECK_STATUS(ov_genai_llm_pipeline_finish_chat(pipeline));
    exit_code = EXIT_SUCCESS;
err:
    if (pipeline)
        ov_genai_llm_pipeline_free(pipeline);
    if (config)
        ov_genai_generation_config_free(config);
    return exit_code;
}