diff --git a/samples/c/text_generation/chat_sample_c.c b/samples/c/text_generation/chat_sample_c.c
index 3c16ac63de..86eb4369e5 100644
--- a/samples/c/text_generation/chat_sample_c.c
+++ b/samples/c/text_generation/chat_sample_c.c
@@ -8,17 +8,16 @@
 #include "openvino/genai/c/llm_pipeline.h"
 
 #define MAX_PROMPT_LENGTH 64
-#define MAX_OUTPUT_LENGTH 1024
 
 #define CHECK_STATUS(return_status)                                                      \
     if (return_status != OK) {                                                           \
         fprintf(stderr, "[ERROR] return status %d, line %d\n", return_status, __LINE__); \
         goto err;                                                                        \
     }
-
-ov_genai_streamming_status_e print_callback(const char* args) {
-    if (args) {
-        fprintf(stdout, "%s", args);
+ov_genai_streamming_status_e print_callback(const char* str, void* args) {
+    if (str) {
+        // If args is not NULL, cast it to its actual type before use.
+        fprintf(stdout, "%s", str);
         fflush(stdout);
         return OV_GENAI_STREAMMING_STATUS_RUNNING;
     } else {
@@ -37,7 +36,9 @@ int main(int argc, char* argv[]) {
 
     ov_genai_generation_config* config = NULL;
     ov_genai_llm_pipeline* pipeline = NULL;
-    stream_callback streamer = &print_callback;
+    streamer_callback streamer;
+    streamer.callback_func = print_callback;
+    streamer.args = NULL;
     char prompt[MAX_PROMPT_LENGTH];
 
     CHECK_STATUS(ov_genai_llm_pipeline_create(models_path, device, &pipeline));
diff --git a/src/c/include/openvino/genai/c/llm_pipeline.h b/src/c/include/openvino/genai/c/llm_pipeline.h
index ed64692b3c..01ecc5068b 100644
--- a/src/c/include/openvino/genai/c/llm_pipeline.h
+++ b/src/c/include/openvino/genai/c/llm_pipeline.h
@@ -96,9 +96,17 @@ typedef enum {
 } ov_genai_streamming_status_e;
 
 /**
- * @brief Callback function for streaming output.
+ * @brief Structure holding a streamer callback function and its arguments.
+ *
+ * The callback function takes two parameters:
+ * - `const char* str`: the decoded text chunk handed to the callback for processing
+ * - `void* args`: a pointer to additional arguments, allowing flexible data passing
  */
-typedef ov_genai_streamming_status_e(OPENVINO_C_API_CALLBACK* stream_callback)(const char*);
+typedef struct {
+    ov_genai_streamming_status_e(
+        OPENVINO_C_API_CALLBACK* callback_func)(const char* str, void* args);  //!< Pointer to the callback function
+    void* args;  //!< Pointer to the arguments passed to the callback function
+} streamer_callback;
 
 /**
  * @brief Generate results by ov_genai_llm_pipeline
@@ -114,7 +122,7 @@ typedef ov_genai_streamming_status_e(OPENVINO_C_API_CALLBACK* stream_callback)(c
 OPENVINO_GENAI_C_EXPORTS ov_status_e ov_genai_llm_pipeline_generate(ov_genai_llm_pipeline* pipe,
                                                                     const char* inputs,
                                                                     const ov_genai_generation_config* config,
-                                                                    const stream_callback* streamer,
+                                                                    const streamer_callback* streamer,
                                                                     ov_genai_decoded_results** results);
 /**
  * @brief Start chat with keeping history in kv cache.
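For reference, a minimal caller-side sketch of what the new `args` field enables: user state travels with the callback instead of through globals. This is not part of the change itself; the `stream_stats` type, `counting_callback`, and the use of `OV_GENAI_STREAMMING_STATUS_STOP` for the end-of-stream branch are illustrative assumptions, while `streamer_callback`, `callback_func`, and `args` come from the header change above.

#include <stdio.h>

#include "openvino/genai/c/llm_pipeline.h"

/* Hypothetical user-defined state threaded through `args`. */
typedef struct {
    size_t chunks; /* number of text chunks streamed so far */
} stream_stats;

ov_genai_streamming_status_e counting_callback(const char* str, void* args) {
    stream_stats* stats = (stream_stats*)args; /* cast args back to its actual type */
    if (str) {
        stats->chunks++;
        fprintf(stdout, "%s", str);
        fflush(stdout);
        return OV_GENAI_STREAMMING_STATUS_RUNNING;
    }
    return OV_GENAI_STREAMMING_STATUS_STOP; /* assumed end-of-stream status */
}

/* Wiring it up before generate():
 *     stream_stats stats = {0};
 *     streamer_callback streamer;
 *     streamer.callback_func = counting_callback;
 *     streamer.args = &stats;
 *     CHECK_STATUS(ov_genai_llm_pipeline_generate(pipeline, prompt, config, &streamer, NULL));
 */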
diff --git a/src/c/src/llm_pipeline.cpp b/src/c/src/llm_pipeline.cpp
index 8b0d6c1b28..a2779d8248 100644
--- a/src/c/src/llm_pipeline.cpp
+++ b/src/c/src/llm_pipeline.cpp
@@ -90,7 +90,7 @@ void ov_genai_llm_pipeline_free(ov_genai_llm_pipeline* pipe) {
 ov_status_e ov_genai_llm_pipeline_generate(ov_genai_llm_pipeline* pipe,
                                            const char* inputs,
                                            const ov_genai_generation_config* config,
-                                           const stream_callback* streamer,
+                                           const streamer_callback* streamer,
                                            ov_genai_decoded_results** results) {
     if (!pipe || !(pipe->object) || !inputs || !(streamer || results)) {
         return ov_status_e::INVALID_C_PARAM;
@@ -102,7 +102,7 @@ ov_status_e ov_genai_llm_pipeline_generate(ov_genai_llm_pipeline* pipe,
     ov::genai::StringInputs input = {input_str};
     if (streamer) {
         auto callback = [streamer](std::string word) -> ov::genai::StreamingStatus {
-            return static_cast<ov::genai::StreamingStatus>((*streamer)(word.c_str()));
+            return static_cast<ov::genai::StreamingStatus>((streamer->callback_func)(word.c_str(), streamer->args));
         };
         *(_results->object) = (config && config->object)
                                   ? pipe->object->generate(input, *(config->object), callback)
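Because the wrapper above forwards `streamer->args` to `callback_func` on every decoded chunk, the callback's return value can be driven by caller state, e.g. to cancel generation early. A sketch under the same assumptions as before (`char_budget` and `budget_callback` are invented names; `OV_GENAI_STREAMMING_STATUS_STOP` is assumed from the status enum):

#include <stdio.h>
#include <string.h>

#include "openvino/genai/c/llm_pipeline.h"

/* Hypothetical state: characters still allowed before we stop the stream. */
typedef struct {
    size_t remaining;
} char_budget;

ov_genai_streamming_status_e budget_callback(const char* str, void* args) {
    char_budget* budget = (char_budget*)args;
    if (!str) {
        return OV_GENAI_STREAMMING_STATUS_STOP;
    }
    fprintf(stdout, "%s", str);
    fflush(stdout);
    size_t len = strlen(str);
    if (len >= budget->remaining) {
        budget->remaining = 0;
        return OV_GENAI_STREAMMING_STATUS_STOP; /* budget spent: ask the pipeline to stop */
    }
    budget->remaining -= len;
    return OV_GENAI_STREAMMING_STATUS_RUNNING;
}

The returned status is all the C side needs for flow control: the lambda in llm_pipeline.cpp casts it back to ov::genai::StreamingStatus and hands it to the C++ pipeline.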