import gradio as gr
from transformers import TextIteratorStreamer
from threading import Thread
import re
import time
import requests
from pathlib import Path
from PIL import Image


def download_examples():
    # Fetch the example images used by the demo if they are not already present locally.
    example_images = {
        "weather.png": "https://github.com/user-attachments/assets/85af4410-6e46-484d-b13b-fd9260eb2b7c",
        "newyork.jpg": "https://github.com/user-attachments/assets/c530b689-2ff6-4c4d-91bc-e6ac5331df59",
        "document.jpg": "https://github.com/user-attachments/assets/ac7225b6-bf90-4faf-b05f-bbba41a87142",
        "rococo.jpg": "https://github.com/user-attachments/assets/9e26e36e-f2be-4fa2-affd-891448abcc7d",
        "rococo_1.jpg": "https://github.com/user-attachments/assets/d39bdb95-833c-4ebd-8390-15a8fc2cd0b6",
    }
    for file_name, url in example_images.items():
        if not Path(file_name).exists():
            Image.open(requests.get(url, stream=True).raw).save(file_name)

def make_demo(model, processor):
    download_examples()

    def model_inference(input_dict, history, max_tokens):
        resulting_messages = []
        user_content = []
        media_queue = []
        # First pass: collect media files attached to earlier user turns, in order.
        for hist in history:
            if hist["role"] == "user" and isinstance(hist["content"], tuple):
                file_name = hist["content"][0]
                if file_name.endswith((".png", ".jpg", ".jpeg")):
                    media_queue.append({"type": "image", "path": file_name})
                elif file_name.endswith(".mp4"):
                    media_queue.append({"type": "video", "path": file_name})

        # Second pass: rebuild the conversation, interleaving text with <image>/<video> placeholders.
        for hist in history:
            if hist["role"] == "user" and isinstance(hist["content"], str):
                text = hist["content"]
                parts = re.split(r"(<image>|<video>)", text)

                for part in parts:
                    if part == "<image>" and media_queue:
                        user_content.append(media_queue.pop(0))
                    elif part == "<video>" and media_queue:
                        user_content.append(media_queue.pop(0))
                    elif part.strip():
                        user_content.append({"type": "text", "text": part.strip()})

            elif hist["role"] == "assistant":
                resulting_messages.append({"role": "user", "content": user_content})
                resulting_messages.append({"role": "assistant", "content": [{"type": "text", "text": hist["content"]}]})
                user_content = []

        # Build the current user turn from the multimodal textbox input.
        text = input_dict["text"].strip()
        c_user_content = []
        c_media_queue = []
        for file in input_dict.get("files", []):
            if file.endswith((".png", ".jpg", ".jpeg", ".gif", ".bmp")):
                c_media_queue.append({"type": "image", "path": file})
            elif file.endswith((".mp4", ".mov", ".avi", ".mkv", ".flv")):
                c_media_queue.append({"type": "video", "path": file})

        if "<image>" in text or "<video>" in text:
            # Place attached media at the positions of the explicit placeholders.
            parts = re.split(r"(<image>|<video>)", text)
            for part in parts:
                if part == "<image>" and c_media_queue:
                    c_user_content.append(c_media_queue.pop(0))
                elif part == "<video>" and c_media_queue:
                    c_user_content.append(c_media_queue.pop(0))
                elif part.strip():
                    c_user_content.append({"type": "text", "text": part.strip()})
        else:
            c_user_content.append({"type": "text", "text": text})

        # Append any media that was not consumed by a placeholder.
        for media in c_media_queue:
            c_user_content.append(media)

        current_message = {"role": "user", "content": c_user_content}

        if text == "":
            raise gr.Error("Please input a query and optionally image(s).")
        resulting_messages.append(current_message)
        print("resulting_messages", resulting_messages)
        inputs = processor.apply_chat_template(
            resulting_messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt",
        )

        # Generate in a background thread and stream partial results back to the UI.
        streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
        generation_args = dict(inputs, streamer=streamer, max_new_tokens=max_tokens)

        thread = Thread(target=model.generate, kwargs=generation_args)
        thread.start()

        yield "..."
        buffer = ""

        for new_text in streamer:
            buffer += new_text
            time.sleep(0.01)
            yield buffer

    # Note: dog.mp4 is expected to be available locally; it is not fetched by download_examples().
    examples = [
        [{"text": "Where do the severe droughts happen according to this diagram?", "files": ["weather.png"]}],
        [{"text": "Which art era do this art piece <image> and this art piece <image> belong to?", "files": ["rococo.jpg", "rococo_1.jpg"]}],
        [{"text": "Describe this image.", "files": ["newyork.jpg"]}],
        [{"text": "What is the date in this document?", "files": ["document.jpg"]}],
        [{"text": "What is happening in the video?", "files": ["dog.mp4"]}],
    ]
    demo = gr.ChatInterface(
        fn=model_inference,
        title="SmolVLM2: The Smollest Video Model Ever 📺",
        description="Play with SmolVLM2 and OpenVINO in this demo. To get started, upload an image and text or try one of the examples.",
        examples=examples,
        textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image", ".mp4"], file_count="multiple"),
        stop_btn="Stop Generation",
        multimodal=True,
        cache_examples=False,
        additional_inputs=[gr.Slider(minimum=100, maximum=500, step=50, value=200, label="Max Tokens")],
        type="messages",
    )

    return demo
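
# Usage sketch (illustrative, not part of the original helper): one way make_demo might be
# wired up. The checkpoint id, the IR directory "smolvlm2_ov_model", and the target device
# below are assumptions; adjust them to match your own OpenVINO export.
if __name__ == "__main__":
    from transformers import AutoProcessor
    from optimum.intel.openvino import OVModelForVisualCausalLM

    processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM2-2.2B-Instruct")
    ov_model = OVModelForVisualCausalLM.from_pretrained("smolvlm2_ov_model", device="CPU")
    demo = make_demo(ov_model, processor)
    demo.launch()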