#!/usr/bin/env python
# encoding: utf-8
import spaces
import gradio as gr
from PIL import Image
import traceback
import re
import torch
import argparse
import numpy as np
from transformers import AutoModel, AutoTokenizer

# README: how to run the demo on different devices.
# For Nvidia GPUs:
#   python web_demo_2.5.py --device cuda
# For Macs with MPS (Apple silicon or AMD GPUs):
#   PYTORCH_ENABLE_MPS_FALLBACK=1 python web_demo_2.5.py --device mps

# Argparser (a minimal sketch is given at the end of this file)

# test.py
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
import bitsandbytes as bnb  # required to load the int4-quantized checkpoint
import accelerate           # required for automatic device placement

# Load the int4-quantized model and tokenizer; trust_remote_code is needed
# because the checkpoint ships its own modeling code.
model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
model.eval()

image = Image.open('xx.jpg').convert('RGB')
question = 'What is in the image?'
msgs = [{'role': 'user', 'content': question}]

res = model.chat(
    image=image,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,  # if sampling=False, beam search is used by default
    temperature=0.7,
    # system_prompt=''  # pass a system prompt if needed
)
print(res)

## If you want streaming output, make sure sampling=True and stream=True;
## model.chat then returns a generator.
res = model.chat(
    image=image,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7,
    stream=True
)

generated_text = ""
for new_text in res:
    generated_text += new_text
    print(new_text, flush=True, end='')
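
## Multi-turn follow-up: a minimal sketch, assuming model.chat accepts prior
## turns as role/content dicts in msgs, matching the message format above.
## The follow-up question and the reuse of generated_text as the assistant
## turn are illustrative assumptions, not part of the original example.
msgs.append({'role': 'assistant', 'content': generated_text})
msgs.append({'role': 'user', 'content': 'Describe the main object in more detail.'})  # hypothetical follow-up
follow_up = model.chat(
    image=image,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7
)
print(follow_up)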
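
## Argparser sketch for web_demo_2.5.py (referenced above): a minimal sketch,
## assuming the demo only needs the --device flag shown in the run
## instructions. The description string and the validation step are
## assumptions, not the original implementation.
parser = argparse.ArgumentParser(description='MiniCPM-Llama3-V 2.5 demo')  # hypothetical description
parser.add_argument('--device', type=str, default='cuda',
                    help="device to run the demo on: 'cuda' or 'mps'")
args = parser.parse_args()
assert args.device in ('cuda', 'mps'), 'device must be cuda or mps'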