File size: 1,891 Bytes
65298d5
 
 
 
 
 
 
 
 
 
 
fdcaf03
65298d5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import streamlit as st
import subprocess
from PIL import Image
import requests
from io import BytesIO
from transformers import AutoModel, AutoTokenizer
import torch

# Free any leftover GPU memory before loading the int4-quantized model.
torch.cuda.empty_cache()


@st.cache_resource
def _load_model_and_tokenizer():
    """Load the int4-quantized MiniCPM-Llama3-V-2.5 model and its tokenizer.

    Wrapped in ``st.cache_resource`` because Streamlit re-executes this
    entire script on every widget interaction; without caching, the model
    would be re-downloaded/re-instantiated on every button click.

    Returns:
        tuple: (model, tokenizer) in eval mode, ready for inference.
    """
    model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
    model.eval()  # disable dropout etc.; we only run inference
    return model, tokenizer


# Module-level names kept identical so the handler below can use them.
model, tokenizer = _load_model_and_tokenizer()

# Title of the app
st.title("Streamlit App with Image URL and Prompts")

# Text area for the URL of the image to analyze; read when Submit is clicked.
image_url = st.text_area("Enter Image URL:")

# Text area for the system prompt passed to model.chat() below.
system_prompt = st.text_area("Enter System Prompt:")

# Text area for the user's question about the image.
question = st.text_area("Enter User Prompt:")

# Button to submit: download + display the image, then run the model.
if st.button("Submit"):
    # True only when flowchart.png was successfully (re)written this run;
    # inference is gated on it so we never analyze a stale or missing file.
    image_ready = False

    if image_url:
        try:
            # Single download via requests (the original also shelled out to
            # wget, duplicating the fetch and ignoring its exit status).
            response = requests.get(image_url, timeout=30)
            response.raise_for_status()  # surface HTTP errors (404, 500, ...)
            # Persist the raw bytes for the model step below.
            with open('flowchart.png', 'wb') as f:
                f.write(response.content)
            img = Image.open(BytesIO(response.content))
            st.image(img, caption="Image from URL")
            image_ready = True
        except Exception as e:
            st.error(f"Error loading image. Please submit another image URL with a .png or .jpg extension: {e}")
    else:
        st.warning("Please enter an image URL.")

    # Model code
    if system_prompt and question:
        if image_ready:
            image = Image.open('flowchart.png').convert('RGB')
            msgs = [{'role': 'user', 'content': question}]

            res = model.chat(
                image=image,
                msgs=msgs,
                tokenizer=tokenizer,
                sampling=True,  # if sampling=False, beam_search will be used by default
                temperature=0.7,
                system_prompt=system_prompt  # pass system_prompt if needed
            )

            st.text_area("Output:", value=res, height=200)
    else:
        st.warning("Please enter both system prompt and user prompt.")