Muratbeser committed on
Commit 4f7d679
1 Parent(s): 639ac95

Added multiple image upload functionality to the Qwen2.5-Math-Demo

Files changed (1)
  1. app.py +51 -92
app.py CHANGED
@@ -1,7 +1,6 @@
+!pip install --upgrade gradio
 import gradio as gr
 import os
-
-os.system('pip install dashscope -U')
 import tempfile
 from pathlib import Path
 import secrets
@@ -9,31 +8,29 @@ import dashscope
 from dashscope import MultiModalConversation, Generation
 from PIL import Image
 
-
-# 设置API密钥
+# API key setup
 YOUR_API_TOKEN = os.getenv('YOUR_API_TOKEN')
 dashscope.api_key = YOUR_API_TOKEN
+
+# Global variables
 math_messages = []
+image_descriptions = []
+
 def process_image(image, shouldConvert=False):
-    # 获取上传文件的目录
-    global math_messages
-    math_messages = [] # reset when upload image
     uploaded_file_dir = os.environ.get("GRADIO_TEMP_DIR") or str(
         Path(tempfile.gettempdir()) / "gradio"
     )
     os.makedirs(uploaded_file_dir, exist_ok=True)
 
-    # 创建临时文件路径
     name = f"tmp{secrets.token_hex(20)}.jpg"
     filename = os.path.join(uploaded_file_dir, name)
-    # 保存上传的图片
+
     if shouldConvert:
         new_img = Image.new('RGB', size=(image.width, image.height), color=(255, 255, 255))
         new_img.paste(image, (0, 0), mask=image)
         image = new_img
     image.save(filename)
 
-    # 调用qwen-vl-max-0809模型处理图片
     messages = [{
         'role': 'system',
         'content': [{'text': 'You are a helpful assistant.'}]
@@ -47,54 +44,53 @@ def process_image(image, shouldConvert=False):
 
     response = MultiModalConversation.call(model='qwen-vl-max-0809', messages=messages)
 
-    # 清理临时文件
    os.remove(filename)
 
     return response.output.choices[0]["message"]["content"]
 
-def get_math_response(image_description, user_question):
+def get_math_response(image_descriptions, user_question):
     global math_messages
     if not math_messages:
         math_messages.append({'role': 'system', 'content': 'You are a helpful math assistant.'})
-    math_messages = math_messages[:1]
-    if image_description is not None:
-        content = f'Image description: {image_description}\n\n'
-    else:
-        content = ''
-    query = f"{content}User question: {user_question}"
-    math_messages.append({'role': 'user', 'content': query})
+
+    content = "Image descriptions:\n" + "\n".join(image_descriptions) if image_descriptions else ""
+    content += f"\n\nUser question: {user_question}"
+
+    math_messages.append({'role': 'user', 'content': content})
     response = Generation.call(
         model="qwen2.5-math-72b-instruct",
         messages=math_messages,
         result_format='message',
         stream=True
     )
-    answer = None
+    answer = ""
     for resp in response:
         if resp.output is None:
             continue
         answer = resp.output.choices[0].message.content
         yield answer.replace("\\", "\\\\")
-    print(f'query: {query}\nanswer: {answer}')
-    if answer is None:
-        math_messages.pop()
-    else:
-        math_messages.append({'role': 'assistant', 'content': answer})
-
+
+    math_messages.append({'role': 'assistant', 'content': answer})
 
-def math_chat_bot(image, sketchpad, question, state):
-    current_tab_index = state["tab_index"]
-    image_description = None
-    # Upload
-    if current_tab_index == 0:
-        if image is not None:
-            image_description = process_image(image)
-    # Sketch
-    elif current_tab_index == 1:
-        print(sketchpad)
-        if sketchpad and sketchpad["composite"]:
-            image_description = process_image(sketchpad["composite"], True)
-    yield from get_math_response(image_description, question)
+def math_chat_bot(images, sketchpad, question, chat_history):
+    global image_descriptions
+
+    # Process new images
+    for image in images:
+        if image:
+            description = process_image(image)
+            image_descriptions.append(description)
+
+    # Process sketchpad if present
+    if sketchpad and sketchpad["composite"]:
+        sketch_description = process_image(sketchpad["composite"], True)
+        image_descriptions.append(sketch_description)
+
+    # Generate response
+    response = ""
+    for chunk in get_math_response(image_descriptions, question):
+        response += chunk
+        yield chat_history + [(question, response)]
 
 css = """
 #qwen-md .katex-display { display: inline; }
@@ -102,11 +98,7 @@ css = """
 #qwen-md .katex-display>.katex>.katex-html { display: inline; }
 """
 
-def tabs_select(e: gr.SelectData, _state):
-    _state["tab_index"] = e.index
-
-
-# 创建Gradio接口
+# Create Gradio interface
 with gr.Blocks(css=css) as demo:
     gr.HTML("""\
 <p align="center"><img src="https://modelscope.oss-cn-beijing.aliyuncs.com/resource/qwen.png" style="height: 60px"/><p>"""
@@ -114,56 +106,23 @@ with gr.Blocks(css=css) as demo:
     """\
 <center><font size=3>This WebUI is based on Qwen2-VL for OCR and Qwen2.5-Math for mathematical reasoning. You can input either images or texts of mathematical or arithmetic problems.</center>"""
     )
-    state = gr.State({"tab_index": 0})
+
     with gr.Row():
         with gr.Column():
-            with gr.Tabs() as input_tabs:
-                with gr.Tab("Upload"):
-                    input_image = gr.Image(type="pil", label="Upload"),
-                with gr.Tab("Sketch"):
-                    input_sketchpad = gr.Sketchpad(type="pil", label="Sketch", layers=False)
-            input_tabs.select(fn=tabs_select, inputs=[state])
-            input_text = gr.Textbox(label="input your question")
+            input_images = gr.File(file_count="multiple", label="Upload Images", type="file")
+            input_sketchpad = gr.Sketchpad(type="pil", label="Sketch", layers=False)
+            input_text = gr.Textbox(label="Input your question")
             with gr.Row():
-                with gr.Column():
-                    clear_btn = gr.ClearButton(
-                        [*input_image, input_sketchpad, input_text])
-                with gr.Column():
-                    submit_btn = gr.Button("Submit", variant="primary")
+                clear_btn = gr.ClearButton([input_images, input_sketchpad, input_text])
+                submit_btn = gr.Button("Submit", variant="primary")
+
         with gr.Column():
-            output_md = gr.Markdown(label="answer",
-                latex_delimiters=[{
-                    "left": "\\(",
-                    "right": "\\)",
-                    "display": True
-                }, {
-                    "left": "\\begin\{equation\}",
-                    "right": "\\end\{equation\}",
-                    "display": True
-                }, {
-                    "left": "\\begin\{align\}",
-                    "right": "\\end\{align\}",
-                    "display": True
-                }, {
-                    "left": "\\begin\{alignat\}",
-                    "right": "\\end\{alignat\}",
-                    "display": True
-                }, {
-                    "left": "\\begin\{gather\}",
-                    "right": "\\end\{gather\}",
-                    "display": True
-                }, {
-                    "left": "\\begin\{CD\}",
-                    "right": "\\end\{CD\}",
-                    "display": True
-                }, {
-                    "left": "\\[",
-                    "right": "\\]",
-                    "display": True
-                }],
-                elem_id="qwen-md")
-    submit_btn.click(
-        fn=math_chat_bot,
-        inputs=[*input_image, input_sketchpad, input_text, state],
-        outputs=output_md)
+            chat_output = gr.Chatbot(label="Chat History", elem_id="qwen-md")
+
+    submit_btn.click(
+        fn=math_chat_bot,
+        inputs=[input_images, input_sketchpad, input_text, chat_output],
+        outputs=chat_output
+    )
+
 demo.launch()
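
A note on the new multiple-upload path: gr.File typically hands math_chat_bot file paths or file-wrapper objects rather than PIL images, so the uploads are opened with PIL before they reach process_image. A minimal sketch of that conversion, assuming the component is created with type="filepath"; the load_uploaded_images helper below is illustrative and not part of this commit:

from PIL import Image

def load_uploaded_images(files):
    # files: the list produced by gr.File(file_count="multiple"); None when nothing is uploaded.
    images = []
    for f in files or []:
        # Accept either a plain path string or a file-wrapper object with a .name attribute.
        path = f if isinstance(f, str) else getattr(f, "name", None)
        if path:
            images.append(Image.open(path).convert("RGB"))
    return images

# Illustrative use inside math_chat_bot:
#     for image in load_uploaded_images(images):
#         image_descriptions.append(process_image(image))

Converting to RGB up front also keeps the .jpg save in process_image from failing on uploads that carry an alpha channel.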