bensheng committed
Commit 5bf3ded
1 Parent(s): 282742c

use minicpm llama3 model

Files changed (1)
1. app.py +28 -26
app.py CHANGED
@@ -1,14 +1,17 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+import base64
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# Switch to the MiniCPM-Llama3-V-2_5 model
+client = InferenceClient("openbmb/MiniCPM-Llama3-V-2_5")
 
+def encode_image(image_path):
+    with open(image_path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode('utf-8')
 
 def respond(
     message,
+    image,
     history: list[tuple[str, str]],
     system_message,
     max_tokens,
@@ -16,48 +19,47 @@ def respond(
     top_p,
 ):
     messages = [{"role": "system", "content": system_message}]
-
     for val in history:
         if val[0]:
             messages.append({"role": "user", "content": val[0]})
         if val[1]:
             messages.append({"role": "assistant", "content": val[1]})
+
+    # Handle image input
+    if image:
+        base64_image = encode_image(image.name)
+        image_message = f"<image>{base64_image}</image>"
+        message = image_message + "\n" + message
 
     messages.append({"role": "user", "content": message})
-
+
     response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
+    for message in client.text_generation(
+        prompt=f"{messages}",
+        max_new_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
-        token = message.choices[0].delta.content
-
+        token = message.token.text
        response += token
        yield response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
+demo = gr.Interface(
     respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+    inputs=[
+        gr.Textbox(label="Message"),
+        gr.Image(type="filepath", label="Upload Image"),
+        gr.State([]),  # for history
+        gr.Textbox(value="You are a friendly AI assistant capable of understanding images and text.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
     ],
+    outputs=gr.Textbox(label="Response"),
+    title="MiniCPM-Llama3-V-2_5 Image and Text Chat",
+    description="Upload an image and ask questions about it, or just chat without an image."
 )
 
-
 if __name__ == "__main__":
     demo.launch()
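One caveat worth flagging in the new image path: with gr.Image(type="filepath"), Gradio hands the callback a plain path string (or None), not a file object, so encode_image(image.name) would raise AttributeError on the first upload. The path should go to encode_image directly. Below is a minimal sketch of that corrected flow; it reuses the commit's <image>...</image> wrapping, and whether the serving endpoint actually accepts inline base64 this way is an assumption, not something the Inference API guarantees.

import base64

def encode_image(image_path: str) -> str:
    """Read a local image file and return its contents base64-encoded."""
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")

def prepend_image(message: str, image_path: str | None) -> str:
    """Prefix the user message with an <image>...</image> block, if any."""
    # gr.Image(type="filepath") yields a temp-file path string, or None
    # when nothing was uploaded, so a truthiness check suffices.
    if image_path:
        message = f"<image>{encode_image(image_path)}</image>\n{message}"
    return message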
 
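Another rough edge: prompt=f"{messages}" sends the Python repr of the message list, which the model was never trained to read. A hedged sketch of a plain-text flattening follows; it is a naive stand-in, since the Llama-3-based checkpoint really expects its own chat template (normally applied with the tokenizer's apply_chat_template).

def format_prompt(messages: list[dict]) -> str:
    """Flatten chat messages into a role-prefixed plain-text prompt."""
    # Illustrative layout only; swap in the model's real chat template
    # for production use.
    lines = [f"{m['role']}: {m['content']}" for m in messages]
    lines.append("assistant:")  # cue the model to start its reply
    return "\n".join(lines)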
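Finally, the streaming loop: InferenceClient.text_generation(..., stream=True) yields plain strings by default, and only yields stream-response objects carrying a .token.text attribute when details=True is also passed, so message.token.text as committed would fail. A minimal corrected sketch, assuming the endpoint serves the text-generation task at all (MiniCPM-Llama3-V-2_5 is a vision-language model, and its availability on the serverless Inference API is not a given):

from huggingface_hub import InferenceClient

client = InferenceClient("openbmb/MiniCPM-Llama3-V-2_5")

def stream_reply(prompt: str, max_tokens: int = 512,
                 temperature: float = 0.7, top_p: float = 0.95):
    """Yield the growing response string, one generated chunk at a time."""
    response = ""
    # With the default details=False, each streamed item is already a str,
    # so no .token.text access is needed.
    for token in client.text_generation(
        prompt=prompt,
        max_new_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        response += token
        yield response

Wiring stream_reply into the existing gr.Interface would then just be a matter of composing it with prepend_image and format_prompt from the sketches above.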