Samet Yilmaz committed on
Commit 216b96d
1 Parent(s): ab98940

Pixtral vLLM

.idea/Pixal-Inference.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="inheritedJdk" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/Pixal-Inference.iml" filepath="$PROJECT_DIR$/.idea/Pixal-Inference.iml" />
+     </modules>
+   </component>
+ </project>
.idea/workspace.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="PropertiesComponent">{}</component>
+ </project>
app.py ADDED
@@ -0,0 +1,83 @@
+ from vllm import LLM, SamplingParams
+ import gradio as gr
+
+ repo_id = "mistral-community/pixtral-12b-240910"  # Replace with the model you would like to use
+ sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=512)  # the default max_tokens (16) would truncate answers
+
+ # Load the model once at startup rather than on every request.
+ # tokenizer_mode="mistral" selects the tekken tokenizer that Pixtral's Mistral-format weights ship with.
+ llm = LLM(model=repo_id, tokenizer_mode="mistral")
+
+ # @spaces.GPU  # [uncomment to use ZeroGPU; also requires `import spaces`]
+ def infer(image_url, prompt, progress=gr.Progress(track_tqdm=True)):
+     # Build an OpenAI-style chat message combining the text prompt and the image URL
+     messages = [
+         {
+             "role": "user",
+             "content": [{"type": "text", "text": prompt}, {"type": "image_url", "image_url": {"url": image_url}}],
+         },
+     ]
+
+     outputs = llm.chat(messages, sampling_params=sampling_params)
+
+     # Return the generated text so it can be shown in the result Textbox
+     return outputs[0].outputs[0].text
+
+
+ example_images = ["https://picsum.photos/id/237/200/300"]
+ example_prompts = ["What do you see in this image?"]
+
+ css = """
+ #col-container {
+     margin: 0 auto;
+     max-width: 640px;
+ }
+ """
+
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.Markdown("""
+         # Pixtral vLLM Inference
+         """)
+
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=2,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+
+             image_url = gr.Text(
+                 label="Image URL",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your image URL",
+                 container=False,
+             )
+
+         run_button = gr.Button("Run", scale=0)
+
+         result = gr.Textbox(
+             show_label=False
+         )
+
+         gr.Examples(
+             examples=example_images,
+             inputs=[image_url]
+         )
+
+         gr.Examples(
+             examples=example_prompts,
+             inputs=[prompt]
+         )
+
+         gr.on(
+             triggers=[run_button.click, image_url.submit, prompt.submit],
+             fn=infer,
+             inputs=[image_url, prompt],
+             outputs=[result]
+         )
+
+ demo.queue().launch()
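
Stripped of the Gradio layer, app.py reduces to a single llm.chat call. Below is a minimal standalone sketch of that same inference path, useful for testing in a plain Python session; it assumes the same checkpoint and a GPU with enough memory for the 12B weights:

from vllm import LLM, SamplingParams

# Same checkpoint as app.py; tokenizer_mode="mistral" is needed for Pixtral's tekken tokenizer.
llm = LLM(model="mistral-community/pixtral-12b-240910", tokenizer_mode="mistral")

messages = [{
    "role": "user",
    "content": [
        {"type": "text", "text": "What do you see in this image?"},
        {"type": "image_url", "image_url": {"url": "https://picsum.photos/id/237/200/300"}},
    ],
}]

# llm.chat returns a list of RequestOutput objects, one per conversation;
# each candidate completion sits under .outputs, with the generated string on .text.
outputs = llm.chat(messages, sampling_params=SamplingParams(temperature=0.8, top_p=0.95, max_tokens=512))
print(outputs[0].outputs[0].text)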
requirements.txt ADDED
@@ -0,0 +1 @@
+ vllm==0.6.1
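
When this runs as a Gradio Space, gradio itself comes with the Space SDK image, so only vllm needs pinning here. A quick sanity check that the pinned build is the one actually imported (vllm exposes a version string):

import vllm

# Confirm the environment matches the pin in requirements.txt.
assert vllm.__version__ == "0.6.1", f"unexpected vllm version: {vllm.__version__}"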