AnishHF committed
Commit 98bcd58
1 Parent(s): 7e27808

Update app.py

Files changed (1)
  1. app.py +6 -34
app.py CHANGED
@@ -1,35 +1,7 @@
-import os
-import torch
-import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM, QuantoConfig
+from transformers import pipeline
 
-access_token = os.environ["GATED_ACCESS_TOKEN"]
-
-quantization_config = QuantoConfig(
-    weights = "int4"
-)
-
-tokenizer = AutoTokenizer.from_pretrained("ProbeMedicalYonseiMAILab/medllama3-v20", quantization_config=quantization_config, device_map="auto")
-model = AutoModelForCausalLM.from_pretrained("ProbeMedicalYonseiMAILab/medllama3-v20")
-
-# Function to generate text using the model
-def generate_text(prompt):
-    text = prompt
-    inputs = tokenizer(text, return_tensors="pt")
-
-    outputs = model.generate(**inputs, max_new_tokens=512)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-# Create the Gradio interface
-iface = gr.Interface(
-    fn=generate_text,
-    inputs=[
-        gr.Textbox(lines=5, label="Input Prompt"),
-    ],
-    outputs=gr.outputs.Textbox(label="Generated Text"),
-    title="MisTRAL Text Generation",
-    description="Use this interface to generate text using the MisTRAL language model.",
-)
-
-# Launch the Gradio interface
-iface.launch()
+messages = [
+    {"role": "user", "content": "Who are you?"},
+]
+pipe = pipeline("text-generation", model="bongbongs/NewMes-v15")
+pipe(messages)
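As committed, the new app.py runs one generation and discards the pipeline's return value, so the Space no longer serves the Gradio interface this commit removes. Below is a minimal sketch, not part of commit 98bcd58, of how the new pipeline could be wired back into a Gradio UI like the old one; the chat-style return format and max_new_tokens=512 (carried over from the removed code) are assumptions about recent transformers releases, not something the commit specifies.

# Sketch only, not the committed app.py. Assumes a transformers release whose
# text-generation pipeline accepts chat-style message lists and returns the
# full conversation under "generated_text".
import gradio as gr
from transformers import pipeline

pipe = pipeline("text-generation", model="bongbongs/NewMes-v15")

def generate_text(prompt):
    messages = [{"role": "user", "content": prompt}]
    result = pipe(messages, max_new_tokens=512)
    # With chat input, the last entry in "generated_text" is the assistant's reply.
    return result[0]["generated_text"][-1]["content"]

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=5, label="Input Prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="NewMes-v15 Text Generation",
)

iface.launch()

The committed pipe(messages) call would return the same structure as the result above, but its output is never printed or displayed.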