AnishHF committed on
Commit 51ff4f7
1 Parent(s): 68edcc7

Update app.py

Files changed (1)
app.py +3 -3
app.py CHANGED
@@ -9,8 +9,8 @@ quantization_config = QuantoConfig(
     weights = "int4"
 )
 
-model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1", quantization_config=quantization_config, device_map="auto", token=access_token)
-tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1", token = access_token)
+tokenizer = AutoTokenizer.from_pretrained("ProbeMedicalYonseiMAILab/medllama3-v20")
+model = AutoModelForCausalLM.from_pretrained("ProbeMedicalYonseiMAILab/medllama3-v20")
 
 # Function to generate text using the model
 def generate_text(prompt):
@@ -24,7 +24,7 @@ def generate_text(prompt):
 iface = gr.Interface(
     fn=generate_text,
     inputs=[
-        gr.inputs.Textbox(lines=5, label="Input Prompt"),
+        gr.Textbox(lines=5, label="Input Prompt"),
     ],
     outputs=gr.outputs.Textbox(label="Generated Text"),
     title="MisTRAL Text Generation",