O S I H committed on
Commit
0a492a0
1 Parent(s): 0dfe54d

add more parameters to the chat generation

Browse files
Files changed (1) hide show
  1. app.py +17 -8
app.py CHANGED
@@ -18,9 +18,6 @@ subprocess.run(
18
  )
19
 
20
 
21
- token = os.environ["HF_TOKEN"]
22
-
23
-
24
  model = AutoModelForCausalLM.from_pretrained(
25
  "microsoft/Phi-3-small-8k-instruct",
26
  torch_dtype="auto",
@@ -43,8 +40,10 @@ model = model.to(device)
43
 
44
 
45
  @spaces.GPU(duration=60)
46
- def chat(message, history, temperature, do_sample, max_tokens):
47
- chat = []
 
 
48
  for item in history:
49
  chat.append({"role": "user", "content": item[0]})
50
  if item[1] is not None:
@@ -62,6 +61,9 @@ def chat(message, history, temperature, do_sample, max_tokens):
62
  do_sample=True,
63
  temperature=temperature,
64
  eos_token_id=terminators,
 
 
 
65
  )
66
 
67
  if temperature == 0:
@@ -80,12 +82,16 @@ def chat(message, history, temperature, do_sample, max_tokens):
80
 
81
  demo = gr.ChatInterface(
82
  fn=chat,
83
- examples=[["Write me a poem about Machine Learning."]],
84
- # multimodal=False,
 
 
 
85
  additional_inputs_accordion=gr.Accordion(
86
  label="⚙️ Parameters", open=False, render=False
87
  ),
88
  additional_inputs=[
 
89
  gr.Slider(
90
  minimum=0, maximum=1, step=0.1, value=0.9, label="Temperature", render=False
91
  ),
@@ -98,9 +104,12 @@ demo = gr.ChatInterface(
98
  label="Max new tokens",
99
  render=False,
100
  ),
 
 
 
101
  ],
102
  stop_btn="Stop Generation",
103
- title="Chat With Phi-3-Small-8k-8b-Instruct",
104
  description="[microsoft/Phi-3-small-8k-instruct](https://huggingface.co/microsoft/Phi-3-small-8k-instruct)",
105
  )
106
  demo.launch()
 
18
  )
19
 
20
 
 
 
 
21
  model = AutoModelForCausalLM.from_pretrained(
22
  "microsoft/Phi-3-small-8k-instruct",
23
  torch_dtype="auto",
 
40
 
41
 
42
  @spaces.GPU(duration=60)
43
+ def chat(message, history,system_prompt, temperature, do_sample, max_tokens, top_k, repetition_penalty, top_p):
44
+ chat = [
45
+ {"role": "assistant", "content": system_prompt}
46
+ ]
47
  for item in history:
48
  chat.append({"role": "user", "content": item[0]})
49
  if item[1] is not None:
 
61
  do_sample=True,
62
  temperature=temperature,
63
  eos_token_id=terminators,
64
+ top_k=top_k,
65
+ repetition_penalty=repetition_penalty,
66
+ top_p=top_p
67
  )
68
 
69
  if temperature == 0:
 
82
 
83
  demo = gr.ChatInterface(
84
  fn=chat,
85
+ examples=[["Write me a poem about Machine Learning."],
86
+ ["write fibonacci sequence in python"],
87
+ ["who won the world cup in 2018?"],
88
+ ["when was the first computer invented?"],
89
+ ],
90
  additional_inputs_accordion=gr.Accordion(
91
  label="⚙️ Parameters", open=False, render=False
92
  ),
93
  additional_inputs=[
94
+ gr.Textbox("Perform the task to the best of your ability.", label="System prompt"),
95
  gr.Slider(
96
  minimum=0, maximum=1, step=0.1, value=0.9, label="Temperature", render=False
97
  ),
 
104
  label="Max new tokens",
105
  render=False,
106
  ),
107
+ gr.Slider(1, 80, 40, label="Top K sampling"),
108
+ gr.Slider(0, 2, 1.1, label="Repetition penalty"),
109
+ gr.Slider(0, 1, 0.95, label="Top P sampling"),
110
  ],
111
  stop_btn="Stop Generation",
112
+ title="Chat With Phi-3-Small-8k-7b-Instruct",
113
  description="[microsoft/Phi-3-small-8k-instruct](https://huggingface.co/microsoft/Phi-3-small-8k-instruct)",
114
  )
115
  demo.launch()