KingNish committed on
Commit
bee5883
1 Parent(s): ad83e99

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
 
3
  # Import modules from other files
4
- from chatbot import chatbot, model_inference, BOT_AVATAR, EXAMPLES, model_selector, decoding_strategy, temperature, max_new_tokens, repetition_penalty, min_p
5
  from live_chat import videochat
6
 
7
  # Define Gradio theme
@@ -197,7 +197,7 @@ with gr.Blocks(
197
  with gr.Row(elem_id="model_selector_row"):
198
  # model_selector defined in chatbot.py
199
  pass
200
- # decoding_strategy, temperature, min_p defined in chatbot.py
201
  decoding_strategy.change(
202
  fn=lambda selection: gr.Slider(
203
  visible=(
@@ -205,7 +205,7 @@ with gr.Blocks(
205
  in [
206
  "contrastive_sampling",
207
  "beam_sampling",
208
- "Min P Sampling",
209
  "sampling_top_k",
210
  ]
211
  )
@@ -214,9 +214,9 @@ with gr.Blocks(
214
  outputs=temperature,
215
  )
216
  decoding_strategy.change(
217
- fn=lambda selection: gr.Slider(visible=(selection in ["Min P Sampling"])),
218
  inputs=decoding_strategy,
219
- outputs=min_p,
220
  )
221
  gr.ChatInterface(
222
  fn=model_inference,
@@ -230,7 +230,7 @@ with gr.Blocks(
230
  temperature,
231
  max_new_tokens,
232
  repetition_penalty,
233
- min_p,
234
  gr.Checkbox(label="Web Search", value=False),
235
  ],
236
  )
 
1
  import gradio as gr
2
 
3
  # Import modules from other files
4
+ from chatbot import chatbot, model_inference, BOT_AVATAR, EXAMPLES, model_selector, decoding_strategy, temperature, max_new_tokens, repetition_penalty, top_p
5
  from live_chat import videochat
6
 
7
  # Define Gradio theme
 
197
  with gr.Row(elem_id="model_selector_row"):
198
  # model_selector defined in chatbot.py
199
  pass
200
+ # decoding_strategy, temperature, top_p defined in chatbot.py
201
  decoding_strategy.change(
202
  fn=lambda selection: gr.Slider(
203
  visible=(
 
205
  in [
206
  "contrastive_sampling",
207
  "beam_sampling",
208
+ "Top P Sampling",
209
  "sampling_top_k",
210
  ]
211
  )
 
214
  outputs=temperature,
215
  )
216
  decoding_strategy.change(
217
+ fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
218
  inputs=decoding_strategy,
219
+ outputs=top_p,
220
  )
221
  gr.ChatInterface(
222
  fn=model_inference,
 
230
  temperature,
231
  max_new_tokens,
232
  repetition_penalty,
233
+ top_p,
234
  gr.Checkbox(label="Web Search", value=False),
235
  ],
236
  )