Detsutut committed on
Commit
f586e32
1 Parent(s): 4a92b37

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -43,8 +43,9 @@ top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-p"
43
  split_output = gr.Checkbox(label="Quick single-sentence output", value=True)
44
 
45
  with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
46
- gr.Markdown("# Igea Text Generation Interface ⚕️🩺")
47
- gr.Markdown("⚠️ 🐢💬 This model runs on a **hardware-limited**, free-tier HuggingFace space, resulting in a **low output token throughput** (approx. 1 token/s)")
 
48
  input_text.render()
49
  with gr.Accordion("Advanced Options", open=False):
50
  max_new_tokens.render()
 
43
  split_output = gr.Checkbox(label="Quick single-sentence output", value=True)
44
 
45
  with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
46
+ gr.Markdown("# **Igea** Text Generation Interface ⚕️🩺")
47
+ gr.Markdown("🐢💬 To guarantee a reasonable output token throughput (approx. 5 tokens/s), this space employs a GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1), optimized for **hardware-limited**, CPU-only machines like the free-tier HuggingFace space.")
48
+ gr.Markdown("⚠️ Read the [bias, risks and limitations](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1#%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8-bias-risks-and-limitations-%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8) of Igea before use!")
49
  input_text.render()
50
  with gr.Accordion("Advanced Options", open=False):
51
  max_new_tokens.render()