Detsutut committed on
Commit
99ed14f
1 Parent(s): 13273ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -74,7 +74,6 @@ def negative_feedback(last_generated_text):
74
 
75
 
76
  # Create the Gradio interface
77
- last_generated_text = gr.State({"input_prompt":"", "generated_text_raw":"", "generated_text_displayed":""})
78
 
79
  input_text = gr.Textbox(lines=2, placeholder="Enter your request here...", label="Input Text")
80
  system_prompt = gr.Textbox(lines=2, placeholder="Enter custom system prompt...", label="Custom System Prompt")
@@ -83,6 +82,7 @@ max_new_tokens = gr.Slider(minimum=1, maximum=200, value=100, step=1, label="Max
83
  temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
84
 
85
  with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
 
86
  gr.Markdown("# Igea Instruct Interface ⚕️🩺")
87
  gr.Markdown("🐢💬 To guarantee a reasonable througput (<1 min to answer with default settings), this space employs a **GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1)**, optimized for **hardware-limited, CPU-only machines** like the free-tier HuggingFace space. Quantized models may result in significant performance degradation and therefore are not representative of the original model capabilities.")
88
  gr.Markdown("⚠️ Read the **[bias, risks and limitations](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1#%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8-bias-risks-and-limitations-%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8)** of Igea before use!")
 
74
 
75
 
76
  # Create the Gradio interface
 
77
 
78
  input_text = gr.Textbox(lines=2, placeholder="Enter your request here...", label="Input Text")
79
  system_prompt = gr.Textbox(lines=2, placeholder="Enter custom system prompt...", label="Custom System Prompt")
 
82
  temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
83
 
84
  with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
85
+ last_generated_text = gr.State({"input_prompt":"", "generated_text_raw":"", "generated_text_displayed":""})
86
  gr.Markdown("# Igea Instruct Interface ⚕️🩺")
87
  gr.Markdown("🐢💬 To guarantee a reasonable througput (<1 min to answer with default settings), this space employs a **GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1)**, optimized for **hardware-limited, CPU-only machines** like the free-tier HuggingFace space. Quantized models may result in significant performance degradation and therefore are not representative of the original model capabilities.")
88
  gr.Markdown("⚠️ Read the **[bias, risks and limitations](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1#%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8-bias-risks-and-limitations-%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8)** of Igea before use!")