Update app.py
app.py CHANGED
@@ -5,8 +5,8 @@ import torch
 import re
 
 # Initialize the model
-model = AutoModelForCausalLM.from_pretrained("Detsutut/Igea-1B-
-tokenizer = AutoTokenizer.from_pretrained( "Detsutut/Igea-1B-
+model = AutoModelForCausalLM.from_pretrained("Detsutut/Igea-1B-instruct-GGUF-Q4", model_file="unsloth.Q4_K_M.gguf", model_type="mistral", hf=True)
+tokenizer = AutoTokenizer.from_pretrained( "Detsutut/Igea-1B-instruct")
 
 
 gen_pipeline = pipeline(
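The keyword arguments on the new `from_pretrained` call (`model_file`, `model_type`, `hf=True`) are not part of the plain `transformers` API; they match `ctransformers`, which loads GGUF weights and, with `hf=True`, wraps them in a `transformers`-compatible model. The imports sit above this diff's context, so the following minimal sketch of the setup is an inference, not part of the commit:

```python
# Assumed setup; the actual imports and pipeline task lie outside this diff.
from ctransformers import AutoModelForCausalLM  # GGUF loader with hf=True support
from transformers import AutoTokenizer, pipeline

model = AutoModelForCausalLM.from_pretrained(
    "Detsutut/Igea-1B-instruct-GGUF-Q4",
    model_file="unsloth.Q4_K_M.gguf",  # which quantized file in the repo to load
    model_type="mistral",              # architecture hint for the GGUF runtime
    hf=True,                           # return a transformers-compatible model
)
tokenizer = AutoTokenizer.from_pretrained("Detsutut/Igea-1B-instruct")

# "text-generation" is assumed here; the task argument is not visible in the diff.
gen_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
```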
@@ -15,11 +15,23 @@ gen_pipeline = pipeline(
     tokenizer=tokenizer
 )
 
+alpaca_instruct_prompt = """
+{}
+
+### Istruzione:
+{}
+
+### Risposta:
+{}"""
+
 # Define the function to generate text
-def generate_text(input_text, max_new_tokens=30, temperature=1, top_p=0.95
-
-
-
+def generate_text(input_text, max_new_tokens=30, temperature=1, top_p=0.95):
+
+    prompt = alpaca_instruct_prompt.format("Di seguito è riportata un'istruzione che descrive un compito. Scrivi una risposta che completi in modo appropriato la richiesta.",
+        input_text,
+        ""
+    )
+
     output = gen_pipeline(
         input_text,
         max_new_tokens=max_new_tokens,
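The new `alpaca_instruct_prompt` is the Alpaca instruct layout: the first slot takes the system preamble (Italian for "Below is an instruction that describes a task. Write a response that appropriately completes the request."), the second the user instruction, and the third is left empty so the model continues right after `### Risposta:`. A sketch of what `generate_text` assembles, using a hypothetical instruction:

```python
# The instruction string here is hypothetical, purely to show the prompt shape.
prompt = alpaca_instruct_prompt.format(
    "Di seguito è riportata un'istruzione che descrive un compito. "
    "Scrivi una risposta che completi in modo appropriato la richiesta.",
    "Descrivi i sintomi dell'ipertensione.",  # hypothetical user input
    "",                                       # empty response slot to complete
)
# Result:
#
# Di seguito è riportata un'istruzione ... la richiesta.
#
# ### Istruzione:
# Descrivi i sintomi dell'ipertensione.
#
# ### Risposta:
```

Note that, as committed, the function still calls `gen_pipeline(input_text, ...)` rather than `gen_pipeline(prompt, ...)` (the `input_text,` argument is an unchanged context line below), so the assembled prompt is never actually passed to the model; the call was presumably meant to use `prompt`.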
@@ -28,34 +40,32 @@ def generate_text(input_text, max_new_tokens=30, temperature=1, top_p=0.95, spli
         return_full_text = False
     )
     generated_text = output[0]['generated_text']
-
-
-
-
+
+    if generated_text[-1] not in [".","!","?","\n"]:
+        generated_text = generated_text + " [...]"
+
     return f"<span>{input_text}</span><b style='color: blue;'>{generated_text}</b>"
 
 # Create the Gradio interface
-input_text = gr.Textbox(lines=2, placeholder="Enter your
+input_text = gr.Textbox(lines=2, placeholder="Enter your request here...", label="Input Text")
 
 max_new_tokens = gr.Slider(minimum=1, maximum=200, value=30, step=1, label="Max New Tokens")
 temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
 top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-p")
-split_output = gr.Checkbox(label="Quick single-sentence output", value=True)
 
 with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
-    gr.Markdown("# Igea
-    gr.Markdown("🐢💬 To guarantee a reasonable throughput (<1 min to answer with default settings), this space employs a **GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1)**, optimized for **hardware-limited, CPU-only machines** like the free-tier HuggingFace space.")
+    gr.Markdown("# Igea Instruct Interface ⚕️🩺")
+    gr.Markdown("🐢💬 To guarantee a reasonable throughput (<1 min to answer with default settings), this space employs a **GGUF quantized version of [Igea 1B](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1)**, optimized for **hardware-limited, CPU-only machines** like the free-tier HuggingFace space. Quantized models may result in significant performance degradation and therefore are not representative of the original model capabilities.")
     gr.Markdown("⚠️ Read the **[bias, risks and limitations](https://huggingface.co/bmi-labmedinfo/Igea-1B-v0.0.1#%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8-bias-risks-and-limitations-%F0%9F%9A%A8%E2%9A%A0%EF%B8%8F%F0%9F%9A%A8)** of Igea before use!")
     input_text.render()
     with gr.Accordion("Advanced Options", open=False):
         max_new_tokens.render()
         temperature.render()
         top_p.render()
-    split_output.render()
     output = gr.HTML(label="Generated Text",elem_id="outbox")
 
     btn = gr.Button("Generate")
-    btn.click(generate_text, [input_text, max_new_tokens, temperature, top_p
+    btn.click(generate_text, [input_text, max_new_tokens, temperature, top_p], output)
 
 # Launch the interface
 if __name__ == "__main__":
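One small robustness note on the new truncation marker: `generated_text[-1]` raises an `IndexError` if the pipeline ever returns an empty string. A defensive equivalent (a sketch, not part of the commit):

```python
# Same behavior, but safe on empty output: "".endswith(...) is simply False.
if not generated_text.endswith((".", "!", "?", "\n")):
    generated_text += " [...]"
```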
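For readers unfamiliar with the define-then-`render()` pattern used throughout the UI code: Gradio components created outside a `gr.Blocks` context are not placed anywhere until `.render()` is called inside one, which is what lets the textbox and sliders be declared up top and positioned inside the accordion later. A minimal self-contained sketch of the same pattern:

```python
import gradio as gr

# Declared early, not yet attached to any layout.
name_box = gr.Textbox(label="Name")

with gr.Blocks() as demo:
    gr.Markdown("## Render-pattern demo")
    with gr.Accordion("Options", open=False):
        name_box.render()  # the component is placed here, inside the accordion

if __name__ == "__main__":
    demo.launch()
```

The diff cuts off at the `if __name__ == "__main__":` guard; the launch call (presumably `iface.launch()`) lies outside the changed region.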