Delete app_old.py
app_old.py +0 -62
app_old.py
DELETED
@@ -1,62 +0,0 @@
-import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-import torch
-import re
-
-# Initialize the model
-model_id = "Detsutut/Igea-350M-v0.0.1"
-
-model = AutoModelForCausalLM.from_pretrained(model_id, load_in_8bit=True, device_map='auto')
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-
-
-gen_pipeline = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer
-)
-
-# Define the function to generate text
-def generate_text(input_text, max_new_tokens, temperature, top_p, split_output):
-    if split_output:
-        max_new_tokens = 30
-        top_p = 0.95
-    output = gen_pipeline(
-        input_text,
-        max_new_tokens=max_new_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        return_full_text=False
-    )
-    generated_text = output[0]['generated_text']
-    if split_output:
-        sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', generated_text)
-        if sentences:
-            generated_text = sentences[0]
-    return f"<span>{input_text}</span><b style='color: blue;'>{generated_text}</b>"
-
-# Create the Gradio interface
-input_text = gr.Textbox(lines=2, placeholder="Enter your text here...", label="Input Text")
-
-max_new_tokens = gr.Slider(minimum=1, maximum=200, value=30, step=1, label="Max New Tokens")
-temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
-top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-p")
-split_output = gr.Checkbox(label="Quick single-sentence output", value=True)
-
-with gr.Blocks(css="#outbox { border-radius: 8px !important; border: 1px solid #e5e7eb !important; padding: 8px !important; text-align:center !important;}") as iface:
-    gr.Markdown("# Igea Text Generation Interface ⚕️🩺")
-    gr.Markdown("⚠️ 🐢💬 This model runs on a **hardware-limited**, free-tier HuggingFace space, resulting in a **low output token throughput** (approx. 1 token/s)")
-    input_text.render()
-    with gr.Accordion("Advanced Options", open=False):
-        max_new_tokens.render()
-        temperature.render()
-        top_p.render()
-    split_output.render()
-    output = gr.HTML(label="Generated Text", elem_id="outbox")
-
-    btn = gr.Button("Generate")
-    btn.click(generate_text, [input_text, max_new_tokens, temperature, top_p, split_output], output)
-
-# Launch the interface
-if __name__ == "__main__":
-    iface.launch(inline=True)
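Note: the deleted file passed load_in_8bit=True straight to from_pretrained, which newer transformers releases deprecate in favour of an explicit quantization config. A minimal sketch of the equivalent loading call, assuming bitsandbytes is installed (not part of this commit):

from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Equivalent 8-bit loading via an explicit quantization config
model = AutoModelForCausalLM.from_pretrained(
    "Detsutut/Igea-350M-v0.0.1",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)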
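For reference, the sentence-splitting regex in generate_text keeps only the first complete sentence when "Quick single-sentence output" is ticked. A small illustration of its behaviour (the sample text is made up):

import re

# Splits after '.' or '?' followed by whitespace, but not after
# abbreviations such as "Dr." or dotted forms such as "e.g."
pattern = r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s'

text = "Il paziente è stato visitato dal Dr. Rossi. La terapia prosegue."
print(re.split(pattern, text))
# ['Il paziente è stato visitato dal Dr. Rossi.', 'La terapia prosegue.']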