import gradio as gr
import transformers
import torch
import re
# Initialize the model
model_id = "Detsutut/Igea-350M-v0.0.1"
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
)
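# A minimal sketch of a GPU variant (not used here: this free-tier Space is
# CPU-only). `device=0` pins the pipeline to the first CUDA device; the kwarg
# is part of the standard transformers pipeline API.
#
#   pipeline = transformers.pipeline(
#       "text-generation",
#       model=model_id,
#       model_kwargs={"torch_dtype": torch.bfloat16},
#       device=0,
#   )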
# Define the function to generate text
def generate_text(input_text, max_new_tokens, temperature, top_p, split_output):
    # "Quick single-sentence output" overrides the sliders with fast defaults
    if split_output:
        max_new_tokens = 30
        top_p = 0.95
    output = pipeline(
        input_text,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    generated_text = output[0]['generated_text']
    if split_output:
        # Split on whitespace preceded by '.' or '?', skipping dotted
        # abbreviations ("e.g.") and honorifics ("Mr.") via the negative
        # lookbehinds
        sentences = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', generated_text)
        if sentences:
            first = sentences[0]
            # The lookbehind keeps the terminator, so only append a period
            # when generation stopped mid-sentence
            return first if first.endswith(('.', '?')) else first + '.'
    return generated_text
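# Illustrative direct call, bypassing the UI (the prompt is a made-up example):
#
#   generate_text("Il paziente presenta", max_new_tokens=50,
#                 temperature=1.0, top_p=0.95, split_output=False)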
# Create the Gradio interface
input_text = gr.Textbox(lines=2, placeholder="Enter your text here...", label="Input Text")
max_new_tokens = gr.Slider(minimum=1, maximum=200, value=30, step=1, label="Max New Tokens")
temperature = gr.Slider(minimum=0.1, maximum=2.0, value=1.0, step=0.1, label="Temperature")
top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.01, label="Top-p")
split_output = gr.Checkbox(label="Quick single-sentence output", value=True)
with gr.Blocks() as iface:
    gr.Markdown("# Igea 350M Text Generation Interface")
    gr.Markdown("⚠️ 🐢💬 This model runs on a hardware-limited, free-tier Hugging Face Space, so output token throughput is low (approx. 1 token/s).")
    input_text.render()
    with gr.Accordion("Advanced Options", open=False):
        max_new_tokens.render()
        temperature.render()
        top_p.render()
        split_output.render()
    output = gr.Textbox(label="Generated Text")
    btn = gr.Button("Generate")
    btn.click(generate_text, [input_text, max_new_tokens, temperature, top_p, split_output], output)
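# With ~1 token/s throughput, long generations can hit request timeouts; one
# mitigation (a sketch, not part of the original app) is Gradio's built-in
# request queue, enabled before launch:
#
#   iface.queue(max_size=16)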
# Launch the interface
if __name__ == "__main__":
    iface.launch(inline=True)