humanizer_model / app.py
lucidmorto's picture
feat: Improve text generation with advanced parameters
3f7af4c
raw
history blame
1.22 kB
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Model checkpoint to use; t5-base is a general-purpose seq2seq model.
# NOTE(review): "humanize:" is not one of T5's pretrained task prefixes —
# presumably this space intends a fine-tuned checkpoint; confirm.
model_name = "t5-base"
# Both downloads happen at import time (first launch), so startup is slow
# until the Hugging Face cache is warm.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
def generate_text(input_text):
    """Rewrite *input_text* with the seq2seq model using the "humanize:" prefix.

    Args:
        input_text: Raw user text from the Gradio textbox.

    Returns:
        The model's sampled output as a stripped string; an empty string
        when the input is blank (avoids running generation on an empty
        prompt).
    """
    input_text = input_text.strip()
    # Guard: a blank submission would otherwise trigger a full, slow
    # generation pass over an empty prompt.
    if not input_text:
        return ""

    # Encode with the task prefix; truncate to the model's 512-token limit.
    input_ids = tokenizer.encode(
        "humanize: " + input_text,
        return_tensors="pt",
        max_length=512,
        truncation=True,
    )

    # Sampling-based decoding. NOTE: `early_stopping` was removed — it only
    # applies to beam search (num_beams > 1); with do_sample=True it is
    # ignored and emits a transformers warning.
    outputs = model.generate(
        input_ids,
        max_length=300,
        min_length=30,
        num_return_sequences=1,
        no_repeat_ngram_size=3,  # forbid repeating any 3-gram
        top_k=50,
        top_p=0.95,
        temperature=0.8,
        do_sample=True,
    )

    # Decode the single returned sequence, dropping <pad>/<eos> tokens.
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text.strip()
# Wire the generation function into a simple one-in / one-out Gradio UI.
iface = gr.Interface(
    title="Text Generator",
    description="Enter text to generate a summary or continuation.",
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text", lines=5),
    outputs=gr.Textbox(label="Generated Text"),
)

# Start the web server (blocks until the app is stopped).
iface.launch()