import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the Dolly v2 3B tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b")
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b")

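# Optional variant, not part of the original app: on a machine with a CUDA GPU,
# the model can instead be loaded in half precision to roughly halve memory use.
# A minimal sketch, assuming torch is installed alongside transformers; the
# input IDs built in generate_text would then also need .to(model.device)
# before model.generate is called.
#
# import torch
#
# if torch.cuda.is_available():
#     model = AutoModelForCausalLM.from_pretrained(
#         "databricks/dolly-v2-3b", torch_dtype=torch.float16
#     ).to("cuda")
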
def generate_text(prompt):
    # Tokenize the prompt into a tensor of input IDs
    inputs = tokenizer.encode(prompt, return_tensors="pt")

    # Generate one continuation of up to 100 tokens (prompt tokens included)
    output = model.generate(inputs, max_length=100, num_return_sequences=1)

    # Decode the generated IDs back into a string, dropping special tokens
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text

# Wire the generation function into a simple Gradio UI: one text box in, one out
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Transformer Text Generation",
    description="Enter a prompt and the model will generate text based on it.",
)

iface.launch()
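
# Optional, also not part of the original app: launch() accepts share=True to
# expose the demo through a temporary public Gradio link, e.g.
#
# iface.launch(share=True)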
|
|