import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Single source of truth for the checkpoint id (was repeated inline twice).
MODEL_NAME = "databricks/dolly-v2-3b"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)


def generate_text(prompt: str, max_new_tokens: int = 100) -> str:
    """Generate a text continuation for *prompt* with the Dolly v2 3B model.

    Args:
        prompt: Input text the model should continue.
        max_new_tokens: Upper bound on generated tokens. Replaces the old
            ``max_length=100``, which counted the prompt itself and could
            leave no room for generation on long prompts. Backward-compatible
            default (new keyword-with-default parameter).

    Returns:
        The decoded output, prompt included, with special tokens stripped.
    """
    # Tokenize via __call__ (not .encode) so we also get an attention_mask;
    # generate() warns — and may mis-handle padding — when the mask is absent.
    inputs = tokenizer(prompt, return_tensors="pt")

    output = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        num_return_sequences=1,
        # Explicit pad token silences the "pad_token_id not set" warning for
        # models (like this one) that define only an EOS token.
        pad_token_id=tokenizer.eos_token_id,
    )

    return tokenizer.decode(output[0], skip_special_tokens=True)


# Gradio UI wiring: one text box in, generated text out.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Transformer Text Generation",
    description="Enter a prompt and the model will generate text based on it.",
)

if __name__ == "__main__":
    # Guard the launch so importing this module (e.g. to reuse generate_text)
    # does not start the web server as a side effect.
    iface.launch()