import gradio as gr
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the pretrained GPT-2 tokenizer and model
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Use the GPU if one is available, otherwise fall back to the CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

def generate_text(prompt):
    # Tokenize the prompt and move the tensors to the same device as the model
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Greedy decoding (do_sample=False), capped at 100 tokens
    outputs = model.generate(**inputs, do_sample=False, max_length=100)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text

iface = gr.Interface(
    fn=generate_text,
    inputs=[gr.Textbox(label="Enter a prompt")],
    outputs=gr.Textbox(label="Generated text"),
)
iface.launch()