import torch
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub id of the Bengali causal-LM checkpoint to serve.
peft_model_id = "sksayril/bpt-v-4-Bengali"

# device_map='auto' lets accelerate shard/place the model (CPU and/or GPU);
# inputs must therefore be moved to model.device before generation.
model = AutoModelForCausalLM.from_pretrained(
    peft_model_id,
    load_in_8bit=False,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(peft_model_id)


def generate_text(prompt):
    """Generate a Bengali continuation of *prompt*.

    Args:
        prompt: Input text to condition the model on.

    Returns:
        The decoded generation (prompt included), capped at 100 tokens
        total via ``max_length``.
    """
    with torch.no_grad():
        # Keep the full tokenizer output so attention_mask is forwarded
        # to generate(); dropping it triggers a transformers warning and
        # can alter results when pad_token == eos_token.
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        output = model.generate(**inputs, max_length=100)
    return tokenizer.decode(output[0], skip_special_tokens=True)


iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Bengali Text Generation",
    description="Enter a prompt to generate Bengali text.",
)

if __name__ == "__main__":
    # Guarded so importing this module does not block on a web server.
    iface.launch()