import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the fine-tuned Bengali model in 8-bit precision (requires bitsandbytes)
peft_model_id = "sksayril/bpt-v-4-Bengali"
model = AutoModelForCausalLM.from_pretrained(
    peft_model_id,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(peft_model_id)

st.title("Bengali Text Generation")
prompt = st.text_input("Enter a prompt:", "What is Machine Learning")

if st.button("Generate Answer"):
    with torch.no_grad():
        # Tokenize the prompt and move it to the same device as the model
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
        output = model.generate(input_ids, max_length=100)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    st.write("Generated Answer:")
    st.write(generated_text)
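To launch the app locally, save the script above (app.py is used here only as an illustrative filename) and run:

streamlit run app.py

The first run downloads the model weights from the Hugging Face Hub. Loading with load_in_8bit=True requires the bitsandbytes package and a CUDA-capable GPU; on a CPU-only machine, drop that argument and load the model in full precision instead.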