sayril007 committed on
Commit
945868f
β€’
1 Parent(s): cdbcb6e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -6
app.py CHANGED
@@ -1,17 +1,24 @@
# Previous Streamlit version of app.py (replaced by the Gradio UI in this commit).
import torch
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Bengali causal-LM checkpoint served by this app.
peft_model_id = "sksayril/bpt-v-4-Bengali"
# 8-bit loading (requires the bitsandbytes package at runtime — TODO confirm installed).
model = AutoModelForCausalLM.from_pretrained(peft_model_id, load_in_8bit=True)
tokenizer = AutoTokenizer.from_pretrained(peft_model_id)

st.title("Bengali Text Generation")

prompt = st.text_input("Enter a prompt:", "What is Machine Learning")
if st.button("Generate Answer"):
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids
        output = model.generate(input_ids, max_length=100)
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    st.write("Generated Answer:")
    st.write(generated_text)
 
 
 
 
 
 
 
 
 
 
1
  import torch
2
+ import gradio as gr
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
# Hub repo id of the Bengali causal-LM checkpoint this app serves.
peft_model_id = "sksayril/bpt-v-4-Bengali"

# Load the tokenizer, then the model in 8-bit precision.
# NOTE(review): load_in_8bit=True requires bitsandbytes at runtime — confirm it
# is listed in the Space's requirements.
tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(peft_model_id, load_in_8bit=True)
 
8
 
9
def generate_text(prompt, max_length=100):
    """Generate a text continuation for *prompt* with the loaded causal LM.

    Args:
        prompt: Input text used to condition the generation.
        max_length: Maximum total sequence length in tokens, prompt included.
            Defaults to 100, matching the original hard-coded value.

    Returns:
        The decoded model output as a string, with special tokens removed.
    """
    # Inference only: skip autograd to save memory and time.
    with torch.no_grad():
        # Relies on module-level `model` and `tokenizer` loaded at import time.
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids
        output = model.generate(input_ids, max_length=max_length)
        generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
# Wire the generator into a simple text-in / text-out Gradio UI.
_iface_kwargs = dict(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Bengali Text Generation",
    description="Enter a prompt to generate Bengali text.",
)
iface = gr.Interface(**_iface_kwargs)

# Start the Gradio server (blocks until the app is stopped).
iface.launch()