File size: 1,247 Bytes
eae27ac
 
ff0dcf7
 
 
eae27ac
b710584
eae27ac
 
ff0dcf7
eae27ac
 
 
 
 
 
 
 
 
 
 
 
 
b710584
eae27ac
 
 
ff0dcf7
eae27ac
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Checkpoint fine-tuned for Bangla question generation (SQuAD-bn, mT5-base).
model_name = "Tahsin-Mayeesha/squad-bn-mt5-base2"
# Load tokenizer and seq2seq model once at module import; both are reused
# by every call to the Gradio handler below.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


import gradio as gr
def generate__questions(context, answer):
  """Generate a Bangla question from a context passage and an answer span.

  Builds the mT5 prompt ``"answer: <answer> context: <context>"``, runs
  beam-search generation, and returns the decoded question with the model's
  ``"question: "`` prefix removed.

  Args:
    context: Passage of text the question should be grounded in.
    answer: The answer the generated question must target.

  Returns:
    The generated question as a plain string.
  """
  prompt = 'answer: ' + answer + ' context: ' + context
  # Calling the tokenizer directly is the supported API; encode_plus is
  # deprecated in recent transformers releases.
  encoding = tokenizer(prompt, return_tensors="pt")

  model.eval()
  # Inference only — disable autograd so generate() doesn't build a graph.
  with torch.no_grad():
    generated_ids = model.generate(
        input_ids=encoding['input_ids'],
        attention_mask=encoding['attention_mask'],
        max_length=64,
        num_beams=5,
        num_return_sequences=1,
    )

  question = tokenizer.decode(
      generated_ids[0],
      skip_special_tokens=True,
      clean_up_tokenization_spaces=True,
  )
  # Bug fix: the old code replaced the "question: " prefix with a single
  # space, leaving a stray leading blank in every result. Strip it instead.
  return question.replace('question: ', '').strip()

# Gradio UI: two text inputs (context, answer) mapped to one text output.
context_box = gr.Textbox(label='Context')
answer_box = gr.Textbox(label='Answer')
question_box = gr.Textbox(label='Question')

demo = gr.Interface(
    fn=generate__questions,
    inputs=[context_box, answer_box],
    outputs=question_box,
    title="Bangla Question Generation",
    description="Get the Question from given Context and an Answer",
)
demo.launch()