Saurabh Parmar committed on
Commit
7ab3be2
1 Parent(s): b88bdd1

Add application file

Files changed (1)
  1. app.py +25 -3
app.py CHANGED
@@ -1,7 +1,29 @@
 import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
-def greet(name):
-    return "Hello " + name + "!!"
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b")
+model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b")
 
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+def generate_text(prompt):
+    # Tokenize the input prompt
+    inputs = tokenizer.encode(prompt, return_tensors="pt")
+
+    # Generate text based on the prompt
+    output = model.generate(inputs, max_length=100, num_return_sequences=1)
+
+    # Decode the generated output
+    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    return generated_text
+
+# Define the Gradio interface
+iface = gr.Interface(
+    fn=generate_text,
+    inputs="text",
+    outputs="text",
+    title="Transformer Text Generation",
+    description="Enter a prompt and the model will generate text based on it.",
+)
+
+# Launch the Gradio interface
 iface.launch()
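
For reference, a minimal sketch of exercising the new tokenize -> generate -> decode path outside the Gradio interface. The prompt string is arbitrary, and the torch_dtype=torch.bfloat16 argument is an assumption to keep the roughly 3B-parameter model's memory footprint manageable; it is not part of this commit. PyTorch must be installed alongside transformers and gradio, since the code requests "pt" tensors.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Assumption: bfloat16 roughly halves memory for the ~3B-parameter model;
# drop torch_dtype to match the committed app.py exactly.
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b")
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b", torch_dtype=torch.bfloat16)

# Same steps as generate_text() in app.py, called directly with an example prompt
inputs = tokenizer.encode("What does Gradio do?", return_tensors="pt")
output = model.generate(inputs, max_length=100, num_return_sequences=1)
print(tokenizer.decode(output[0], skip_special_tokens=True))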