import gradio as gr
from task import tasks_config
from pipeline_utils import handle_task_change, review_training_choices, test_pipeline
from playground_utils import create_playground_header, create_playground_footer, create_tabs_header

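# Assemble the playground UI as a Gradio Blocks app.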
playground = gr.Blocks()

with playground:
    create_playground_header()
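    # One tab per modality; only the Text tab is fully wired up so far.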
    with gr.Tabs():
        with gr.TabItem("Text"):
            radio, test_pipeline_button = create_tabs_header()
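            # Left column: pipeline configuration; right column: generated output.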
            with gr.Row(visible=True) as use_pipeline:
                with gr.Column():
                    task_dropdown = gr.Dropdown(
                        choices=[(task["name"], task_id)
                                 for task_id, task in tasks_config.items()],
                        label="Task",
                        interactive=True,
                        info="Select Pipelines for natural language processing tasks or     type if you have your own."
                    )
                    model_dropdown = gr.Dropdown(
                        choices=[],
                        label="Model",
                        info="Select an appropriate model for the task you selected"
                    )
                    prompt_textarea = gr.TextArea(
                        label="Prompt",
                        placeholder="Enter your prompt here",
                        text_align="left",
                        info="Copy/paste or type the prompt you want to try. Make sure the prompt is clear, or experiment with different prompts."
                    )
                    context_for_question_answer = gr.TextArea(
                        label="Context",
                        placeholder="Enter the context for your question here",
                        visible=False,
                        interactive=True,
                        info="Question answering tasks return an answer given a question. If you’ve ever asked a virtual assistant like Alexa, Siri or Google what the weather is, then you’ve used a question answering model before. Here we are doing extractive question answering: the answer is extracted from the given context."
                    )
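                    # Changing the task refreshes the model dropdown and toggles the context box (only needed for question answering).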
                    task_dropdown.change(handle_task_change,
                                         inputs=[task_dropdown],
                                         outputs=[context_for_question_answer,
                                                  model_dropdown, task_dropdown])
                with gr.Column():
                    text = gr.TextArea(label="Generated Text")
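            # Tab-level wiring: the radio toggles the pipeline row; the button runs the selected pipeline.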
            radio.change(review_training_choices,
                         inputs=radio, outputs=use_pipeline)
            test_pipeline_button.click(test_pipeline,
                                       inputs=[
                                           task_dropdown, model_dropdown, prompt_textarea, context_for_question_answer],
                                       outputs=text)
        with gr.TabItem("Image"):
            radio, test_pipeline_button = create_tabs_header()
            gr.Markdown("""
                        > WIP
                        """)
        with gr.TabItem("Audio"):
            radio, test_pipeline_button = create_tabs_header()
            gr.Markdown("""
                        > WIP
                        """)
    create_playground_footer()
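
# Start the local Gradio server.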
playground.launch()