import gradio as gr
from dotenv import load_dotenv
from generators import *

from utils import async_zip_stream

load_dotenv()
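
# `async_zip_stream` itself lives in `utils` and is not shown in this file. The sketch
# below (under the hypothetical name `_async_zip_stream_sketch`) only illustrates the
# contract `handle` relies on: advance every async generator once per step, yield one
# tuple with a chunk per stream, and pad finished streams with "" until all are done.
# The real helper may differ (e.g. it could advance the streams concurrently).
async def _async_zip_stream_sketch(*streams):
    pending = list(streams)  # an entry is set to None once its stream is exhausted
    while any(s is not None for s in pending):
        chunks = []
        for i, stream in enumerate(pending):
            if stream is None:
                chunks.append("")  # keep the tuple aligned with the original stream order
                continue
            try:
                chunks.append(await stream.__anext__())
            except StopAsyncIteration:
                pending[i] = None
                chunks.append("")
        if all(s is None for s in pending):
            break  # every stream finished this round; nothing new to yield
        yield tuple(chunks)
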


async def handle(system_input: str, user_input: str):
    """Stream completions from all six models and yield the accumulated text for each output box."""
    print(system_input, user_input)
    buffers = ["", "", "", "", "", ""]
    async for outputs in async_zip_stream(
            generate_gpt2(system_input, user_input),
            generate_mistral_7bvo1(system_input, user_input),
            generate_llama2(system_input, user_input),
            generate_llama3(system_input, user_input),
            generate_t5(system_input, user_input),
            generate_mixtral(system_input, user_input),
    ):
        # Append the latest chunk from each model to its buffer; the order here must
        # match the `outputs=` list passed to the click/submit handlers below.
        for i, chunk in enumerate(outputs):
            buffers[i] += str(chunk)

        yield list(buffers)
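

# The `generate_*` functions come from `generators` via the star import above and are
# not defined in this file. The hypothetical sketch below only illustrates the contract
# `handle` assumes: an async generator that takes (system_input, user_input) and yields
# text chunks as they are produced. It is an illustration, not the real implementation.
async def _generate_example_sketch(system_input: str, user_input: str):
    # A real generator would stream tokens from a model; here we simply emit the prompt
    # word by word to show the shape of the stream.
    for word in f"{system_input} {user_input}".split():
        yield word + " "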


with gr.Blocks() as demo:
    system_input = gr.Textbox(label='System Input', value='You are an AI assistant', lines=2)
    with gr.Row():
        gpt = gr.Textbox(label='gpt-2', lines=4, interactive=False, info='OpenAI\n14 February 2019')
        t5 = gr.Textbox(label='t5', lines=4, interactive=False, info='Google\n21 Oct 2022')
        llama2 = gr.Textbox(label='llama-2', lines=4, interactive=False, info='MetaAI\n18 Jul 2023')
    with gr.Row():
        mistral = gr.Textbox(label='mistral-v01', lines=4, interactive=False, info='MistralAI\n20 Sep 2023')
        mixtral = gr.Textbox(label='mixtral', lines=4, interactive=False, info='Mistral AI\n11 Dec 2023')
        llama3 = gr.Textbox(label='llama-3.1', lines=4, interactive=False, info='MetaAI\n18 Jul 2024')

    user_input = gr.Textbox(label='User Input', lines=2, value='Calculate expression: 7-3=')
    gen_button = gr.Button('Generate')

    gen_button.click(
        fn=handle,
        inputs=[system_input, user_input],
        outputs=[gpt, mistral, llama2, llama3, t5, mixtral],
    )
    user_input.submit(
        fn=handle,
        inputs=[system_input, user_input],
        outputs=[gpt, mistral, llama2, llama3, t5, mixtral],
    )

    with gr.Row():
        with gr.Column(scale=1):
            gr.Image(value='icon.jpg')
        with gr.Column(scale=4):
            gr.HTML("""<div style="text-align: center; font-family: 'Helvetica Neue', sans-serif; padding: 10px; color: #333333;">
    <p style="font-size: 18px; font-weight: 600; margin-bottom: 8px;">
        This demo was created by the Telegram channel <strong style="color: #007ACC;"><a href='https://t.me/mlphys'> mlphys</a></strong>. My other social links:
    </p>
    <p style="font-size: 16px;">
        <a href="https://t.me/mlphys" target="_blank" style="color: #0088cc; text-decoration: none; font-weight: 500;">Telegram</a> |
        <a href="https://x.com/quensy23" target="_blank" style="color: #1DA1F2; text-decoration: none; font-weight: 500;">Twitter</a> |
        <a href="https://github.com/freQuensy23-coder"  target="_blank" style="color: #0088cc; text-decoration: none; font-weight: 500;">GitHub</a>
    </p>
</div>""")
demo.launch()