import gradio as gr
from dotenv import load_dotenv

from generators import *
from utils import async_zip_stream

load_dotenv()


async def handle(system_input: str, user_input: str):
    print(system_input, user_input)
    # One text buffer per model, in the same order as the streams below.
    buffers = ["", "", "", "", "", ""]
    async for outputs in async_zip_stream(
        generate_gpt2(system_input, user_input),
        generate_mistral_7bvo1(system_input, user_input),
        generate_llama2(system_input, user_input),
        generate_llama3(system_input, user_input),
        generate_t5(system_input, user_input),
        generate_mixtral(system_input, user_input),
    ):
        # Append each model's newest chunk to its buffer and stream all six
        # partial outputs back to the UI.
        for i, chunk in enumerate(outputs):
            buffers[i] += str(chunk)
        yield list(buffers)


with gr.Blocks() as demo:
    system_input = gr.Textbox(label='System Input', value='You are an AI assistant', lines=2)
    with gr.Row():
        gpt = gr.Textbox(label='gpt-2', lines=4, interactive=False, info='OpenAI\n14 February 2019')
        t5 = gr.Textbox(label='t5', lines=4, interactive=False, info='Google\n21 Oct 2022')
        llama2 = gr.Textbox(label='llama-2', lines=4, interactive=False, info='MetaAI\n18 Jul 2023')
    with gr.Row():
        mistral = gr.Textbox(label='mistral-v01', lines=4, interactive=False, info='MistralAI\n20 Sep 2023')
        mixtral = gr.Textbox(label='mixtral', lines=4, interactive=False, info='Mistral AI\n11 Dec 2023')
        llama3 = gr.Textbox(label='llama-3.1', lines=4, interactive=False, info='MetaAI\n18 Jul 2024')
    user_input = gr.Textbox(label='User Input', lines=2, value='Calculate expression: 7-3=')
    gen_button = gr.Button('Generate')
    gen_button.click(
        fn=handle,
        inputs=[system_input, user_input],
        outputs=[gpt, mistral, llama2, llama3, t5, mixtral],
    )
    user_input.submit(
        fn=handle,
        inputs=[system_input, user_input],
        outputs=[gpt, mistral, llama2, llama3, t5, mixtral],
    )
    with gr.Row():
        with gr.Column(scale=1):
            gr.Image(value='icon.jpg')
        with gr.Column(scale=4):
            gr.HTML("""

This demo was created by the mlphys Telegram channel. My other social media:

Telegram | Twitter | GitHub

""") demo.launch()