# LLMhistory / app.py
# Author: freQuensy23 — commit f0c7657 ("Replace front"), 1.95 kB
# (header reconstructed from HuggingFace Space page chrome so the file parses)
from dotenv import load_dotenv
# NOTE(review): star import pulls in generate_gpt2 / generate_mistral_7bvo1 /
# generate_llama2 / generate_llama3 / generate_t5 / generate_bloom used below;
# an explicit import list would make those origins visible.
from generators import *
import gradio as gr
from utils import async_zip_stream

# Load API keys / tokens from a local .env file into the environment
# before any generator client is used.
load_dotenv()
async def handle(system_input: str, user_input: str):
    """Stream completions from five models in lockstep, then append bloom.

    Yields lists of exactly 6 strings, matching the Gradio output
    components ``[gpt, mistral, llama2, llama3, t5, bloom]``: the first
    five accumulate incrementally while streaming, and the bloom slot is
    filled once at the end.

    Args:
        system_input: System prompt shared by every model.
        user_input: The user's prompt.
    """
    print(system_input, user_input)  # lightweight request logging
    buffers = ["", "", "", "", ""]  # accumulated text per streaming model
    async for chunks in async_zip_stream(
        generate_gpt2(system_input, user_input),
        generate_mistral_7bvo1(system_input, user_input),
        generate_llama2(system_input, user_input),
        generate_llama3(system_input, user_input),
        generate_t5(system_input, user_input),
    ):
        for i, chunk in enumerate(chunks):
            buffers[i] += str(chunk)
        # BUG FIX: the original yielded 7 values (buffers + ["", ""]) against
        # only 6 wired output components, which Gradio rejects. Pad with
        # exactly one empty placeholder for the not-yet-generated bloom slot.
        yield list(buffers) + [""]
    # bloom is produced in one shot after the streamed models finish.
    # NOTE(review): generate_bloom is called without await here — presumably
    # a synchronous function; confirm against generators.py.
    yield list(buffers) + [generate_bloom(system_input, user_input)]
# Gradio UI: one shared system prompt, two rows of read-only model output
# boxes (oldest models on top), a user prompt, and a generate button that
# streams `handle`'s results into the six boxes.
with gr.Blocks() as demo:
    sys_box = gr.Textbox(label='System Input', value='You are AI assistant', lines=2)

    # Row 1: the three earliest models.
    with gr.Row():
        out_gpt2 = gr.Textbox(label='gpt-2', lines=4, interactive=False, info='OpenAI\n14 February 2019')
        out_t5 = gr.Textbox(label='t5', lines=4, interactive=False, info='Google\n12 Dec 2019')
        out_bloom = gr.Textbox(label='bloom [GPU]', lines=4, interactive=False, info='Big Science\n11 Jul 2022')

    # Row 2: the more recent models.
    with gr.Row():
        out_llama2 = gr.Textbox(label='llama-2', lines=4, interactive=False, info='MetaAI\n18 Jul 2023')
        out_mistral = gr.Textbox(label='mistral-v01', lines=4, interactive=False, info='MistralAI\n20 Sep 2023')
        out_llama3 = gr.Textbox(label='llama-3.1', lines=4, interactive=False, info='MetaAI\n18 Jul 2024')

    usr_box = gr.Textbox(label='User Input', lines=2, value='Calculate expression: 7-3=')
    run_btn = gr.Button('Generate')

    # Output order must match the order `handle` yields its values in.
    run_btn.click(fn=handle, inputs=[sys_box, usr_box],
                  outputs=[out_gpt2, out_mistral, out_llama2, out_llama3, out_t5, out_bloom])

demo.launch()