"""Gradio chat app that streams replies from a RunPod-hosted vLLM model.

The app keeps per-session conversation history in a ``gr.State`` and sends
it to an OpenAI-compatible chat-completions endpoint on RunPod.
"""

import os
import time  # NOTE(review): unused in this file — confirm and remove

import gradio as gr
from openai import OpenAI

# Client for RunPod's OpenAI-compatible vLLM endpoint.
client = OpenAI(
    api_key=os.environ.get("RUNPOD_API_KEY"),
    base_url="https://api.runpod.ai/v2/vllm-k0g4c60zor9xuu/openai/v1",
)


def runpod_chat(question, history=None):
    """Send *question* plus prior turns to the model; return reply and history.

    Parameters
    ----------
    question : str
        The user's new message.
    history : list[dict] | None
        Prior turns as OpenAI-style ``{"role": ..., "content": ...}`` dicts.
        ``None`` (Gradio's initial State value) starts a fresh conversation.

    Returns
    -------
    tuple[str, list[dict]]
        The display string (``"RunPod: "`` + reply) and the updated history,
        which Gradio feeds back through ``gr.State`` on the next call.
    """
    if history is None:
        history = []  # fresh conversation on the first call

    history.append({"role": "user", "content": question})

    response_stream = client.chat.completions.create(
        model="ambrosfitz/llama-3-history",
        messages=history,
        temperature=0,
        max_tokens=150,
        stream=True,
    )

    # Accumulate streamed delta chunks into the full reply text.
    reply = ""
    for chunk in response_stream:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            reply += delta

    # Bug fix: store the raw model reply in history. The original appended
    # the "RunPod: " display prefix as assistant content, which would leak
    # the UI prefix into the prompt context of every subsequent turn.
    history.append({"role": "assistant", "content": reply})

    # The prefix is display-only; the returned string matches the original UI.
    return "RunPod: " + reply, history


iface = gr.Interface(
    fn=runpod_chat,
    inputs=[
        gr.Textbox(label="Enter your question:"),
        gr.State(),  # carries conversation history between calls
    ],
    outputs=[
        gr.Textbox(label="Responses"),
        gr.State(),
    ],
    title="RunPod Chat",
    description="This app interfaces with RunPod's API to provide responses to your queries.",
)

# Guard the launch so importing this module (e.g. for testing) does not
# start the web server; running it as a script behaves as before.
if __name__ == "__main__":
    iface.launch()