|
|
|
|
|
|
|
|
|
from langchain.llms import OpenAI |
|
from dotenv import load_dotenv |
|
load_dotenv() |
|
import streamlit as st |
|
import os |
|
|
|
|
|
def get_openai_response(question):
    """Send *question* to the OpenAI completion model and return its answer text.

    The LLM client is built once on first use and cached on the function
    object, so repeated calls (e.g. on every Streamlit rerun) do not
    reconstruct it.

    Args:
        question: The user's question as a plain string.

    Returns:
        The model's completion text for the question.
    """
    llm = getattr(get_openai_response, "_llm", None)
    if llm is None:
        # NOTE(review): the env var is "OPEN_API_KEY", not the conventional
        # "OPENAI_API_KEY" — confirm this matches the key name in the .env file.
        llm = OpenAI(
            openai_api_key=os.getenv("OPEN_API_KEY"),
            model_name="gpt-3.5-turbo-instruct",
            temperature=0.5,
        )
        get_openai_response._llm = llm

    return llm(question)
|
|
|
|
|
# --- Page setup -------------------------------------------------------------
st.set_page_config(page_title="Q&A Demo")

st.header("Simple Q and A 💬")

st.caption("This question and answer application utilizes large language models (LLMs) from the Langchain library. This app is for fun, and not necessarily an accurate source of information.")

st.caption("⏳ After submitting, please allow for a few seconds for your answer to load.")

st.caption("📝 Examples: \"What is the capital of Canada?\", \"How big is Earth?\", \"Do dolphins have self-awareness?\"")

# --- Input ------------------------------------------------------------------
# Avoid shadowing the builtin `input`; the widget key stays "input" so any
# existing session state keeps working.
user_question = st.text_input("Ask a question: ", key="input")

submit = st.button("Submit")

# --- Output -----------------------------------------------------------------
# Only hit the LLM after the user explicitly submits a non-empty question;
# previously the API was called on every rerun, including the initial page
# load with an empty string.
if submit and user_question:
    st.write(get_openai_response(user_question))