# File size: 1,534 Bytes
# Commits: 35ac803 50d6e02 0889642 35ac803
# (The two lines above and a line-number gutter are residue from the web
#  viewer this file was scraped from; kept here as comments, not code.)
# app.py
# Q&A Chatbot
# imports
from langchain.llms import OpenAI
from dotenv import load_dotenv
load_dotenv() # load the environment variables from .env
import streamlit as st
import os
################################## Function to load OpenAI model and get responses
def get_openai_response(question):
    """Send *question* to the OpenAI completion model and return its reply.

    The API key is read from the environment (loaded from .env via
    load_dotenv at import time).

    NOTE(review): the env var is read as "OPEN_API_KEY" — OpenAI's
    conventional name is "OPENAI_API_KEY"; confirm against the .env file
    before renaming.
    """
    # Build the LLM client. "gpt-3.5-turbo-instruct" is the successor to
    # the deprecated "text-davinci-003" completion model.
    model = OpenAI(
        openai_api_key=os.getenv("OPEN_API_KEY"),
        model_name="gpt-3.5-turbo-instruct",
        temperature=0.5,
    )
    # Invoke the model on the question and hand back its text completion.
    return model(question)
################################## Initialize streamlit app
st.set_page_config(page_title="Q&A Demo")
st.header("Simple Q and A 💬")  # mojibake "π¬" restored to the chat emoji
st.caption(
    "This question and answer application utilizes large language models "
    "(LLMs) from the Langchain library. This app is for fun, and not "
    "necessarily an accurate source of information."
)
# mojibake "β³" restored to the hourglass emoji
st.caption("⏳ After submitting, please allow for a few seconds for your answer to load.")
# mojibake "π" restored — likely the memo emoji; confirm against the original source
st.caption("📝 Examples: \"What is the capital of Canada?\", \"How big is Earth?\", \"Which animals have self-awareness?\"")

# Text box for the user's question. Renamed from `input` (shadowed the
# builtin); the widget state key stays "input" so session state is unchanged.
user_question = st.text_input("Ask a question: ", key="input")

# Submit button — True only on the rerun triggered by a click.
submit = st.button("Submit")

# BUG FIX: the original called get_openai_response() unconditionally on
# every Streamlit rerun (every keystroke/interaction), before Submit was
# pressed and even with an empty question — one wasted API call per rerun.
# Only call the model after Submit is clicked and the question is non-empty.
if submit and user_question:
    response = get_openai_response(user_question)
    #st.subheader("The response is:")
    st.write(response)  # output the response