Spaces:
Running
Running
Add error handling
Browse files
app.py
CHANGED
@@ -11,6 +11,7 @@ from langchain_chroma import Chroma
|
|
11 |
from langchain_openai import OpenAIEmbeddings
|
12 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
|
13 |
from langchain_openai import ChatOpenAI
|
|
|
14 |
from dotenv import load_dotenv
|
15 |
import gradio as gr
|
16 |
|
@@ -80,9 +81,18 @@ def predict(message, history):
|
|
80 |
chat_history.append(HumanMessage(content=m))
|
81 |
chat_history.append(a)
|
82 |
|
83 |
-
|
84 |
-
|
85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
86 |
|
87 |
theme = gr.themes.Default().set(
|
88 |
button_primary_background_fill_dark="#26C8A1",
|
|
|
11 |
from langchain_openai import OpenAIEmbeddings
|
12 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder, PromptTemplate
|
13 |
from langchain_openai import ChatOpenAI
|
14 |
+
from openai import RateLimitError
|
15 |
from dotenv import load_dotenv
|
16 |
import gradio as gr
|
17 |
|
|
|
81 |
chat_history.append(HumanMessage(content=m))
|
82 |
chat_history.append(a)
|
83 |
|
84 |
+
try:
|
85 |
+
ai_response = rag_chain.invoke({"input": message, "chat_history": chat_history})
|
86 |
+
answer = ai_response["answer"]
|
87 |
+
except RateLimitError as e:
|
88 |
+
if "You exceeded your current quota" in e.message:
|
89 |
+
answer = "This demo has run out of credits! Come back again soon and try it out"
|
90 |
+
else:
|
91 |
+
answer = "There seems to be a problem with the OpenAI API. We'll look into it!"
|
92 |
+
except Exception as e:
|
93 |
+
answer = "There seems to be a problem with the OpenAI API. We'll look into it!"
|
94 |
+
|
95 |
+
return answer
|
96 |
|
97 |
theme = gr.themes.Default().set(
|
98 |
button_primary_background_fill_dark="#26C8A1",
|