"""
Module for running the gradio web-ui application
"""
import gradio as gr

"""
Module for testing agent
"""
from time import time
import os

from langchain.globals import set_llm_cache
from langchain_community.cache import InMemoryCache
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.runnables import RunnablePassthrough
from langchain_core.callbacks.streaming_stdout import (
    StreamingStdOutCallbackHandler,
)
from langchain_openai.chat_models import ChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents import AgentExecutor

from dotenv import (
    load_dotenv,
    find_dotenv,
)

from pydantic import BaseModel

from openai_functions_and_agents import (
    # create_consumable_functions,
    consumable_functions,
    # consumable_tools,
)

from prompt_verified import create_agent_prompt

if not load_dotenv(find_dotenv()):
    print(
        "Could not load `.env` file or it is empty. Please check that it "
        "exists and is readable by the current user."
    )

# Cache LLM calls in memory; set_llm_cache expects a BaseCache instance
# (e.g. InMemoryCache), not a bool.
set_llm_cache(InMemoryCache())

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
CHATBOT_NAME = os.environ.get("CHATBOT_NAME")


class Query(BaseModel):
    """Request body schema for a chat query."""

    query: str = "Hello there"


# Streaming chat model with the OpenAI function schemas bound, so every
# call can return either a plain message or a function-call request.
model = ChatOpenAI(
    model="gpt-3.5-turbo-1106",
    # model="gpt-4-0125-preview",
    temperature=0.7,
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
    verbose=True,
).bind(
    functions=consumable_functions()
)

# Prompt -> model -> parser that yields an AgentAction (function call)
# or an AgentFinish (final answer).
base_chain = create_agent_prompt() | model | OpenAIFunctionsAgentOutputParser()

# Format the (action, observation) pairs from previous iterations into the
# messages the model expects under the `agent_scratchpad` prompt variable.
agent_scratchpad_runnable = RunnablePassthrough.assign(
    agent_scratchpad=lambda x: format_to_openai_functions(x["intermediate_steps"])
)

agent_chain = agent_scratchpad_runnable | base_chain

# Check: https://python.langchain.com/docs/modules/agents/quick_start#adding-in-memory for docs
message_history = ChatMessageHistory()

agent_executor = AgentExecutor(
    agent=agent_chain,
    tools=consumable_functions(return_tool=True),
    verbose=True,
    handle_parsing_errors=True,
)
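
# Note: AgentExecutor loops agent_chain -> tool execution -> observation,
# feeding each (action, observation) pair back via `intermediate_steps` until
# the parser returns a final answer; handle_parsing_errors=True sends parsing
# errors back to the model instead of raising.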

final_agent = RunnableWithMessageHistory(
    agent_executor,
    get_session_history=lambda session_id: message_history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="output",
)
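
# The lambda above returns the same ChatMessageHistory for every session_id,
# so all sessions share one conversation history. A minimal per-session
# alternative (a sketch, not wired in; assumes in-memory storage suffices):
_session_histories: dict = {}


def get_session_history(session_id: str) -> ChatMessageHistory:
    """Return the history for one session, creating it on first use."""
    if session_id not in _session_histories:
        _session_histories[session_id] = ChatMessageHistory()
    return _session_histories[session_id]
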

async def run_final_agent(
    query_: str,  # could instead accept a `Query` model and use query_.query
    phone_num: int,  # currently unused; kept for interface compatibility
    customer_name: str,
    session_id: str,
):
    start_time = time()
    response = await final_agent.ainvoke(
        input={
            "input": query_,
            "customer_name": customer_name,
            "CHATBOT_NAME": CHATBOT_NAME,
        },
        config={
            # Pass the actual session_id, not the literal string "session_id"
            "configurable": {
                "session_id": session_id
            }
        },
    )
    print(response)
    total_time = round(time() - start_time, 2)

    return {
        "status": 200,
        "response": response["output"],
        "time_taken": f"{total_time}s",
    }
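
# Hypothetical direct invocation outside Gradio (requires a valid
# OPENAI_API_KEY and the tool definitions to be importable):
#
#     import asyncio
#     result = asyncio.run(run_final_agent(
#         query_="Hello there",
#         phone_num=816394,
#         customer_name="Raheem",
#         session_id="demo",
#     ))
#     print(result["response"])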

async def predict(message, history):
    # Conversation history is managed inside `final_agent` via
    # `message_history`, so the Gradio-provided `history` is not re-sent.
    gpt_response = await run_final_agent(
        query_=message,
        customer_name="Raheem",
        phone_num=816394,
        session_id="NotImplemented",
    )
    return gpt_response["response"]

if __name__ == "__main__":
    gr.ChatInterface(predict).launch(debug=True, share=True)