from langchain_openai.chat_models import ChatOpenAI
from langchain.tools.render import format_tool_to_openai_function
from langgraph.prebuilt import ToolExecutor, ToolInvocation
from typing import TypedDict, Annotated, Sequence
import operator
from langchain_core.messages import BaseMessage, FunctionMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain.tools import tool
import json
import os
import gradio as gr
os.environ["LANGCHAIN_TRACING_V2"] ="True"
os.environ["LANGCHAIN_API_KEY"]="ls__54e16f70b2b0455aad0f2cbf47777d30"
os.environ["OPENAI_API_KEY"]="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"
# os.environ["OPENAI_API_KEY"]="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"
os.environ["LANGCHAIN_ENDPOINT"]="https://api.smith.langchain.com"
os.environ["LANGCHAIN_PROJECT"]="default"
os.environ['TAVILY_API_KEY'] = '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'

class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], operator.add]
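
# Because the reducer is operator.add, each node's {"messages": [...]} return
# value is appended to the running list rather than replacing it; after one
# round trip the state is roughly [HumanMessage, AIMessage(function_call=...),
# FunctionMessage] (a sketch, not exact reprs).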

model = ChatOpenAI(model="gpt-3.5-turbo-1106")  # picks up OPENAI_API_KEY from the environment
# Alternative OpenAI-compatible backend, e.g. Together-hosted Qwen:
# model = ChatOpenAI(model="Qwen/Qwen1.5-72B-Chat", base_url="https://api.together.xyz/v1")


prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a restaurant manager named Tang Seng. You serve the customers, and you have three employees: Bajie the cook, Sha Seng the waiter, and Wukong the cashier. Based on each customer's request, give instructions to the right employee, and show your exchanges with the employees to the customer as well. Once checkout is finished, the service is complete. Politely decline any question from the customer that is not about the food."),
    ("human", "{input}")
])
@tool(return_direct=True)
def chushi(query: str) -> str:
    '''You are Bajie, the restaurant cook; on the manager's instruction, prepare a dish.'''
    # Stub: a real implementation would act on `query` (the manager's instruction).
    return "The dish is ready"

@tool
def shizhe(query: str) -> str:
    '''You are Sha Seng, the restaurant waiter; on the manager's instruction, bring the dish to the customer.'''
    return "The dish has been served"

@tool
def shouyin(query: str) -> str:
    '''You are Wukong, the restaurant cashier; on the manager's instruction, settle the customer's bill.'''
    return "Checkout complete, welcome again"

tools = [chushi, shizhe, shouyin]

functions = [format_tool_to_openai_function(t) for t in tools]
model = model.bind_functions(functions)
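
# A tool-calling turn from the bound model carries the call in
# additional_kwargs, roughly this shape (a sketch of the OpenAI
# function-call payload, with a made-up argument string):
#   AIMessage(content="", additional_kwargs={"function_call": {
#       "name": "chushi", "arguments": '{"query": "one fried rice"}'}})
# should_continue and call_tool below key off exactly this structure.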
tool_executor = ToolExecutor(tools)

def should_continue(state):
    messages = state['messages']
    last_message = messages[-1]
    # If there is no function call, then we finish
    if "function_call" not in last_message.additional_kwargs:
        return "end"
    # Otherwise if there is, we continue
    else:
        return "continue"

# Define the function that calls the model
def call_model(state):
    messages = state['messages']
    response = model.invoke(messages)
    # We return a list, because this will get added to the existing list
    return {"messages": [response]}

# Define the function to execute tools
def call_tool(state):
    messages = state['messages']
    # Based on the continue condition
    # we know the last message involves a function call
    last_message = messages[-1]
    # We construct a ToolInvocation from the function_call
    action = ToolInvocation(
        tool=last_message.additional_kwargs["function_call"]["name"],
        tool_input=json.loads(last_message.additional_kwargs["function_call"]["arguments"]),
    )
    # We call the tool_executor and get back a response
    response = tool_executor.invoke(action)
    # We use the response to create a FunctionMessage
    function_message = FunctionMessage(content=str(response), name=action.tool)
    # We return a list, because this will get added to the existing list
    return {"messages": [function_message]}

from langgraph.graph import StateGraph, END
# Define a new graph
workflow = StateGraph(AgentState)

# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", call_tool)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `continue`, then we call the tool node.
        "continue": "action",
        # Otherwise we finish.
        "end": END
    }
)

# We now add a normal edge from `action` to `agent`.
# This means that after a tool runs, the `agent` node is called next.
workflow.add_edge('action', 'agent')

# Finally, we compile it!
# This compiles it into a LangChain Runnable,
# meaning you can use it as you would any other runnable
app = workflow.compile()
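
# A minimal sketch of driving the compiled graph directly, without the Gradio
# UI (the sample order text is illustrative only):
#   state = app.invoke({"messages": prompt.format_messages(input="A bowl of noodles, then the bill please")})
#   print(state["messages"][-1].content)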


async def predict(message, history):
    # NOTE: Gradio's `history` is not replayed into the graph here; each turn
    # starts fresh from the system prompt plus the latest user message.
    que = {"messages": prompt.format_messages(input=message)}
    res = app.invoke(que)
    if res:
        return res["messages"][-1].content
    else:
        return "Sorry, something went wrong. Please contact me on WeChat: 13603634456"
demo = gr.ChatInterface(
    fn=predict,
    title="Journey to the West Restaurant",
    description="The Journey to the West Restaurant is now open! I am Tang Seng, the manager. Welcome! Just tell me directly whatever you need.",
)
demo.launch()
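# launch() serves on localhost by default; pass share=True to launch() for a
# temporary public Gradio link.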