netman19731 committed on
Commit 46127ea
1 Parent(s): 52c4598

Update app.py

Files changed (1)
  1. app.py +113 -64
app.py CHANGED
@@ -1,71 +1,118 @@
- # There are two embedding libraries, HuggingFaceHubEmbeddings and HuggingFaceBgeEmbeddings (cloud-hosted and local, respectively),
- # with two matching Pinecone indexes, myindex01 and myindex
- import gradio as gr
- import requests
- import dashscope
- from http import HTTPStatus
- import json
- # from langchain.llms import Tongyi
- from langchain_community.llms import Tongyi, ChatGLM, OpenAI
- from langchain import hub
  from langchain_community.tools.tavily_search import TavilySearchResults
- from langchain.tools import tool
- # from langchain_community.embeddings import TensorflowHubEmbeddings
- from langchain_community.embeddings import HuggingFaceBgeEmbeddings
- from langchain_community.embeddings import HuggingFaceHubEmbeddings
- from langchain_community.llms import HuggingFaceHub
- from pinecone import Pinecone, ServerlessSpec
- # from langchain.vectorstores import Pinecone as Pinecone_VectorStore
- from langchain_community.vectorstores import Pinecone as Pinecone_VectorStore
- from langchain.tools.retriever import create_retriever_tool
- from langchain.agents import AgentExecutor, create_react_agent
- from getpass import getpass
  import os
- os.environ['TAVILY_API_KEY'] = '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'
- @tool
- def tqyb(query: str) -> str:
-     """Weather-forecast API; example: query=Beijing"""
-     url = f"https://api.seniverse.com/v3/weather/now.json?key=SWtPLxs4A2GhenWC-&location={query}&language=zh-Hans&unit=c"
-     response = requests.get(url)
-     # Check whether the request succeeded
-     if response.status_code == 200:
-         res = response.json()
-         return res  # Assumes the API returns JSON data
-     else:
-         return f"Request failed, status code: {response.status_code}"
-
- os.environ['OPENAI_API_KEY'] = "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"
-
- # llm = HuggingFaceHub(repo_id="Qwen/Qwen1.5-0.5B", model_kwargs={"temperature": 0.5, "max_length": 64})
- llm = Tongyi(dashscope_api_key="sk-78c45d761ed04af2b965b43cd522108b", model="qwen-72b")
- prompt = hub.pull("hwchase17/react")
- search = TavilySearchResults(max_results=1)
-
- model_name = "BAAI/bge-small-en"
- model_kwargs = {"device": "cpu"}
- encode_kwargs = {"normalize_embeddings": True}
- # embeddings = HuggingFaceBgeEmbeddings(
- #     model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs
- # )
- embeddings = HuggingFaceHubEmbeddings()
- pc = Pinecone(api_key='3538cd3c-eca8-4c61-9463-759f5ea65b10')
- index = pc.Index("myindex01")
- vectorstore = Pinecone_VectorStore(index, embeddings.embed_query, "text")
- db = vectorstore.as_retriever()
- retriever_tool = create_retriever_tool(
-     db,
-     "shuangcheng_search",
-     "Retrieval tool for information about Shuangcheng district; if the question concerns Shuangcheng's district profile, you must use this tool!",
  )

- tools = [search, tqyb, retriever_tool]
- agent = create_react_agent(llm, tools, prompt)
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

  async def predict(question):
-     que = {"input": question}
-     res = agent_executor.invoke(que)
      if res:
          return res["output"]
      else:
          return "Sorry, something went wrong; please contact me on WeChat: 13603634456"
@@ -74,5 +121,7 @@ async def predict(question):
  gr.Interface(
      predict, inputs="textbox",
      outputs="textbox",
-     title="Custom AI Expert BOT",
-     description="This is a custom AI expert BOT: type in a question and the AI will answer it.\nThree example tools are currently available:\n1. Weather forecast (a function-calling API)\n2. Shuangcheng district-information retrieval (RAG)\n3. Search engine").launch()
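The removed path drives a ReAct agent through `AgentExecutor`, whose `invoke` returns a dict with an `"output"` key; that is what the old `predict` read. A minimal sketch of a one-off call against the definitions above (the query is hypothetical and assumes the keys are valid):

```python
# Uses `agent_executor` as defined in the removed code above
result = agent_executor.invoke({"input": "What is the weather in Beijing?"})
print(result["output"])  # AgentExecutor returns {"input": ..., "output": ...}
```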
+
+ from langchain_openai.chat_models import ChatOpenAI
  from langchain_community.tools.tavily_search import TavilySearchResults
+ from langchain.tools.render import format_tool_to_openai_function
+ from langgraph.prebuilt import ToolExecutor, ToolInvocation
+ from typing import TypedDict, Annotated, Sequence
+ import operator
+ from langchain_core.messages import BaseMessage, FunctionMessage, HumanMessage
+ from langchain.tools import ShellTool
+ import json
  import os
+ import gradio as gr
+ os.environ["LANGCHAIN_TRACING_V2"] = "True"
+ os.environ["LANGCHAIN_API_KEY"] = "ls__54e16f70b2b0455aad0f2cbf47777d30"
+ os.environ["OPENAI_API_KEY"] = "20a79668d6113e99b35fcd541c65bfeaec497b8262c111bd328ef5f1ad8c6335"
+ # os.environ["OPENAI_API_KEY"] = "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"
+ os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
+ os.environ["LANGCHAIN_PROJECT"] = "default"
+
+ # Shared graph state: each node's returned messages are appended via operator.add
+ class AgentState(TypedDict):
+     messages: Annotated[Sequence[BaseMessage], operator.add]
+
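The `operator.add` annotation is what lets each node return only its new messages: LangGraph folds every node's update into the state with the declared reducer, which for sequences is plain concatenation. A minimal sketch of that merge with hypothetical values:

```python
import operator

state_messages = ["first message"]   # the existing state
node_update = ["second message"]     # what a node returns
merged = operator.add(state_messages, node_update)
assert merged == ["first message", "second message"]
```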
+ model = ChatOpenAI(model="gpt-3.5-turbo-1106", api_key="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")
+ shell_tool = ShellTool()
+ # NOTE: TavilySearchResults reads the TAVILY_API_KEY environment variable,
+ # which this version of the file no longer sets
+ tools = [TavilySearchResults(max_results=1), shell_tool]
+
+ # Expose the tools to the model in the OpenAI function-calling format
+ functions = [format_tool_to_openai_function(t) for t in tools]
+ model = model.bind_functions(functions)
+
+ tool_executor = ToolExecutor(tools)
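`format_tool_to_openai_function` produces a plain dict with `name`, `description`, and `parameters` fields, and `bind_functions` attaches those schemas to every chat request. A quick way to inspect what the model will see (the printed values depend on the tool):

```python
import json
# Dump the first tool's schema; for Tavily the name is "tavily_search_results_json"
print(json.dumps(functions[0], indent=2))
```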
+ # Define the function that decides whether to keep looping or stop
+ def should_continue(state):
+     messages = state['messages']
+     last_message = messages[-1]
+     # If there is no function call, then we finish
+     if "function_call" not in last_message.additional_kwargs:
+         return "end"
+     # Otherwise, if there is, we continue
+     else:
+         return "continue"
+
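`should_continue` only inspects `additional_kwargs` on the last message, which is where OpenAI-style function calls are stored. A small self-check with hand-built messages (the payload is illustrative; `tavily_search_results_json` is Tavily's registered tool name):

```python
from langchain_core.messages import AIMessage

tool_request = AIMessage(content="", additional_kwargs={
    "function_call": {"name": "tavily_search_results_json",
                      "arguments": '{"query": "weather in Beijing"}'}
})
assert should_continue({"messages": [tool_request]}) == "continue"
assert should_continue({"messages": [AIMessage(content="done")]}) == "end"
```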
+ # Define the function that calls the model
+ def call_model(state):
+     messages = state['messages']
+     response = model.invoke(messages)
+     # We return a list, because it will be appended to the existing list
+     return {"messages": [response]}
+
+ # Define the function that executes tools
+ def call_tool(state):
+     messages = state['messages']
+     # Per the continue condition, we know the last message involves a function call
+     last_message = messages[-1]
+     # We construct a ToolInvocation from the function_call
+     action = ToolInvocation(
+         tool=last_message.additional_kwargs["function_call"]["name"],
+         tool_input=json.loads(last_message.additional_kwargs["function_call"]["arguments"]),
+     )
+     # We call the tool_executor and get back a response
+     response = tool_executor.invoke(action)
+     # We use the response to create a FunctionMessage
+     function_message = FunctionMessage(content=str(response), name=action.tool)
+     # We return a list, because it will be appended to the existing list
+     return {"messages": [function_message]}
+
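`call_tool` can be exercised the same way by fabricating a function call. In the sketch below, `terminal` is ShellTool's registered name and `commands` follows its input schema; note that this really executes the command locally:

```python
from langchain_core.messages import AIMessage
import json

fake_request = AIMessage(content="", additional_kwargs={
    "function_call": {"name": "terminal",
                      "arguments": json.dumps({"commands": ["echo hello"]})}
})
out = call_tool({"messages": [fake_request]})
print(out["messages"][0])  # a FunctionMessage named "terminal" holding the stdout
```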
+ from langgraph.graph import StateGraph, END
+ # Define a new graph
+ workflow = StateGraph(AgentState)
+
+ # Define the two nodes we will cycle between
+ workflow.add_node("agent", call_model)
+ workflow.add_node("action", call_tool)
+
+ # Set the entry point to `agent`,
+ # meaning this node is the first one called
+ workflow.set_entry_point("agent")
+
+ # We now add a conditional edge
+ workflow.add_conditional_edges(
+     # First, we define the start node. We use `agent`.
+     # This means these are the edges taken after the `agent` node is called.
+     "agent",
+     # Next, we pass in the function that will determine which node is called next.
+     should_continue,
+     # Finally, we pass in a mapping whose keys are strings and whose values are
+     # other nodes. END is a special node marking that the graph should finish.
+     # `should_continue` is called and its output is matched against these keys;
+     # whichever node matches is called next.
+     {
+         # If `continue`, we call the tool node.
+         "continue": "action",
+         # Otherwise we finish.
+         "end": END,
+     }
  )

+ # We now add a normal edge from `action` back to `agent`.
+ # This means that after `action` is called, the `agent` node is called next.
+ workflow.add_edge('action', 'agent')
+
+ # Finally, we compile it!
+ # This compiles it into a LangChain Runnable,
+ # meaning you can use it as you would any other runnable
+ app = workflow.compile()
+
+ # inputs = {"messages": [HumanMessage(content="Check the version of your cast command")]}
+ # app.invoke(inputs)
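Once compiled, the graph takes an initial state and returns the final state, so the answer lives in the last message rather than under an `"output"` key. A sketch, assuming valid API keys:

```python
inputs = {"messages": [HumanMessage(content="Who won the 2022 World Cup?")]}
final_state = app.invoke(inputs)
print(final_state["messages"][-1].content)  # the model's final reply
```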

  async def predict(question):
+     que = {"messages": [HumanMessage(content=question)]}
+     res = app.invoke(que)
      if res:
+         # The graph returns its final state; the answer is the last message
+         return res["messages"][-1].content
      else:
          return "Sorry, something went wrong; please contact me on WeChat: 13603634456"

  gr.Interface(
      predict, inputs="textbox",
      outputs="textbox",
+     title="Custom AI Expert BOT v0.1",
+     description="This is a custom AI expert BOT: type in a question and the AI will answer it.\nTwo example tools are currently available:\n1. A shell tool that turns natural language into bash commands and runs them\n2. A search engine").launch()
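For debugging, the compiled graph can also be streamed instead of invoked once; each yielded item maps a node name to the update that node produced (a sketch; exact payloads vary across langgraph versions):

```python
inputs = {"messages": [HumanMessage(content="echo the word hello")]}
for step in app.stream(inputs):
    print(step)  # e.g. {"agent": {...}}, then {"action": {...}} per loop turn
```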