- 实现了基于LangChain的MCP Agent,支持连接MCP服务器调用工具
- 添加了环境配置文件(.env),包含LLM模型和API配置信息
- 创建了完整的工具系统,包括BaseTool基类和Bash、Terminate、Add等工具
- 集成了天气查询工具,支持通过中国气象局API获取天气预报信息
- 实现了交互式对话功能,支持多轮工具调用和结果处理
- 添加了详细的CLAUDE.md开发指导文档
97 lines
3.2 KiB
Python
import os
|
||
import uuid
|
||
from deepagents import create_deep_agent
|
||
from dotenv import load_dotenv, find_dotenv
|
||
from langchain.agents import create_agent
|
||
from langgraph.checkpoint.memory import InMemorySaver
|
||
from app.tools.agent_tools import add, get_weather_by_location
|
||
from langchain_openai import ChatOpenAI
|
||
from app.tools.execute_sql import execute_query
|
||
|
||
# Load API keys and model settings from the nearest .env file.
_ = load_dotenv(find_dotenv())

# In-memory checkpointer: conversation state persists across turns within
# this process only, keyed by the thread_id placed in `config` below.
checkpointer = InMemorySaver()

# Qwen model served through DashScope's OpenAI-compatible endpoint.
# NOTE(review): assumes LLM_API_KEY is present in the environment — if it is
# missing, api_key is None and the first request will fail; confirm .env.
model = ChatOpenAI(
    model="qwen3-30b-a3b-thinking-2507",
    api_key=os.getenv('LLM_API_KEY'),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
)

# Deep agent wired with the arithmetic, weather-lookup, and SQL tools.
agent = create_deep_agent(
    model=model,
    tools=[add, get_weather_by_location, execute_query],
    checkpointer=checkpointer,
    system_prompt="你是一个有帮助的助手。请简洁准确,用中文进行回答。"
)

# A fresh conversation thread per process run; the checkpointer scopes
# memory to this thread_id.
thread_id = str(uuid.uuid4())
config = {
    "configurable": {
        "thread_id": thread_id
    }
}
|
||
|
||
|
||
if __name__ == "__main__":
    # Stream the agent run. NOTE(review): with the default "values" stream
    # mode each chunk carries the FULL message list accumulated so far, so
    # the original per-chunk loop printed and appended every message once
    # per chunk. We track how many messages were already handled to process
    # each message exactly once.
    print("=== 开始流式输出 ===")
    stream_result = agent.stream(
        {"messages": [{"role": "user", "content": "查询车上咖啡机数据,不要修改原来的sql,除非运行报错。"
                                                  "sql: select counts(*) from coffee_train"}]},
        config=config
    )

    full_response = ""   # concatenation of plain (non-tool) message text
    processed = 0        # messages already handled in earlier chunks

    for chunk in stream_result:
        # Raw chunk dump, kept for debugging visibility.
        print("接收到数据块:", chunk)

        if 'messages' not in chunk:
            continue
        messages = chunk['messages']

        # Handle only the messages that are new in this chunk.
        for msg in messages[processed:]:
            print(f"--- 消息类型: {type(msg).__name__} ---")
            if getattr(msg, 'content', None):
                print(f"消息内容: {msg.content}")

            if getattr(msg, 'tool_calls', None):
                # AIMessage requesting one or more tool invocations.
                print("正在调用工具...")
                for tool_call in msg.tool_calls:
                    print(f" 工具名: {tool_call.get('name', 'N/A')}")
                    print(f" 参数: {tool_call.get('args', {})}")
                    print(f" 调用ID: {tool_call.get('id', 'N/A')}")
            elif getattr(msg, 'name', None):
                # ToolMessage: the result a tool returned.
                print(f"工具名称: {msg.name}")
                # Guarded: only ToolMessage carries tool_call_id.
                print(f"工具调用ID: {getattr(msg, 'tool_call_id', 'N/A')}")
                print(f"工具响应: {msg.content}")
            elif hasattr(msg, 'content'):
                # Plain text message — fold into the final transcript.
                full_response += msg.content

        processed = len(messages)

    print("\n=== 流式输出结束 ===")

    print("\n=== 最终完整响应 ===")
    print(full_response)
|
||
|