# @Time:2025/12/19 10:31 # @Author:jinglv """ 记忆的访问 在工具中传递数据 """ from langchain.agents import create_agent, AgentState from langchain.tools import tool, ToolRuntime from langgraph.checkpoint.memory import InMemorySaver
from src.core.llms import model_client
def print_stream_result(response):
    """Print a streamed agent response as a readable step-by-step trace.

    Each streamed item maps a node name ("model" or "tools") to its state
    update; model steps carry either text content or tool calls.
    """
    for item in response:
        for key, value in item.items():
            if key == "model":
                print("-----执行步骤:调用大模型")
                # A model message has either text content or tool calls.
                if value['messages'][0].content:
                    # NOTE: use single quotes inside the f-string — nested
                    # double quotes are a syntax error before Python 3.12.
                    print(f"-----大模型分析的结果:{value['messages'][0].content}")
                elif value['messages'][0].tool_calls:
                    print("-----大模型分析的结果为调用以下工具:")
                    for tool_ in value['messages'][0].tool_calls:
                        print(f"工具名称:{tool_['name']},调用工具的入参:{tool_['args']}")
            elif key == "tools":
                print(f"智能体执行工具:{value['messages'][0].name}")
                print(f"工具执行结果:{value['messages'][0].content}")
from langchain.tools import tool, ToolRuntime from langchain_core.runnables import RunnableConfig from langchain.messages import ToolMessage from langchain.agents import create_agent, AgentState from langgraph.types import Command from pydantic import BaseModel
class CustomState(AgentState):
    """Agent state extended with the looked-up user name.

    `user_name` is written by the `update_user_info` tool via
    Command(update=...) and read back by the `greet` tool.
    """

    user_name: str
class CustomContext(BaseModel):
    """Per-run context passed to the agent; carries the caller's identity."""

    user_id: str
@tool
def update_user_info(
    runtime: ToolRuntime[CustomContext, CustomState],
) -> Command:
    """Look up and update user info.

    Reads the user id from the run context, resolves a display name, and
    returns a Command that writes `user_name` into the agent state while
    appending a confirmation ToolMessage to the history.
    """
    user_id = runtime.context.user_id
    name = "John Smith" if user_id == "user_123" else "Unknown user"
    return Command(update={
        "user_name": name,
        # update the message history
        "messages": [
            ToolMessage(
                "Successfully looked up user information",
                tool_call_id=runtime.tool_call_id,
            )
        ],
    })
@tool
def greet(
    runtime: ToolRuntime[CustomContext, CustomState],
) -> str | Command:
    """Use this to greet the user once you found their info."""
    user_name = runtime.state.get("user_name", None)
    if user_name is None:
        # Name not in state yet: tell the model to call update_user_info first.
        return Command(update={
            "messages": [
                ToolMessage(
                    "Please call the 'update_user_info' tool it will get and update the user's name.",
                    tool_call_id=runtime.tool_call_id,
                )
            ]
        })
    return f"Hello {user_name}!"
# @Time:2025/12/19 10:31 # @Author:jinglv """ 在工具中写入短期记忆 """ from langchain.agents import create_agent, AgentState from langchain.tools import tool, ToolRuntime from langchain_core.messages import ToolMessage from langgraph.checkpoint.memory import InMemorySaver from langgraph.types import Command
from src.core.llms import model_client
def print_stream_result(response):
    """Print a streamed agent response as a readable step-by-step trace.

    Each streamed item maps a node name ("model" or "tools") to its state
    update; model steps carry either text content or tool calls.
    """
    for item in response:
        for key, value in item.items():
            if key == "model":
                print("-----执行步骤:调用大模型")
                # A model message has either text content or tool calls.
                if value['messages'][0].content:
                    # NOTE: use single quotes inside the f-string — nested
                    # double quotes are a syntax error before Python 3.12.
                    print(f"-----大模型分析的结果:{value['messages'][0].content}")
                elif value['messages'][0].tool_calls:
                    print("-----大模型分析的结果为调用以下工具:")
                    for tool_ in value['messages'][0].tool_calls:
                        print(f"工具名称:{tool_['name']},调用工具的入参:{tool_['args']}")
            elif key == "tools":
                print(f"智能体执行工具:{value['messages'][0].name}")
                print(f"工具执行结果:{value['messages'][0].content}")
from langchain.agents import create_agent from typing import TypedDict from langchain.agents.middleware import dynamic_prompt, ModelRequest
class CustomContext(TypedDict):
    """Run-context schema: the display name to address the user by."""

    user_name: str
def get_weather(city: str) -> str:
    """Get the weather in a city."""
    return f"The weather in {city} is always sunny!"
@dynamic_prompt
def dynamic_system_prompt(request: ModelRequest) -> str:
    """Build the system prompt from the per-run context's `user_name`."""
    user_name = request.runtime.context["user_name"]
    system_prompt = f"You are a helpful assistant. Address the user as {user_name}."
    return system_prompt
# Invoke the agent with a runtime context; the dynamic-prompt middleware
# reads `user_name` from it.  NOTE(review): `agent` is expected to be
# created earlier with create_agent — its construction is not visible here.
result = agent.invoke(
    {"messages": [{"role": "user", "content": "What is the weather in SF?"}]},
    context=CustomContext(user_name="John Smith"),
)
for msg in result["messages"]:
    msg.pretty_print()
# @Time:2025/12/29 14:18 # @Author:jinglv """ 使用 dynamic_prompt 可以根据短期记忆动态构建构建系统提示词 """ from langchain.agents import AgentState, create_agent from langchain.agents.middleware import dynamic_prompt, ModelRequest from langgraph.checkpoint.memory import InMemorySaver
from src.core.llms import model_client
def print_stream_result(response):
    """Print a streamed agent response as a readable step-by-step trace.

    Each streamed item maps a node name ("model" or "tools") to its state
    update; model steps carry either text content or tool calls.
    """
    for item in response:
        for key, value in item.items():
            if key == "model":
                print("-----执行步骤:调用大模型")
                # A model message has either text content or tool calls.
                if value['messages'][0].content:
                    # NOTE: use single quotes inside the f-string — nested
                    # double quotes are a syntax error before Python 3.12.
                    print(f"-----大模型分析的结果:{value['messages'][0].content}")
                elif value['messages'][0].tool_calls:
                    print("-----大模型分析的结果为调用以下工具:")
                    for tool_ in value['messages'][0].tool_calls:
                        print(f"工具名称:{tool_['name']},调用工具的入参:{tool_['args']}")
            elif key == "tools":
                print(f"智能体执行工具:{value['messages'][0].name}")
                print(f"工具执行结果:{value['messages'][0].content}")
from langchain.messages import RemoveMessage from langgraph.graph.message import REMOVE_ALL_MESSAGES from langgraph.checkpoint.memory import InMemorySaver from langchain.agents import create_agent, AgentState from langchain.agents.middleware import before_model from langchain_core.runnables import RunnableConfig from langgraph.runtime import Runtime from typing importAny
@before_model
def trim_messages(state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
    """Keep only the last few messages to fit context window.

    NOTE(review): this snippet appears truncated — only the short-circuit
    for small histories is present; the branch that actually trims longer
    histories is missing from the source and is not reconstructed here.
    """
    messages = state["messages"]

    if len(messages) <= 3:
        return None  # No changes needed
# Drive a short multi-turn conversation on one thread (same config), then
# ask a question that requires recalling the first turn from memory.
# NOTE(review): `agent` and `config` are defined outside this snippet.
agent.invoke({"messages": "hi, my name is bob"}, config)
agent.invoke({"messages": "write a short poem about cats"}, config)
agent.invoke({"messages": "now do the same but for dogs"}, config)
final_response = agent.invoke({"messages": "what's my name?"}, config)
# Print the assistant's final answer; the docstring below records the
# expected output for reference.
final_response["messages"][-1].pretty_print()
"""
================================== Ai Message ==================================

Your name is Bob. You told me that earlier. If you'd like me to call you a nickname or use a different name, just say the word.
"""
from langchain.messages import RemoveMessage from langgraph.checkpoint.memory import InMemorySaver from langchain.agents import create_agent, AgentState from langchain.agents.middleware import after_model from langgraph.runtime import Runtime
@after_model
def validate_response(state: AgentState, runtime: Runtime) -> dict | None:
    """Remove messages containing sensitive words."""
    STOP_WORDS = ["password", "secret"]
    last_message = state["messages"][-1]
    if any(word in last_message.content for word in STOP_WORDS):
        # Returning a RemoveMessage keyed by id deletes it from state.
        return {"messages": [RemoveMessage(id=last_message.id)]}
    return None
from langchain.messages import RemoveMessage from langgraph.graph.message import REMOVE_ALL_MESSAGES from langgraph.checkpoint.memory import InMemorySaver from langchain.agents import create_agent, AgentState from langchain.agents.middleware import before_model from langgraph.runtime import Runtime from langchain_core.runnables import RunnableConfig from typing importAny
@before_model
def trim_messages(state: AgentState, runtime: Runtime) -> dict[str, Any] | None:
    """Keep only the last few messages to fit context window.

    NOTE(review): this snippet appears truncated — only the short-circuit
    for small histories is present; the branch that actually trims longer
    histories is missing from the source and is not reconstructed here.
    """
    messages = state["messages"]

    if len(messages) <= 3:
        return None  # No changes needed
# Drive a short multi-turn conversation on one thread (same config), then
# ask a question that requires recalling the first turn from memory.
# NOTE(review): `agent` and `config` are defined outside this snippet.
agent.invoke({"messages": "hi, my name is bob"}, config)
agent.invoke({"messages": "write a short poem about cats"}, config)
agent.invoke({"messages": "now do the same but for dogs"}, config)
final_response = agent.invoke({"messages": "what's my name?"}, config)
# Print the assistant's final answer; the docstring below records the
# expected output for reference.
final_response["messages"][-1].pretty_print()
"""
================================== Ai Message ==================================

Your name is Bob. You told me that earlier. If you'd like me to call you a nickname or use a different name, just say the word.
"""
# 删除消息:RemoveMessage
# 删除特定消息,示例代码
from langchain.messages import RemoveMessage
def delete_messages(state):
    """Drop the two oldest messages once the history exceeds two entries.

    Returns a state update of RemoveMessage markers for the two earliest
    messages, or None (implicitly) when nothing needs deleting.
    """
    messages = state["messages"]
    if len(messages) > 2:
        # remove the earliest two messages
        return {"messages": [RemoveMessage(id=m.id) for m in messages[:2]]}
# 删除所有消息,示例代码
from langgraph.graph.message import REMOVE_ALL_MESSAGES
from langchain.agents import create_agent from langchain.agents.middleware import SummarizationMiddleware from langgraph.checkpoint.memory import InMemorySaver from langchain_core.runnables import RunnableConfig
# The thread id selects which short-term-memory thread the checkpointer uses.
# NOTE(review): `agent` is defined outside this snippet.
config: RunnableConfig = {"configurable": {"thread_id": "1"}}
agent.invoke({"messages": "hi, my name is bob"}, config)
agent.invoke({"messages": "write a short poem about cats"}, config)
agent.invoke({"messages": "now do the same but for dogs"}, config)
final_response = agent.invoke({"messages": "what's my name?"}, config)