LangChain 1.X CheatSheet
精简速查手册 - 背诵版
1. 安装与环境
bash
pip install langchain langchain-core langgraph langchain-openai
pip install "langgraph-cli[inmem]" # 开发工具
bash
export OPENAI_API_KEY=sk-xxx
export LANGSMITH_TRACING=true
export LANGSMITH_API_KEY=lsv2_xxx
2. Agent 核心
创建 Agent
python
from langchain.agents import create_agent
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.store.memory import InMemoryStore
agent = create_agent(
model="gpt-4o", # 模型
tools=[my_tool], # 工具
system_prompt="你是助手", # 提示
context_schema=MyContext, # 运行时上下文
middleware=[my_middleware], # 中间件
checkpointer=InMemorySaver(), # 短期记忆
store=InMemoryStore(), # 长期记忆
)
调用方式
python
result = agent.invoke({"messages": [{"role": "user", "content": "你好"}]})
# 流式
for chunk in agent.stream({"messages": [...]}, stream_mode="messages"):
print(chunk.content, end="")
# 带线程 (保持对话)
config = {"configurable": {"thread_id": "user_123"}}
result = agent.invoke({"messages": [...]}, config=config)
3. 工具定义
python
from langchain_core.tools import tool
from langchain.agents import ToolRuntime, Command
# 基础工具
@tool
def search(query: str) -> str:
"""搜索信息"""
return f"结果: {query}"
# 访问运行时
@tool
def get_user(runtime: ToolRuntime[Context]) -> str:
"""获取用户"""
return runtime.context.user_name
# 更新状态
@tool
def login(user: str, runtime: ToolRuntime) -> Command:
"""登录"""
    return Command(result="成功", update={"authenticated": True})
4. 消息类型
python
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage
# 三种格式
HumanMessage(content="你好")
{"role": "user", "content": "你好"}
("user", "你好")
# 多模态
HumanMessage(content=[
{"type": "text", "text": "描述图片"},
{"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}
])
5. 中间件钩子
python
from langchain.agents import (
before_model, after_model, # 模型前后
dynamic_prompt, wrap_model_call, # 动态提示/包装调用
)
@before_model
def guard(state, runtime):
if "敏感词" in state["messages"][-1].content:
return {"messages": [AIMessage(content="拒绝")]}
return None
@dynamic_prompt
def prompt(request):
return f"你是 {request.runtime.context.user_name} 的助手"
@wrap_model_call
def filter_tools(request, handler):
allowed = [t for t in request.tools if t.name.startswith("safe_")]
    return handler(request.override(tools=allowed))
6. 运行时上下文
python
from dataclasses import dataclass
@dataclass
class Context:
user_id: str
user_name: str
is_admin: bool = False
agent = create_agent(model="gpt-4o", tools=[...], context_schema=Context)
result = agent.invoke(
{"messages": [...]},
context=Context(user_id="123", user_name="张三", is_admin=True)
)
7. 记忆系统
短期记忆 (会话内)
python
from langgraph.checkpoint.memory import InMemorySaver
agent = create_agent(model="gpt-4o", tools=[...], checkpointer=InMemorySaver())
config = {"configurable": {"thread_id": "session_1"}}
result1 = agent.invoke({"messages": [...]}, config=config)
result2 = agent.invoke({"messages": [...]}, config=config) # 记住上文
长期记忆 (跨会话)
python
from langgraph.store.memory import InMemoryStore
store = InMemoryStore()
# 操作
store.put(("users", "123"), "prefs", {"theme": "dark"})
item = store.get(("users", "123"), "prefs") # item.value
store.search(("users",), query="偏好", limit=5)
store.delete(("users", "123"), "prefs")
| 类型 | 作用域 | 存储 | 访问 |
|---|---|---|---|
| 短期 | 会话内 | Checkpointer | state |
| 长期 | 跨会话 | Store | runtime.store |
8. 结构化输出
python
from pydantic import BaseModel
class Response(BaseModel):
answer: str
confidence: float
# 方式1: 模型级别
model = ChatOpenAI(model="gpt-4o")
result = model.with_structured_output(Response).invoke("问题")
# 方式2: Agent 级别
agent = create_agent(model="gpt-4o", tools=[...], response_format=Response)
9. 人机协作 (HITL)
python
from langchain.agents.middleware import HumanInTheLoopMiddleware
agent = create_agent(
model="gpt-4o",
tools=[write_file, send_email],
middleware=[
HumanInTheLoopMiddleware(
interrupt_on={"write_file": True, "send_email": True}
),
],
checkpointer=InMemorySaver(), # 必需
)
result = agent.invoke({"messages": [...]}, config=config)
if result.get("__interrupt__"):
# 批准: {"decision": "approve"}
# 修改: {"decision": "edit", "new_args": {...}}
# 拒绝: {"decision": "reject", "feedback": "原因"}
    result = agent.invoke({"decision": "approve"}, config=config)
10. 多代理系统
python
# 子 Agent 包装为工具
research_agent = create_agent(model="gpt-4o", tools=[search])
@tool("research")
def call_research(query: str) -> str:
result = research_agent.invoke({"messages": [{"role": "user", "content": query}]})
return result["messages"][-1].content
# 主管 Agent
supervisor = create_agent(
model="gpt-4o",
tools=[call_research, call_math],
system_prompt="协调专家完成任务"
)
11. RAG 检索
python
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter
# 构建索引
docs = DirectoryLoader("./docs/").load()
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)
vectorstore = FAISS.from_documents(chunks, OpenAIEmbeddings())
# 检索工具
@tool
def search_docs(query: str) -> str:
docs = vectorstore.similarity_search(query, k=3)
    return "\n\n".join([d.page_content for d in docs])
12. MCP 协议
python
from langchain_mcp_adapters.client import MultiServerMCPClient
from mcp.server.fastmcp import FastMCP
# 客户端
client = MultiServerMCPClient({
"math": {"transport": "stdio", "command": "python", "args": ["server.py"]},
})
tools = await client.get_tools()
# 服务器
mcp = FastMCP("MyServer")
@mcp.tool()
def add(a: int, b: int) -> int:
return a + b
mcp.run(transport="stdio")
13. 开发与部署
本地开发
bash
langgraph dev # 启动开发服务器
# 访问 https://smith.langchain.com/studio/?baseUrl=http://127.0.0.1:2024
json
// langgraph.json
{"graphs": {"agent": "./agent.py:agent"}, "env": ".env"}
追踪与监控
python
from langsmith import traceable
import langsmith as ls
@traceable
def my_function(data):
return process(data)
# 添加标签
result = agent.invoke(
{"messages": [...]},
config={"tags": ["prod"], "metadata": {"user_id": "123"}}
)
部署 SDK
python
from langgraph_sdk import get_client
client = get_client(url="deployment-url", api_key="api-key")
thread = await client.threads.create()
async for chunk in client.runs.stream(thread["thread_id"], "agent", input={...}):
    print(chunk.data)
核心导入速查
python
# Agent
from langchain.agents import create_agent, ToolRuntime, Command
from langchain.agents import before_model, after_model, dynamic_prompt, wrap_model_call
# 工具
from langchain_core.tools import tool
# 消息
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage
# 模型
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
# 记忆
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.store.memory import InMemoryStore
# 中间件
from langchain.agents.middleware import HumanInTheLoopMiddleware, SummarizationMiddleware
# 追踪
from langsmith import traceable
关键概念速记
| 概念 | 核心 |
|---|---|
| Agent | create_agent(model, tools, ...) |
| 工具 | @tool + docstring |
| 中间件 | @before_model, @after_model, @dynamic_prompt |
| 短期记忆 | checkpointer + thread_id |
| 长期记忆 | store.put/get/search |
| 运行时 | context_schema + ToolRuntime |
| HITL | HumanInTheLoopMiddleware + __interrupt__ |
| 多代理 | 子 Agent 包装为 @tool |
| RAG | 向量存储 + 检索工具 |
| 追踪 | @traceable + config.tags/metadata |
LangChain 1.X - 精简背诵版