LangChain Chains — 链式调用
LCEL 基础链
python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

# Configure the chat model client; pass api_key / base_url here as needed.
# (The original snippet used a literal `...` inside the call, which is a
# SyntaxError in Python — a positional Ellipsis after a keyword argument.)
llm = ChatOpenAI(model="qwen-turbo")

# The simplest LCEL chain: prompt -> model -> plain-string output.
chain = (
    ChatPromptTemplate.from_template("用中文总结以下内容:{text}")
    | llm
    | StrOutputParser()
)
result = chain.invoke({"text": "Annual report content..."})

顺序链(Sequential Chain)
python
from langchain_core.runnables import RunnablePassthrough

# Step 1: pull the key figures out of the annual report.
extract_prompt = ChatPromptTemplate.from_template(
    "从以下年报中提取:公司名称、营收、净利润、不良贷款率\n{report}"
)
extract_chain = extract_prompt | llm | StrOutputParser()

# Step 2: grade the risk based on the extracted figures.
risk_prompt = ChatPromptTemplate.from_template(
    "基于以下财务数据,给出风险评级(高/中/低)和理由:\n{financial_data}"
)
risk_chain = risk_prompt | llm | StrOutputParser()

# Compose: report -> extract -> risk. The dict step fans the input out:
# extract_chain's output becomes "financial_data" while RunnablePassthrough
# forwards the original input unchanged as "report".
full_chain = (
    {"financial_data": extract_chain, "report": RunnablePassthrough()}
    | risk_chain
)
result = full_chain.invoke({"report": "某银行年报全文..."})

条件路由链
python
from langchain_core.runnables import RunnableLambda

# Specialist chains — defined before the router function that returns them,
# so the snippet has no forward references.
loan_chain = (
    ChatPromptTemplate.from_template("作为贷款专家回答:{query}")
    | llm | StrOutputParser()
)
investment_chain = (
    ChatPromptTemplate.from_template("作为理财顾问回答:{query}")
    | llm | StrOutputParser()
)
general_chain = (
    ChatPromptTemplate.from_template("作为金融客服回答:{query}")
    | llm | StrOutputParser()
)


def route_by_query_type(input_dict):
    """Pick the specialist chain that matches the query's topic.

    Returns a Runnable; when a function wrapped in RunnableLambda returns
    a Runnable, LCEL invokes that Runnable with the same input.
    """
    query = input_dict["query"].lower()
    if any(kw in query for kw in ["贷款", "利率", "月供"]):
        return loan_chain
    if any(kw in query for kw in ["理财", "基金", "收益"]):
        return investment_chain
    return general_chain


router = RunnableLambda(route_by_query_type)
routed_chain = router
result = routed_chain.invoke({"query": "我想申请房贷,利率是多少?"})

并行链
python
from langchain_core.runnables import RunnableParallel

# Run three independent analyses over the same company profile concurrently.
# Keeping the prompts in a table makes the branch structure explicit.
_analysis_prompts = {
    "risk_analysis": "分析风险:{company_info}",
    "growth_analysis": "分析成长性:{company_info}",
    "compliance_check": "合规检查:{company_info}",
}
analysis_chain = RunnableParallel(
    **{
        name: ChatPromptTemplate.from_template(template) | llm | StrOutputParser()
        for name, template in _analysis_prompts.items()
    }
)

results = analysis_chain.invoke({
    "company_info": "某科技公司,成立5年,年营收1000万..."
})
print("风险分析:", results["risk_analysis"])
print("成长分析:", results["growth_analysis"])
print("合规检查:", results["compliance_check"])

带回退的链(Fallback)
python
from langchain_openai import ChatOpenAI

# Primary model: the stronger (and pricier) option.
# (The original used a literal `...` inside the call — a SyntaxError:
# positional Ellipsis after a keyword argument. Pass api_key / base_url
# here as needed instead.)
primary_llm = ChatOpenAI(model="qwen-max")
# Backup model used only when the primary call raises.
fallback_llm = ChatOpenAI(model="qwen-turbo")

# with_fallbacks retries the same input against fallback_llm automatically
# if primary_llm errors out (rate limit, timeout, etc.).
robust_llm = primary_llm.with_fallbacks([fallback_llm])

chain = (
    ChatPromptTemplate.from_template("{question}")
    | robust_llm
    | StrOutputParser()
)