与 LangChain 集成

LangChain 是流行的 LLM 应用开发框架,与 Ollama 集成非常简单。

安装依赖

pip install langchain langchain-community chromadb

基本使用

直接调用

from langchain_community.llms import Ollama

# Build a client for the locally served llama3.2 model.
local_llm = Ollama(model="llama3.2")

# invoke() sends a single prompt and blocks until the full reply arrives.
answer = local_llm.invoke("你好,请介绍一下自己")
print(answer)

流式输出

from langchain_community.llms import Ollama

# Streaming variant: chunks are printed as soon as the model emits them.
streaming_llm = Ollama(model="llama3.2")

for piece in streaming_llm.stream("写一首诗"):
    print(piece, end="", flush=True)

聊天模型

from langchain_community.chat_models import ChatOllama
from langchain_core.messages import HumanMessage, SystemMessage

# Chat-style interface: the conversation is a list of typed messages.
chat_model = ChatOllama(model="llama3.2")

conversation = [
    SystemMessage(content="你是一个友好的助手"),
    HumanMessage(content="你好"),
]

# The reply is a message object; its text lives on .content.
reply = chat_model.invoke(conversation)
print(reply.content)

提示词模板

from langchain_community.llms import Ollama
from langchain_core.prompts import PromptTemplate

llm = Ollama(model="llama3.2")

# Template with two placeholders, filled in at invoke time.
role_template = """你是一个{role}。

问题:{question}
回答:"""

role_prompt = PromptTemplate.from_template(role_template)

# LCEL pipe: the rendered prompt feeds straight into the model.
qa_chain = role_prompt | llm

answer = qa_chain.invoke(
    {"role": "Python 专家", "question": "什么是装饰器?"}
)
print(answer)

链式调用

from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

llm = Ollama(model="llama3.2")

# Chat prompt assembled from (role, template) pairs.
chat_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个{role}"),
        ("user", "{input}"),
    ]
)

# StrOutputParser unwraps the model output into a plain string.
translation_chain = chat_prompt | llm | StrOutputParser()

result = translation_chain.invoke(
    {"role": "翻译专家", "input": "将'Hello World'翻译成中文"}
)
print(result)

RAG 应用

from langchain_community.llms import Ollama
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

# Generation model, plus a dedicated embedding model for retrieval.
llm = Ollama(model="llama3.2")
embeddings = OllamaEmbeddings(model="nomic-embed-text")

# Toy knowledge base.
corpus = [
    "Ollama 是一个本地运行大语言模型的工具。",
    "Ollama 支持 Llama、Mistral 等多种模型。",
    "Ollama 提供 REST API 接口。"
]

# Wrap the raw strings as Documents and chunk them for indexing.
docs = [Document(page_content=text) for text in corpus]
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=10)
chunks = splitter.split_documents(docs)

# Index the chunks in an in-memory Chroma store and expose a retriever.
vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings)
retriever = vectorstore.as_retriever()

rag_template = """根据以下上下文回答问题:

{context}

问题:{question}
"""
rag_prompt = ChatPromptTemplate.from_template(rag_template)


def format_docs(docs):
    # Join the retrieved chunks into a single context string.
    return "\n\n".join(doc.page_content for doc in docs)


# The retriever fills {context}; the raw question passes through unchanged.
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | rag_prompt
    | llm
)

print(rag_chain.invoke("Ollama 是什么?"))

Agent

from langchain_community.llms import Ollama
from langchain.agents import AgentExecutor, create_react_agent
from langchain_core.tools import Tool
from langchain_core.prompts import PromptTemplate

llm = Ollama(model="llama3.2")


def get_weather(location):
    """Dummy tool: return a canned weather report for *location*."""
    return f"{location}的天气晴朗,温度25度"


tools = [
    Tool(
        name="get_weather",
        func=get_weather,
        description="获取指定地点的天气信息"
    )
]

# NOTE: create_react_agent validates the prompt and requires the
# {tools}, {tool_names} and {agent_scratchpad} variables; the original
# template omitted {tool_names}, which raises a ValueError when the
# agent is constructed. The format section also needs the full ReAct
# Thought/Action/Observation/Final Answer loop so the ReAct output
# parser can interpret the model's replies.
prompt = PromptTemplate.from_template(
    """你是一个助手,可以使用工具回答问题。

可用工具:
{tools}

使用格式:
Thought: 思考下一步该做什么
Action: 要使用的工具,必须是 [{tool_names}] 之一
Action Input: 工具输入
Observation: 工具返回的结果
...(Thought/Action/Action Input/Observation 可以重复多次)
Thought: 我已经知道最终答案
Final Answer: 对原始问题的最终回答

开始!

问题:{input}
Thought:{agent_scratchpad}"""
)

agent = create_react_agent(llm, tools, prompt)
# handle_parsing_errors lets the executor recover when the local model
# deviates from the expected ReAct output format instead of crashing.
agent_executor = AgentExecutor(
    agent=agent,
    tools=tools,
    verbose=True,
    handle_parsing_errors=True,
)

response = agent_executor.invoke({"input": "北京今天天气怎么样?"})
print(response)