Ollama 的 JavaScript SDK(npm 包 `ollama`)提供了简洁的接口来进行文本生成、聊天以及模型管理等操作。
import ollama from 'ollama'
// Single-turn chat: send one user message and print the assistant's reply.
const question = { role: 'user', content: '什么是 JavaScript?' }
const response = await ollama.chat({ model: 'llama3.2', messages: [question] })
console.log(response.message.content)
// Multi-turn chat: the API is stateless, so prior turns must be passed
// back explicitly for the model to retain context.
const history = [
  { role: 'user', content: '我叫小明' },
  { role: 'assistant', content: '你好小明!' },
  { role: 'user', content: '我叫什么名字?' }
]
const response = await ollama.chat({ model: 'llama3.2', messages: history })
console.log(response.message.content)
// System prompt: steer the assistant's behavior with a leading system message.
const expertPersona = { role: 'system', content: '你是一个 JavaScript 专家' }
const response = await ollama.chat({
  model: 'llama3.2',
  messages: [expertPersona, { role: 'user', content: '什么是闭包?' }]
})
// Sampling options are passed through the `options` field:
// temperature (randomness), num_ctx (context window size), top_p (nucleus sampling).
const samplingOptions = { temperature: 0.7, num_ctx: 4096, top_p: 0.9 }
const response = await ollama.chat({
  model: 'llama3.2',
  messages: [{ role: 'user', content: '写一首诗' }],
  options: samplingOptions
})
// One-shot generation (no chat history); the text lands in `response.response`.
const prompt = '用 JavaScript 写一个快速排序'
const response = await ollama.generate({ model: 'llama3.2', prompt })
console.log(response.response)
// generate() also accepts a top-level `system` prompt alongside the user prompt.
const expertSystem = '你是一个 JavaScript 编程专家'
const response = await ollama.generate({
  model: 'llama3.2',
  system: expertSystem,
  prompt: '写一个函数'
})
// Structured output: `format: 'json'` constrains the model to emit valid JSON,
// which can then be parsed directly.
const request = {
  model: 'llama3.2',
  prompt: '生成一个用户信息,包含姓名、年龄、邮箱',
  format: 'json'
}
const response = await ollama.generate(request)
const user = JSON.parse(response.response)
console.log(user)
// Embeddings: convert text into a numeric vector (e.g. for similarity search).
// NOTE(review): newer ollama-js releases also expose an `embed()` API —
// confirm which one the installed SDK version provides.
const response = await ollama.embeddings({
  model: 'nomic-embed-text',
  prompt: '这是一段需要向量化的文本'
})
const { embedding } = response
console.log(`向量维度: ${embedding.length}`)
// List locally installed models with their on-disk size in GB.
const { models } = await ollama.list()
for (const { name, size } of models) {
  const sizeGB = (size / (1024 ** 3)).toFixed(2)
  console.log(`${name}: ${sizeGB} GB`)
}
// Pull a model, streaming progress events as they arrive.
// Download chunks carry numeric `total`/`completed` byte counts; other chunks
// (manifest, verification, success) carry only a `status` string.
const stream = await ollama.pull({ model: 'llama3.2', stream: true })
for await (const chunk of stream) {
  if (chunk.total) {
    // Guard on `total` rather than a hard-coded status string: the exact
    // status text varies per layer digest, and dividing by a missing `total`
    // would print "NaN%".
    const percent = (((chunk.completed ?? 0) / chunk.total) * 100).toFixed(1)
    process.stdout.write(`\r下载进度: ${percent}%`)
  } else {
    console.log(chunk.status)
  }
}
// Remove a locally installed model by name.
const target = 'my-model'
await ollama.delete({ model: target })
console.log('模型已删除')
// Create a custom model from an inline Modelfile: base it on llama3.2,
// pin a system prompt, and set a default temperature.
// NOTE(review): recent ollama-js releases replaced the `modelfile` string
// parameter with structured fields (`from`, `system`, ...) — confirm against
// the installed SDK version.
await ollama.create({
model: 'my-assistant',
modelfile: `
FROM llama3.2
SYSTEM 你是一个友好的助手
PARAMETER temperature 0.7
`
})
// Tool calling: declare a function schema; when the model decides to use it,
// the reply's message carries a `tool_calls` array instead of plain text.
const weatherTool = {
  type: 'function',
  function: {
    name: 'get_weather',
    description: '获取城市天气',
    parameters: {
      type: 'object',
      properties: {
        city: { type: 'string', description: '城市名称' }
      },
      required: ['city']
    }
  }
}
const response = await ollama.chat({
  model: 'llama3.2',
  messages: [{ role: 'user', content: '北京今天天气怎么样?' }],
  tools: [weatherTool]
})
// Echo each tool invocation the model requested.
const calls = response.message.tool_calls
if (calls) {
  for (const { function: fn } of calls) {
    console.log(`调用: ${fn.name}`)
    console.log(`参数:`, fn.arguments)
  }
}
// Multimodal: attach an image (read as a Buffer) to a message for a
// vision-capable model such as llava.
import { readFileSync } from 'fs'
const image = readFileSync('image.png')
const visionMessage = {
  role: 'user',
  content: '这张图片里有什么?',
  images: [image]
}
const response = await ollama.chat({ model: 'llava', messages: [visionMessage] })
console.log(response.message.content)
// Stateful chat wrapper: accumulates the conversation so every send() call
// passes the full history to the model. An optional system prompt is pinned
// as the first message and survives clear().
class OllamaChat {
  constructor(model = 'llama3.2', system = null) {
    this.model = model
    this.messages = system
      ? [{ role: 'system', content: system }]
      : []
  }

  // Send one user message, record the assistant's reply in the history,
  // and return the reply text.
  async send(content) {
    this.messages.push({ role: 'user', content })
    const { message } = await ollama.chat({
      model: this.model,
      messages: this.messages
    })
    this.messages.push({ role: 'assistant', content: message.content })
    return message.content
  }

  // Drop the conversation, keeping only the system prompt (if any).
  clear() {
    this.messages = this.messages.filter((m) => m.role === 'system')
  }
}
// Usage: a short multi-turn conversation through the wrapper; the second
// answer should recall the name given in the first turn.
const chat = new OllamaChat('llama3.2', '你是一个友好的助手')
for (const prompt of ['你好', '我叫小明', '我叫什么名字?']) {
  console.log(await chat.send(prompt))
}