Go 客户端配置

除了直接使用标准库调用 HTTP API,还可以使用 Ollama 仓库自带的官方 `api` 包(github.com/ollama/ollama/api),或社区维护的客户端库。

直接调用 API

最简单的方式是使用标准库:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
)

// ChatRequest is the JSON payload sent to the Ollama /api/chat endpoint.
type ChatRequest struct {
    Model    string    `json:"model"`    // model name to run, e.g. "llama3.2"
    Messages []Message `json:"messages"` // conversation history, oldest first
    Stream   bool      `json:"stream"`   // false requests a single JSON reply instead of a stream
}

// Message is one chat turn: a role (such as "user") and its text content.
type Message struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

// ChatResponse is the non-streaming reply returned by /api/chat.
type ChatResponse struct {
    Message Message `json:"message"` // the model's reply message
    Done    bool    `json:"done"`    // true once generation has finished
}

// main sends one non-streaming chat request to a local Ollama server
// and prints the model's reply.
func main() {
    req := ChatRequest{
        Model: "llama3.2",
        Messages: []Message{
            {Role: "user", Content: "你好"},
        },
        Stream: false,
    }

    body, err := json.Marshal(req)
    if err != nil {
        panic(err)
    }
    resp, err := http.Post(
        "http://localhost:11434/api/chat",
        "application/json",
        bytes.NewReader(body),
    )
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // A non-200 status means the API rejected the request (e.g. unknown
    // model); surface the server's message instead of decoding garbage.
    if resp.StatusCode != http.StatusOK {
        msg, _ := io.ReadAll(resp.Body) // best effort: include body in the error
        panic(fmt.Sprintf("api/chat: status %d: %s", resp.StatusCode, msg))
    }

    var result ChatResponse
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        panic(err)
    }

    fmt.Println(result.Message.Content)
}

封装客户端

创建一个可复用的客户端:

package ollama

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "time"
)

// Client is a minimal Ollama API client bound to a single server host.
type Client struct {
    Host string       // base URL of the Ollama server, e.g. "http://localhost:11434"
    HTTP *http.Client // underlying HTTP client, preconfigured with a timeout
}

// NewClient returns a Client for the given host. An empty host selects
// the default local Ollama address. The embedded http.Client carries a
// 120-second timeout so a hung request cannot block the caller forever.
func NewClient(host string) *Client {
    if host == "" {
        host = "http://localhost:11434"
    }

    return &Client{
        Host: host,
        HTTP: &http.Client{
            Timeout: 120 * time.Second,
        },
    }
}

// Chat sends a non-streaming chat request to /api/chat and returns the
// decoded response. A non-200 status or a decode failure is reported as
// an error rather than returned as an empty response.
func (c *Client) Chat(model string, messages []Message) (*ChatResponse, error) {
    req := ChatRequest{
        Model:    model,
        Messages: messages,
        Stream:   false,
    }

    body, err := json.Marshal(req)
    if err != nil {
        return nil, fmt.Errorf("encoding chat request: %w", err)
    }
    resp, err := c.HTTP.Post(
        c.Host+"/api/chat",
        "application/json",
        bytes.NewReader(body),
    )
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        msg, _ := io.ReadAll(resp.Body) // best effort: include server message in the error
        return nil, fmt.Errorf("api/chat: status %d: %s", resp.StatusCode, msg)
    }

    var result ChatResponse
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        return nil, fmt.Errorf("decoding chat response: %w", err)
    }

    return &result, nil
}

// Generate sends a non-streaming completion request to /api/generate and
// returns the decoded response. A non-200 status or a decode failure is
// reported as an error rather than returned as an empty response.
func (c *Client) Generate(model, prompt string) (*GenerateResponse, error) {
    req := GenerateRequest{
        Model:  model,
        Prompt: prompt,
        Stream: false,
    }

    body, err := json.Marshal(req)
    if err != nil {
        return nil, fmt.Errorf("encoding generate request: %w", err)
    }
    resp, err := c.HTTP.Post(
        c.Host+"/api/generate",
        "application/json",
        bytes.NewReader(body),
    )
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        msg, _ := io.ReadAll(resp.Body) // best effort: include server message in the error
        return nil, fmt.Errorf("api/generate: status %d: %s", resp.StatusCode, msg)
    }

    var result GenerateResponse
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        return nil, fmt.Errorf("decoding generate response: %w", err)
    }

    return &result, nil
}

// List fetches the locally available models from /api/tags. A non-200
// status or a decode failure is reported as an error.
func (c *Client) List() (*ModelsResponse, error) {
    resp, err := c.HTTP.Get(c.Host + "/api/tags")
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        msg, _ := io.ReadAll(resp.Body) // best effort: include server message in the error
        return nil, fmt.Errorf("api/tags: status %d: %s", resp.StatusCode, msg)
    }

    var result ModelsResponse
    if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
        return nil, fmt.Errorf("decoding models response: %w", err)
    }

    return &result, nil
}

使用社区库

使用 ollama-go 库:

go get github.com/ollama/ollama-go

使用示例:

package main

import (
    "context"
    "fmt"

    "github.com/ollama/ollama-go"
)

// Send a single chat turn through the community client and print the reply.
func main() {
    cli := ollama.NewClient()

    // One user turn; the library handles request encoding and transport.
    msgs := []ollama.Message{
        {Role: "user", Content: "你好"},
    }
    res, err := cli.Chat(context.Background(), &ollama.ChatRequest{
        Model:    "llama3.2",
        Messages: msgs,
    })
    if err != nil {
        panic(err)
    }

    fmt.Println(res.Message.Content)
}

环境变量配置

package main

import (
    "os"
)

// getOllamaHost resolves the Ollama server address: the OLLAMA_HOST
// environment variable wins; otherwise the standard local default is used.
func getOllamaHost() string {
    host := os.Getenv("OLLAMA_HOST")
    if host == "" {
        return "http://localhost:11434"
    }
    return host
}

// main builds a client against the host resolved from the environment.
func main() {
    client := NewClient(getOllamaHost())
    // ... use client here ...
    _ = client // placeholder: without this the unused variable is a compile error
}