推送模型接口用于将本地模型上传到 Ollama 模型库或私有仓库。
curl http://localhost:11434/api/push -d '{
"name": "myusername/mymodel"
}'
响应(流式):
{"status":"retrieving manifest"}
{"status":"pushing manifest"}
{"status":"pushing layer","digest":"abc123...","total":4661224676,"completed":0}
{"status":"pushing layer","digest":"abc123...","total":4661224676,"completed":1000000000}
...
{"status":"success"}
| 参数 | 类型 | 必需 | 说明 |
|---|---|---|---|
| name | string | 是 | 模型名称(含用户名前缀) |
| insecure | bool | 否 | 是否允许不安全连接 |
| stream | bool | 否 | 是否流式输出,默认 true |
推送的模型需要包含用户名前缀:
username/modelname
username/modelname:tag
例如:
myuser/my-llama
myuser/my-llama:v1.0
import json

import requests


def push_model(model_name):
    """Push a local model to the Ollama registry, printing streamed progress.

    Args:
        model_name: Model name including the username prefix,
            e.g. "myuser/my-llama" or "myuser/my-llama:v1.0".

    Raises:
        requests.HTTPError: if the server rejects the push request.
    """
    response = requests.post(
        "http://localhost:11434/api/push",
        json={"name": model_name},
        stream=True,
        # Bound only the connect phase; the read side stays open for the
        # duration of the (potentially long) streamed upload.
        timeout=(10, None),
    )
    response.raise_for_status()
    for line in response.iter_lines():
        if not line:
            continue
        data = json.loads(line)
        status = data.get("status", "")
        if "pushing layer" in status:
            total = data.get("total", 0)
            completed = data.get("completed", 0)
            if total > 0:
                percent = (completed / total) * 100
                print(f"\r上传进度: {percent:.1f}%", end="", flush=True)
        else:
            print(status)
    print("\n推送完成")


push_model("myuser/my-llama")
// Push a model to the Ollama registry, logging each streamed status line.
// The server responds with newline-delimited JSON; chunks from the network
// do not align with line boundaries, so partial lines are buffered.
async function pushModel(modelName) {
  const response = await fetch('http://localhost:11434/api/push', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: modelName })
  });
  if (!response.ok) {
    throw new Error(`push request failed: HTTP ${response.status}`);
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffered = ''; // holds an incomplete JSON line spanning chunk boundaries
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // stream: true keeps multi-byte characters split across chunks intact.
    buffered += decoder.decode(value, { stream: true });
    const lines = buffered.split('\n');
    buffered = lines.pop(); // last piece may be a partial line — keep it
    for (const line of lines) {
      if (!line) continue;
      const data = JSON.parse(line);
      console.log(data.status);
    }
  }
  if (buffered.trim()) {
    console.log(JSON.parse(buffered).status);
  }
  console.log('推送完成');
}
await pushModel('myuser/my-llama');
package main
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
)
type PushRequest struct {
Name string `json:"name"`
}
// pushModel uploads the named model via the Ollama /api/push endpoint and
// prints each streamed status line. The name must include the username
// prefix, e.g. "myuser/my-llama".
func pushModel(modelName string) error {
	body, err := json.Marshal(PushRequest{Name: modelName})
	if err != nil {
		return fmt.Errorf("encoding push request: %w", err)
	}
	resp, err := http.Post(
		"http://localhost:11434/api/push",
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		// Best-effort: include whatever error detail the server sent.
		msg, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("push failed: %s: %s", resp.Status, msg)
	}
	decoder := json.NewDecoder(resp.Body)
	for {
		var data map[string]interface{}
		if err := decoder.Decode(&data); err != nil {
			if err == io.EOF {
				break
			}
			// A decode error on a stream is unrecoverable; returning
			// it avoids spinning forever on the same broken input.
			return fmt.Errorf("decoding push response: %w", err)
		}
		fmt.Println(data["status"])
	}
	return nil
}
// main pushes an example model and reports failure on stderr with a
// non-zero exit code instead of silently discarding the error.
func main() {
	if err := pushModel("myuser/my-llama"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
def push_to_private(model_name, registry="my-registry.com"):
    """Push a model to a private registry by qualifying its name with the registry host."""
    qualified_name = "/".join([registry, model_name])
    push_model(qualified_name)


push_to_private("my-llama", "registry.example.com")
推送模型前需要完成身份验证。Ollama 使用本地密钥对进行认证,而不是用户名密码:
# 查看本机公钥
cat ~/.ollama/id_ed25519.pub
# 登录 https://ollama.com,在账号设置的 "Ollama keys" 页面中添加上述公钥,
# 之后即可推送带有自己用户名前缀的模型。