mirror of https://github.com/ollama/ollama.git (synced 2025-12-31 03:30:51 -05:00)

Compare commits: remove-fir...v0.1.10 (45 commits)
Commits:

- 41434a7cdc
- 71687ab809
- d8842b4d4b
- 585f9c01fa
- c13bde962d
- ee307937fd
- ab6639bc47
- dbe6e77472
- 4b3f4bc7d9
- a5ccf742c1
- e33ef391cd
- 75295b9528
- db5ef3004c
- b5f158f046
- 30141b42e9
- 5f301ece1d
- 77954bea0e
- 54f92f01cb
- 30ae6e731e
- b28a30f7ba
- ecd71347ab
- 8ee4cbea0f
- 652d90e1c7
- bc22d5a38b
- 71d71d0988
- 1901044b07
- d660eebf22
- cac11c9137
- a07c935d34
- 1552cee59f
- 3ca56b5ada
- b0d14ed51c
- f61f340279
- 47ffb81db7
- 69795d2db0
- acde0819d9
- f748331aa3
- f4edc302a8
- 64b7e0c218
- eced0d52ab
- 96bf9cafa7
- b6817a83d8
- 73f3448ede
- e4f59ba073
- 5de568bffe
README.md

@@ -225,6 +225,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [Web UI](https://github.com/ollama-webui/ollama-webui)
- [Ollamac](https://github.com/kevinhermawan/Ollamac)
- [big-AGI](https://github.com/enricoros/big-agi/blob/main/docs/config-ollama.md)
- [Cheshire Cat assistant framework](https://github.com/cheshire-cat-ai/core)

### Terminal

@@ -247,6 +248,10 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)
- [Ollama for Dart](https://github.com/breitburg/dart-ollama)

### Mobile

- [Maid](https://github.com/danemadsen/Maid) (Mobile Artificial Intelligence Distribution)

### Extensions & Plugins

- [Raycast extension](https://github.com/MassimilianoPasquini97/raycast_ollama)
api/client.go

@@ -5,6 +5,7 @@ import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net"

@@ -95,11 +96,19 @@ func (c *Client) do(ctx context.Context, method, path string, reqData, respData
    var reqBody io.Reader
    var data []byte
    var err error
    if reqData != nil {

    switch reqData := reqData.(type) {
    case io.Reader:
        // reqData is already an io.Reader
        reqBody = reqData
    case nil:
        // noop
    default:
        data, err = json.Marshal(reqData)
        if err != nil {
            return err
        }

        reqBody = bytes.NewReader(data)
    }

@@ -287,3 +296,18 @@ func (c *Client) Heartbeat(ctx context.Context) error {
    }
    return nil
}

func (c *Client) CreateBlob(ctx context.Context, digest string, r io.Reader) error {
    if err := c.do(ctx, http.MethodHead, fmt.Sprintf("/api/blobs/%s", digest), nil, nil); err != nil {
        var statusError StatusError
        if !errors.As(err, &statusError) || statusError.StatusCode != http.StatusNotFound {
            return err
        }

        if err := c.do(ctx, http.MethodPost, fmt.Sprintf("/api/blobs/%s", digest), r, nil); err != nil {
            return err
        }
    }

    return nil
}
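The HEAD-then-POST round trip above pairs with the digest computation that `CreateHandler` performs in `cmd/cmd.go` further down. A minimal sketch of how a caller might drive the new method, assuming an already-constructed `*api.Client`; the package and helper name `uploadLayerBlob` are illustrative, not part of this change:

```go
package example

import (
	"context"
	"crypto/sha256"
	"fmt"
	"io"
	"os"

	"github.com/jmorganca/ollama/api"
)

// uploadLayerBlob is a hypothetical helper: hash a local layer file, make sure
// the server has the blob, and return the "@sha256:..." token that a rewritten
// Modelfile can reference in place of the local path.
func uploadLayerBlob(ctx context.Context, client *api.Client, path string) (string, error) {
	bin, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer bin.Close()

	// the server recomputes this digest on upload and rejects a mismatch
	hash := sha256.New()
	if _, err := io.Copy(hash, bin); err != nil {
		return "", err
	}
	bin.Seek(0, io.SeekStart)

	digest := fmt.Sprintf("sha256:%x", hash.Sum(nil))

	// CreateBlob issues HEAD /api/blobs/<digest> and only POSTs the body
	// when the server answers 404
	if err := client.CreateBlob(ctx, digest, bin); err != nil {
		return "", err
	}

	return "@" + digest, nil
}
```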
api/types.go

@@ -99,9 +99,10 @@ type EmbeddingResponse struct {
}

type CreateRequest struct {
    Name   string `json:"name"`
    Path   string `json:"path"`
    Stream *bool  `json:"stream,omitempty"`
    Name      string `json:"name"`
    Path      string `json:"path"`
    Modelfile string `json:"modelfile"`
    Stream    *bool  `json:"stream,omitempty"`
}

type DeleteRequest struct {
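To see the wire shape the new field produces (a sketch; the "mario"/"FROM llama2" values mirror the docs/api.md example further down):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/jmorganca/ollama/api"
)

func main() {
	stream := false
	req := api.CreateRequest{
		Name:      "mario",
		Modelfile: "FROM llama2",
		Stream:    &stream,
	}

	b, _ := json.Marshal(req)
	fmt.Println(string(b))
	// {"name":"mario","path":"","modelfile":"FROM llama2","stream":false}
}
```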
72 cmd/cmd.go

@@ -1,9 +1,11 @@
package cmd

import (
    "bytes"
    "context"
    "crypto/ed25519"
    "crypto/rand"
    "crypto/sha256"
    "encoding/pem"
    "errors"
    "fmt"

@@ -27,6 +29,7 @@ import (

    "github.com/jmorganca/ollama/api"
    "github.com/jmorganca/ollama/format"
    "github.com/jmorganca/ollama/parser"
    "github.com/jmorganca/ollama/progressbar"
    "github.com/jmorganca/ollama/readline"
    "github.com/jmorganca/ollama/server"

@@ -45,17 +48,64 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
        return err
    }

    var spinner *Spinner
    modelfile, err := os.ReadFile(filename)
    if err != nil {
        return err
    }

    spinner := NewSpinner("transferring context")
    go spinner.Spin(100 * time.Millisecond)

    commands, err := parser.Parse(bytes.NewReader(modelfile))
    if err != nil {
        return err
    }

    home, err := os.UserHomeDir()
    if err != nil {
        return err
    }

    for _, c := range commands {
        switch c.Name {
        case "model", "adapter":
            path := c.Args
            if path == "~" {
                path = home
            } else if strings.HasPrefix(path, "~/") {
                path = filepath.Join(home, path[2:])
            }

            bin, err := os.Open(path)
            if errors.Is(err, os.ErrNotExist) && c.Name == "model" {
                continue
            } else if err != nil {
                return err
            }
            defer bin.Close()

            hash := sha256.New()
            if _, err := io.Copy(hash, bin); err != nil {
                return err
            }
            bin.Seek(0, io.SeekStart)

            digest := fmt.Sprintf("sha256:%x", hash.Sum(nil))
            if err = client.CreateBlob(cmd.Context(), digest, bin); err != nil {
                return err
            }

            modelfile = bytes.ReplaceAll(modelfile, []byte(c.Args), []byte("@"+digest))
        }
    }

    var currentDigest string
    var bar *progressbar.ProgressBar

    request := api.CreateRequest{Name: args[0], Path: filename}
    request := api.CreateRequest{Name: args[0], Path: filename, Modelfile: string(modelfile)}
    fn := func(resp api.ProgressResponse) error {
        if resp.Digest != currentDigest && resp.Digest != "" {
            if spinner != nil {
                spinner.Stop()
            }
            spinner.Stop()
            currentDigest = resp.Digest
            // pulling
            bar = progressbar.DefaultBytes(

@@ -67,9 +117,7 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
            bar.Set64(resp.Completed)
        } else {
            currentDigest = ""
            if spinner != nil {
                spinner.Stop()
            }
            spinner.Stop()
            spinner = NewSpinner(resp.Status)
            go spinner.Spin(100 * time.Millisecond)
        }

@@ -81,11 +129,9 @@ func CreateHandler(cmd *cobra.Command, args []string) error {
        return err
    }

    if spinner != nil {
        spinner.Stop()
        if spinner.description != "success" {
            return errors.New("unexpected end to create model")
        }
    spinner.Stop()
    if spinner.description != "success" {
        return errors.New("unexpected end to create model")
    }

    return nil
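The net effect of the new CreateHandler logic above: local `FROM`/`ADAPTER` paths in the Modelfile are replaced with blob digests before the request leaves the machine. Illustrative only; `./model.bin` and the digest are placeholders:

```
# before: the Modelfile references a local file
FROM ./model.bin

# after: CreateHandler uploads the blob, then rewrites the reference
FROM @sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2
```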
56 docs/api.md

@@ -292,12 +292,13 @@ curl -X POST http://localhost:11434/api/generate -d '{
POST /api/create
```

Create a model from a [`Modelfile`](./modelfile.md)
Create a model from a [`Modelfile`](./modelfile.md). It is recommended to set `modelfile` to the content of the Modelfile rather than just setting `path`. This is a requirement for remote create. Remote model creation must also create any file blobs referenced in fields such as `FROM` and `ADAPTER` explicitly with the server using [Create a Blob](#create-a-blob), and set the field value to the path indicated in the response.

### Parameters

- `name`: name of the model to create
- `path`: path to the Modelfile
- `path`: path to the Modelfile (deprecated: please use `modelfile` instead)
- `modelfile`: contents of the Modelfile
- `stream`: (optional) if `false` the response will be returned as a single response object, rather than a stream of objects

### Examples

@@ -307,7 +308,8 @@ Create a model from a [`Modelfile`](./modelfile.md)

```shell
curl -X POST http://localhost:11434/api/create -d '{
  "name": "mario",
  "path": "~/Modelfile"
  "path": "~/Modelfile",
  "modelfile": "FROM llama2"
}'
```

@@ -321,6 +323,54 @@ A stream of JSON objects. When finished, `status` is `success`.
}
```

### Check if a Blob Exists

```shell
HEAD /api/blobs/:digest
```

Check if a blob is known to the server.

#### Query Parameters

- `digest`: the SHA256 digest of the blob

#### Examples

##### Request

```shell
curl -I http://localhost:11434/api/blobs/sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2
```

##### Response

Returns 200 OK if the blob exists, 404 Not Found if it does not.

### Create a Blob

```shell
POST /api/blobs/:digest
```

Create a blob from a file. Returns the server file path.

#### Query Parameters

- `digest`: the expected SHA256 digest of the file

#### Examples

##### Request

```shell
curl -T model.bin -X POST http://localhost:11434/api/blobs/sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2
```

##### Response

Returns 201 Created if the blob was successfully created.

## List Local Models

```shell
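Together, the two blob endpoints above let a remote client guarantee that every file a Modelfile references exists server-side before calling `/api/create`. A minimal Go sketch of that handshake against the documented routes; the host, the `model.bin` file name, and the error handling are illustrative:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// hypothetical local layer referenced by a Modelfile
	f, err := os.Open("model.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		log.Fatal(err)
	}
	f.Seek(0, io.SeekStart)

	url := fmt.Sprintf("http://localhost:11434/api/blobs/sha256:%x", h.Sum(nil))

	// HEAD /api/blobs/:digest -> 200 OK when the server already has the blob
	head, err := http.Head(url)
	if err != nil {
		log.Fatal(err)
	}
	head.Body.Close()

	if head.StatusCode == http.StatusNotFound {
		// POST /api/blobs/:digest -> 201 Created on success
		req, err := http.NewRequest(http.MethodPost, url, f)
		if err != nil {
			log.Fatal(err)
		}

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			log.Fatal(err)
		}
		resp.Body.Close()
		log.Println("upload status:", resp.Status)
	}
}
```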
62 docs/faq.md

@@ -32,11 +32,11 @@ Create a `systemd` drop-in directory and set `Environment=OLLAMA_HOST`

```bash
mkdir -p /etc/systemd/system/ollama.service.d
echo "[Service]" >>/etc/systemd/system/ollama.service.d/environment.conf
echo '[Service]' >>/etc/systemd/system/ollama.service.d/environment.conf
```

```bash
echo "Environment=OLLAMA_HOST=0.0.0.0:11434" >>/etc/systemd/system/ollama.service.d/environment.conf
echo 'Environment="OLLAMA_HOST=0.0.0.0:11434"' >>/etc/systemd/system/ollama.service.d/environment.conf
```

Reload `systemd` and restart Ollama:

@@ -59,7 +59,7 @@ OLLAMA_ORIGINS=http://192.168.1.1:*,https://example.com ollama serve
On Linux:

```bash
echo "Environment=OLLAMA_ORIGINS=http://129.168.1.1:*,https://example.com" >>/etc/systemd/system/ollama.service.d/environment.conf
echo 'Environment="OLLAMA_ORIGINS=http://129.168.1.1:*,https://example.com"' >>/etc/systemd/system/ollama.service.d/environment.conf
```

Reload `systemd` and restart Ollama:

@@ -74,8 +74,6 @@ systemctl restart ollama
- macOS: Raw model data is stored under `~/.ollama/models`.
- Linux: Raw model data is stored under `/usr/share/ollama/.ollama/models`

Below the models directory you will find a structure similar to the following:

```shell

@@ -96,3 +94,57 @@ The manifest lists all the layers used in this model. You will see a `media type
### How can I change where Ollama stores models?

To modify where models are stored, you can use the `OLLAMA_MODELS` environment variable. Note that on Linux this means defining `OLLAMA_MODELS` in a drop-in `/etc/systemd/system/ollama.service.d` service file, reloading systemd, and restarting the ollama service.

## Does Ollama send my prompts and answers back to Ollama.ai to use in any way?

No. Anything you do with Ollama, such as generate a response from the model, stays with you. We don't collect any data about how you use the model. You are always in control of your own data.

## How can I use Ollama in Visual Studio Code?

There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. You can see the list of [extensions & plugins](https://github.com/jmorganca/ollama#extensions--plugins) at the bottom of the main repository readme.

## How do I use Ollama behind a proxy?

Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` is configured. When using either variable, ensure it is set where `ollama serve` can access the value.

When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate.

On macOS:

```bash
HTTPS_PROXY=http://proxy.example.com ollama serve
```

On Linux:

```bash
echo 'Environment="HTTPS_PROXY=https://proxy.example.com"' >>/etc/systemd/system/ollama.service.d/environment.conf
```

Reload `systemd` and restart Ollama:

```bash
systemctl daemon-reload
systemctl restart ollama
```

### How do I use Ollama behind a proxy in Docker?

The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.

Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and for the Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).

Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.

```dockerfile
FROM ollama/ollama
COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt
RUN update-ca-certificates
```

Build and run this image:

```shell
docker build -t ollama-with-ca .
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
```
31 examples/python-json-datagenerator/predefinedschema.py (new file)

@@ -0,0 +1,31 @@
import requests
import json
import random

model = "llama2"
template = {
    "firstName": "",
    "lastName": "",
    "address": {
        "street": "",
        "city": "",
        "state": "",
        "zipCode": ""
    },
    "phoneNumber": ""
}

prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in the US, and phone number. \nUse the following template: {json.dumps(template)}."

data = {
    "prompt": prompt,
    "model": model,
    "format": "json",
    "stream": False,
    "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100},
}

print(f"Generating a sample user")
response = requests.post("http://localhost:11434/api/generate", json=data, stream=False)
json_data = json.loads(response.text)
print(json.dumps(json.loads(json_data["response"]), indent=2))
31 examples/python-json-datagenerator/randomaddresses.py (new file)

@@ -0,0 +1,31 @@
import requests
import json
import random

countries = [
    "United States",
    "United Kingdom",
    "the Netherlands",
    "Germany",
    "Mexico",
    "Canada",
    "France",
]
country = random.choice(countries)
model = "llama2"

prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."

data = {
    "prompt": prompt,
    "model": model,
    "format": "json",
    "stream": False,
    "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100},
}

print(f"Generating a sample user in {country}")
response = requests.post("http://localhost:11434/api/generate", json=data, stream=False)
json_data = json.loads(response.text)

print(json.dumps(json.loads(json_data["response"]), indent=2))
34 examples/python-json-datagenerator/readme.md (new file)

@@ -0,0 +1,34 @@
# JSON Output Example

![llmjson 2023-11-10 15_31_31](https://github.com/jmorganca/ollama/assets/633681/e599d986-9b4a-4118-81a4-4cfe7e22da25)

There are two python scripts in this example. `randomaddresses.py` generates random addresses from different countries. `predefinedschema.py` sets a template for the model to fill in.

## Review the Code

Both programs are basically the same, with a different prompt for each, demonstrating two different ideas. The key part of getting JSON out of a model is to state in the prompt or system prompt that it should respond using JSON, and to specify the `format` as `json` in the data body.

```python
prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."

data = {
    "prompt": prompt,
    "model": model,
    "format": "json",
    "stream": False,
    "options": {"temperature": 2.5, "top_p": 0.99, "top_k": 100},
}
```

When running `randomaddresses.py` you will see that the schema changes and adapts to the chosen country.

In `predefinedschema.py`, a template has been specified in the prompt as well. It's been defined as JSON and then dumped into the prompt string to make it easier to work with.

Both examples turn streaming off so that we end up with the completed JSON all at once. We need to convert `response.text` to JSON so that when we output it as a string we can set the indent spacing to make the output easy to read.

```python
response = requests.post("http://localhost:11434/api/generate", json=data, stream=False)
json_data = json.loads(response.text)

print(json.dumps(json.loads(json_data["response"]), indent=2))
```
1 examples/python-json-datagenerator/requirements.txt (new file)

@@ -0,0 +1 @@
Requests==2.31.0
8 examples/python-loganalysis/Modelfile (new file)

@@ -0,0 +1,8 @@
FROM codebooga:latest

SYSTEM """
You are a log file analyzer. You will receive a set of lines from a log file for some software application, find the errors and other interesting aspects of the logs, and explain them so a new user can understand what they mean. If there are any steps they can do to resolve them, list the steps in your answer.
"""

PARAMETER TEMPERATURE 0.3
42 examples/python-loganalysis/loganalysis.py (new file)

@@ -0,0 +1,42 @@
import sys
import re
import requests
import json

# prelines and postlines represent the number of lines of context to include in the output around the error
prelines = 10
postlines = 10

def find_errors_in_log_file():
    if len(sys.argv) < 2:
        print("Usage: python loganalysis.py <filename>")
        return

    log_file_path = sys.argv[1]
    with open(log_file_path, 'r') as log_file:
        log_lines = log_file.readlines()

    error_logs = []
    for i, line in enumerate(log_lines):
        if "error" in line.lower():
            start_index = max(0, i - prelines)
            end_index = min(len(log_lines), i + postlines + 1)
            error_logs.extend(log_lines[start_index:end_index])

    return error_logs

error_logs = find_errors_in_log_file()

data = {
    "prompt": "\n".join(error_logs),
    "model": "mattw/loganalyzer"
}

response = requests.post("http://localhost:11434/api/generate", json=data, stream=True)
for line in response.iter_lines():
    if line:
        json_data = json.loads(line)
        if json_data['done'] == False:
            print(json_data['response'], end='', flush=True)
32 examples/python-loganalysis/logtest.logfile (new file)

@@ -0,0 +1,32 @@
2023-11-10 07:17:40 /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
2023-11-10 07:17:40 /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
2023-11-10 07:17:40 10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
2023-11-10 07:17:40 10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
2023-11-10 07:17:40 /docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh
2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
2023-11-10 07:17:40 /docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
2023-11-10 07:17:40 /docker-entrypoint.sh: Configuration complete; ready for start up
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: using the "epoll" event method
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: nginx/1.25.3
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14)
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: OS: Linux 6.4.16-linuxkit
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker processes
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 29
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 30
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 31
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 32
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 33
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 34
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 35
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 36
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 37
2023-11-10 07:17:40 2023/11/10 13:17:40 [notice] 1#1: start worker process 38
2023-11-10 07:17:44 192.168.65.1 - - [10/Nov/2023:13:17:43 +0000] "GET / HTTP/1.1" 200 615 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
2023-11-10 07:17:44 2023/11/10 13:17:44 [error] 29#29: *1 open() "/usr/share/nginx/html/favicon.ico" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /favicon.ico HTTP/1.1", host: "localhost:8080", referrer: "http://localhost:8080/"
2023-11-10 07:17:44 192.168.65.1 - - [10/Nov/2023:13:17:44 +0000] "GET /favicon.ico HTTP/1.1" 404 555 "http://localhost:8080/" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
2023-11-10 07:17:50 2023/11/10 13:17:50 [error] 29#29: *1 open() "/usr/share/nginx/html/ahstat" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /ahstat HTTP/1.1", host: "localhost:8080"
2023-11-10 07:17:50 192.168.65.1 - - [10/Nov/2023:13:17:50 +0000] "GET /ahstat HTTP/1.1" 404 555 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
2023-11-10 07:18:53 2023/11/10 13:18:53 [error] 29#29: *1 open() "/usr/share/nginx/html/ahstat" failed (2: No such file or directory), client: 192.168.65.1, server: localhost, request: "GET /ahstat HTTP/1.1", host: "localhost:8080"
2023-11-10 07:18:53 192.168.65.1 - - [10/Nov/2023:13:18:53 +0000] "GET /ahstat HTTP/1.1" 404 555 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36" "-"
48 examples/python-loganalysis/readme.md (new file)

@@ -0,0 +1,48 @@
# Log Analysis example

![loganalyzer 2023-11-10 08_53_29](https://github.com/jmorganca/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921)

This example shows one possible way to create a log file analyzer. To use it, run:

`python loganalysis.py <logfile>`

You can try this with the `logtest.logfile` file included in this directory.

## Review the code

The first part of this example is a Modelfile that takes `codebooga` and applies a new System Prompt:

```plaintext
SYSTEM """
You are a log file analyzer. You will receive a set of lines from a log file for some software application, find the errors and other interesting aspects of the logs, and explain them so a new user can understand what they mean. If there are any steps they can do to resolve them, list the steps in your answer.
"""
```

This model is available at https://ollama.ai/mattw/loganalyzer. You can customize it and add it to your own namespace using the command `ollama create <namespace/modelname> -f <path-to-modelfile>` and then `ollama push <namespace/modelname>`.

Then `loganalysis.py` scans all the lines in the given log file and searches for the word 'error'. When the word is found, the 10 lines before and after are set as the prompt for a call to the Generate API.

```python
data = {
    "prompt": "\n".join(error_logs),
    "model": "mattw/loganalyzer"
}
```

Finally, the streamed output is parsed and the `response` field in the output is printed to the screen.

```python
response = requests.post("http://localhost:11434/api/generate", json=data, stream=True)
for line in response.iter_lines():
    if line:
        json_data = json.loads(line)
        if json_data['done'] == False:
            print(json_data['response'], end='')
```

## Next Steps

There is a lot more that can be done here. This is a simple way to detect errors by looking for the word 'error'. Perhaps it would be interesting to find anomalous activity in the logs. It could be interesting to create embeddings for each line and compare them, looking for similar lines. Or you could look into applying Levenshtein Distance algorithms to find similar lines to help identify the anomalous ones.

Also try different models and different prompts to analyze the data. You could consider adding retrieval augmented generation (RAG) to this to help understand newer log formats.
1 examples/python-loganalysis/requirements.txt (new file)

@@ -0,0 +1 @@
Requests==2.31.0
@@ -14,6 +14,6 @@ package llm
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate git -C gguf apply ../patches/0001-metal-handle-ggml_scale-for-n-4-0-close-3754.patch
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_METAL=off -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0 -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
@@ -71,9 +71,10 @@ func chooseRunners(workDir, runnerType string) []ModelRunner {
    // IMPORTANT: the order of the runners in the array is the priority order
    switch runtime.GOOS {
    case "darwin":
        runners = []ModelRunner{
            {Path: path.Join(buildPath, "metal", "bin", "ollama-runner")},
            {Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")},
        if runtime.GOARCH == "arm64" {
            runners = []ModelRunner{{Path: path.Join(buildPath, "metal", "bin", "ollama-runner")}}
        } else {
            runners = []ModelRunner{{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")}}
        }
    case "linux":
        runners = []ModelRunner{
@@ -10,6 +10,7 @@ mkdir -p dist
for TARGETARCH in arm64 amd64; do
    GOOS=darwin GOARCH=$TARGETARCH go generate ./...
    GOOS=darwin GOARCH=$TARGETARCH go build -o dist/ollama-darwin-$TARGETARCH
    rm -rf llm/llama.cpp/*/build
done

lipo -create -output dist/ollama dist/ollama-darwin-*
@@ -181,6 +181,9 @@ install_cuda_driver_apt() {
        debian)
            status 'Enabling contrib sources...'
            $SUDO sed 's/main/contrib/' < /etc/apt/sources.list | $SUDO tee /etc/apt/sources.list.d/contrib.list > /dev/null
            if [ -f "/etc/apt/sources.list.d/debian.sources" ]; then
                $SUDO sed 's/main/contrib/' < /etc/apt/sources.list.d/debian.sources | $SUDO tee /etc/apt/sources.list.d/contrib.sources > /dev/null
            fi
            ;;
    esac
350 server/images.go

@@ -248,200 +248,181 @@ func filenameWithPath(path, f string) (string, error) {
    return f, nil
}

func CreateModel(ctx context.Context, name string, path string, fn func(resp api.ProgressResponse)) error {
    mp := ParseModelPath(name)

    var manifest *ManifestV2
    var err error
    var noprune string

    // build deleteMap to prune unused layers
    deleteMap := make(map[string]bool)

    if noprune = os.Getenv("OLLAMA_NOPRUNE"); noprune == "" {
        manifest, _, err = GetManifest(mp)
        if err != nil && !errors.Is(err, os.ErrNotExist) {
            return err
        }

        if manifest != nil {
            for _, l := range manifest.Layers {
                deleteMap[l.Digest] = true
            }
            deleteMap[manifest.Config.Digest] = true
        }
    }

    mf, err := os.Open(path)
func realpath(p string) string {
    abspath, err := filepath.Abs(p)
    if err != nil {
        fn(api.ProgressResponse{Status: fmt.Sprintf("couldn't open modelfile '%s'", path)})
        return fmt.Errorf("failed to open file: %w", err)
        return p
    }
    defer mf.Close()

    fn(api.ProgressResponse{Status: "parsing modelfile"})
    commands, err := parser.Parse(mf)
    home, err := os.UserHomeDir()
    if err != nil {
        return err
        return abspath
    }

    if p == "~" {
        return home
    } else if strings.HasPrefix(p, "~/") {
        return filepath.Join(home, p[2:])
    }

    return abspath
}

func CreateModel(ctx context.Context, name string, commands []parser.Command, fn func(resp api.ProgressResponse)) error {
    config := ConfigV2{
        Architecture: "amd64",
        OS:           "linux",
        Architecture: "amd64",
    }

    deleteMap := make(map[string]struct{})

    var layers []*LayerReader

    params := make(map[string][]string)
    var sourceParams map[string]any
    fromParams := make(map[string]any)

    for _, c := range commands {
        log.Printf("[%s] - %s\n", c.Name, c.Args)
        log.Printf("[%s] - %s", c.Name, c.Args)
        mediatype := fmt.Sprintf("application/vnd.ollama.image.%s", c.Name)

        switch c.Name {
        case "model":
            fn(api.ProgressResponse{Status: "looking for model"})
            if strings.HasPrefix(c.Args, "@") {
                blobPath, err := GetBlobsPath(strings.TrimPrefix(c.Args, "@"))
                if err != nil {
                    return err
                }

            mp := ParseModelPath(c.Args)
            mf, _, err := GetManifest(mp)
                c.Args = blobPath
            }

            bin, err := os.Open(realpath(c.Args))
            if err != nil {
                modelFile, err := filenameWithPath(path, c.Args)
                if err != nil {
                // not a file on disk so must be a model reference
                modelpath := ParseModelPath(c.Args)
                manifest, _, err := GetManifest(modelpath)
                switch {
                case errors.Is(err, os.ErrNotExist):
                    fn(api.ProgressResponse{Status: "pulling model"})
                    if err := PullModel(ctx, c.Args, &RegistryOptions{}, fn); err != nil {
                        return err
                    }

                    manifest, _, err = GetManifest(modelpath)
                    if err != nil {
                        return err
                    }
                case err != nil:
                    return err
                }
                if _, err := os.Stat(modelFile); err != nil {
                    // the model file does not exist, try pulling it
                    if errors.Is(err, os.ErrNotExist) {
                        fn(api.ProgressResponse{Status: "pulling model file"})
                        if err := PullModel(ctx, c.Args, &RegistryOptions{}, fn); err != nil {
                            return err
                        }
                        mf, _, err = GetManifest(mp)
                        if err != nil {
                            return fmt.Errorf("failed to open file after pull: %v", err)
                        }
                    } else {
                        return err
                    }
                } else {
                    // create a model from this specified file
                    fn(api.ProgressResponse{Status: "creating model layer"})
                    file, err := os.Open(modelFile)
                    if err != nil {
                        return fmt.Errorf("failed to open file: %v", err)
                    }
                    defer file.Close()

                    ggml, err := llm.DecodeGGML(file)
                    if err != nil {
                        return err
                    }

                    config.ModelFormat = ggml.Name()
                    config.ModelFamily = ggml.ModelFamily()
                    config.ModelType = ggml.ModelType()
                    config.FileType = ggml.FileType()

                    // reset the file
                    file.Seek(0, io.SeekStart)

                    l, err := CreateLayer(file)
                    if err != nil {
                        return fmt.Errorf("failed to create layer: %v", err)
                    }
                    l.MediaType = "application/vnd.ollama.image.model"
                    layers = append(layers, l)
                }
            }

            if mf != nil {
                fn(api.ProgressResponse{Status: "reading model metadata"})
                sourceBlobPath, err := GetBlobsPath(mf.Config.Digest)
                fromConfigPath, err := GetBlobsPath(manifest.Config.Digest)
                if err != nil {
                    return err
                }

                sourceBlob, err := os.Open(sourceBlobPath)
                fromConfigFile, err := os.Open(fromConfigPath)
                if err != nil {
                    return err
                }
                defer sourceBlob.Close()
                defer fromConfigFile.Close()

                var source ConfigV2
                if err := json.NewDecoder(sourceBlob).Decode(&source); err != nil {
                var fromConfig ConfigV2
                if err := json.NewDecoder(fromConfigFile).Decode(&fromConfig); err != nil {
                    return err
                }

                // copy the model metadata
                config.ModelFamily = source.ModelFamily
                config.ModelType = source.ModelType
                config.ModelFormat = source.ModelFormat
                config.FileType = source.FileType
                config.ModelFormat = fromConfig.ModelFormat
                config.ModelFamily = fromConfig.ModelFamily
                config.ModelType = fromConfig.ModelType
                config.FileType = fromConfig.FileType

                for _, l := range mf.Layers {
                    if l.MediaType == "application/vnd.ollama.image.params" {
                        sourceParamsBlobPath, err := GetBlobsPath(l.Digest)
                for _, layer := range manifest.Layers {
                    deleteMap[layer.Digest] = struct{}{}
                    if layer.MediaType == "application/vnd.ollama.image.params" {
                        fromParamsPath, err := GetBlobsPath(layer.Digest)
                        if err != nil {
                            return err
                        }

                        sourceParamsBlob, err := os.Open(sourceParamsBlobPath)
                        fromParamsFile, err := os.Open(fromParamsPath)
                        if err != nil {
                            return err
                        }
                        defer sourceParamsBlob.Close()
                        defer fromParamsFile.Close()

                        if err := json.NewDecoder(sourceParamsBlob).Decode(&sourceParams); err != nil {
                        if err := json.NewDecoder(fromParamsFile).Decode(&fromParams); err != nil {
                            return err
                        }
                    }

                    newLayer, err := GetLayerWithBufferFromLayer(l)
                    layer, err := GetLayerWithBufferFromLayer(layer)
                    if err != nil {
                        return err
                    }
                    newLayer.From = mp.GetShortTagname()
                    layers = append(layers, newLayer)
                }
            }
        case "adapter":
            fn(api.ProgressResponse{Status: fmt.Sprintf("creating model %s layer", c.Name)})

            fp, err := filenameWithPath(path, c.Args)
                    layer.From = modelpath.GetShortTagname()
                    layers = append(layers, layer)
                }

                deleteMap[manifest.Config.Digest] = struct{}{}
                continue
            }
            defer bin.Close()

            fn(api.ProgressResponse{Status: "creating model layer"})
            ggml, err := llm.DecodeGGML(bin)
            if err != nil {
                return err
            }

            // create a model from this specified file
            fn(api.ProgressResponse{Status: "creating model layer"})
            config.ModelFormat = ggml.Name()
            config.ModelFamily = ggml.ModelFamily()
            config.ModelType = ggml.ModelType()
            config.FileType = ggml.FileType()

            file, err := os.Open(fp)
            bin.Seek(0, io.SeekStart)
            layer, err := CreateLayer(bin)
            if err != nil {
                return fmt.Errorf("failed to open file: %v", err)
                return err
            }
            defer file.Close()

            l, err := CreateLayer(file)
            layer.MediaType = mediatype
            layers = append(layers, layer)
        case "adapter":
            fn(api.ProgressResponse{Status: "creating adapter layer"})
            bin, err := os.Open(realpath(c.Args))
            if err != nil {
                return fmt.Errorf("failed to create layer: %v", err)
                return err
            }
            l.MediaType = "application/vnd.ollama.image.adapter"
            layers = append(layers, l)
        case "license":
            fn(api.ProgressResponse{Status: fmt.Sprintf("creating model %s layer", c.Name)})
            mediaType := fmt.Sprintf("application/vnd.ollama.image.%s", c.Name)
            defer bin.Close()

            layer, err := CreateLayer(strings.NewReader(c.Args))
            layer, err := CreateLayer(bin)
            if err != nil {
                return err
            }

            if layer.Size > 0 {
                layer.MediaType = mediaType
                layer.MediaType = mediatype
                layers = append(layers, layer)
            }
        case "template", "system", "prompt":
            fn(api.ProgressResponse{Status: fmt.Sprintf("creating model %s layer", c.Name)})
            // remove the layer if one exists
            mediaType := fmt.Sprintf("application/vnd.ollama.image.%s", c.Name)
            layers = removeLayerFromLayers(layers, mediaType)
        case "license":
            fn(api.ProgressResponse{Status: "creating license layer"})
            layer, err := CreateLayer(strings.NewReader(c.Args))
            if err != nil {
                return err
            }

            if layer.Size > 0 {
                layer.MediaType = mediatype
                layers = append(layers, layer)
            }
        case "template", "system":
            fn(api.ProgressResponse{Status: fmt.Sprintf("creating %s layer", c.Name)})

            // remove duplicate layers
            layers = removeLayerFromLayers(layers, mediatype)

            layer, err := CreateLayer(strings.NewReader(c.Args))
            if err != nil {

@@ -449,48 +430,47 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
            }

            if layer.Size > 0 {
                layer.MediaType = mediaType
                layer.MediaType = mediatype
                layers = append(layers, layer)
            }
        default:
            // runtime parameters, build a list of args for each parameter to allow multiple values to be specified (ex: multiple stop sequences)
            params[c.Name] = append(params[c.Name], c.Args)
        }
    }

    // Create a single layer for the parameters
    if len(params) > 0 {
        fn(api.ProgressResponse{Status: "creating parameter layer"})
        fn(api.ProgressResponse{Status: "creating parameters layer"})

        layers = removeLayerFromLayers(layers, "application/vnd.ollama.image.params")
        formattedParams, err := formatParams(params)
        if err != nil {
            return fmt.Errorf("couldn't create params json: %v", err)
            return err
        }

        for k, v := range sourceParams {
        for k, v := range fromParams {
            if _, ok := formattedParams[k]; !ok {
                formattedParams[k] = v
            }
        }

        if config.ModelType == "65B" {
            if numGQA, ok := formattedParams["num_gqa"].(int); ok && numGQA == 8 {
            if gqa, ok := formattedParams["gqa"].(int); ok && gqa == 8 {
                config.ModelType = "70B"
            }
        }

        bts, err := json.Marshal(formattedParams)
        var b bytes.Buffer
        if err := json.NewEncoder(&b).Encode(formattedParams); err != nil {
            return err
        }

        fn(api.ProgressResponse{Status: "creating config layer"})
        layer, err := CreateLayer(bytes.NewReader(b.Bytes()))
        if err != nil {
            return err
        }

        l, err := CreateLayer(bytes.NewReader(bts))
        if err != nil {
            return fmt.Errorf("failed to create layer: %v", err)
        }
        l.MediaType = "application/vnd.ollama.image.params"
        layers = append(layers, l)
        layer.MediaType = "application/vnd.ollama.image.params"
        layers = append(layers, layer)
    }

    digests, err := getLayerDigests(layers)

@@ -498,36 +478,31 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
        return err
    }

    var manifestLayers []*Layer
    for _, l := range layers {
        manifestLayers = append(manifestLayers, &l.Layer)
        delete(deleteMap, l.Layer.Digest)
    }

    // Create a layer for the config object
    fn(api.ProgressResponse{Status: "creating config layer"})
    cfg, err := createConfigLayer(config, digests)
    configLayer, err := createConfigLayer(config, digests)
    if err != nil {
        return err
    }
    layers = append(layers, cfg)
    delete(deleteMap, cfg.Layer.Digest)

    layers = append(layers, configLayer)
    delete(deleteMap, configLayer.Digest)

    if err := SaveLayers(layers, fn, false); err != nil {
        return err
    }

    // Create the manifest
    var contentLayers []*Layer
    for _, layer := range layers {
        contentLayers = append(contentLayers, &layer.Layer)
        delete(deleteMap, layer.Digest)
    }

    fn(api.ProgressResponse{Status: "writing manifest"})
    err = CreateManifest(name, cfg, manifestLayers)
    if err != nil {
    if err := CreateManifest(name, configLayer, contentLayers); err != nil {
        return err
    }

    if noprune == "" {
        fn(api.ProgressResponse{Status: "removing any unused layers"})
        err = deleteUnusedLayers(nil, deleteMap, false)
        if err != nil {
    if noprune := os.Getenv("OLLAMA_NOPRUNE"); noprune == "" {
        if err := deleteUnusedLayers(nil, deleteMap, false); err != nil {
            return err
        }
    }

@@ -739,7 +714,7 @@ func CopyModel(src, dest string) error {
    return nil
}

func deleteUnusedLayers(skipModelPath *ModelPath, deleteMap map[string]bool, dryRun bool) error {
func deleteUnusedLayers(skipModelPath *ModelPath, deleteMap map[string]struct{}, dryRun bool) error {
    fp, err := GetManifestPath()
    if err != nil {
        return err

@@ -779,21 +754,19 @@ func deleteUnusedLayers(skipModelPath *ModelPath, deleteMap map[string]bool, dry
    }

    // only delete the files which are still in the deleteMap
    for k, v := range deleteMap {
        if v {
            fp, err := GetBlobsPath(k)
            if err != nil {
                log.Printf("couldn't get file path for '%s': %v", k, err)
    for k := range deleteMap {
        fp, err := GetBlobsPath(k)
        if err != nil {
            log.Printf("couldn't get file path for '%s': %v", k, err)
            continue
        }
        if !dryRun {
            if err := os.Remove(fp); err != nil {
                log.Printf("couldn't remove file '%s': %v", fp, err)
                continue
            }
            if !dryRun {
                if err := os.Remove(fp); err != nil {
                    log.Printf("couldn't remove file '%s': %v", fp, err)
                    continue
                }
            } else {
                log.Printf("wanted to remove: %s", fp)
            }
        } else {
            log.Printf("wanted to remove: %s", fp)
        }
    }

@@ -801,7 +774,7 @@ func deleteUnusedLayers(skipModelPath *ModelPath, deleteMap map[string]bool, dry
}

func PruneLayers() error {
    deleteMap := make(map[string]bool)
    deleteMap := make(map[string]struct{})
    p, err := GetBlobsPath("")
    if err != nil {
        return err

@@ -818,7 +791,9 @@ func PruneLayers() error {
        if runtime.GOOS == "windows" {
            name = strings.ReplaceAll(name, "-", ":")
        }
        deleteMap[name] = true
        if strings.HasPrefix(name, "sha256:") {
            deleteMap[name] = struct{}{}
        }
    }

    log.Printf("total blobs: %d", len(deleteMap))

@@ -873,11 +848,11 @@ func DeleteModel(name string) error {
        return err
    }

    deleteMap := make(map[string]bool)
    deleteMap := make(map[string]struct{})
    for _, layer := range manifest.Layers {
        deleteMap[layer.Digest] = true
        deleteMap[layer.Digest] = struct{}{}
    }
    deleteMap[manifest.Config.Digest] = true
    deleteMap[manifest.Config.Digest] = struct{}{}

    err = deleteUnusedLayers(&mp, deleteMap, false)
    if err != nil {

@@ -979,6 +954,9 @@ func PushModel(ctx context.Context, name string, regOpts *RegistryOptions, fn fu
    for _, layer := range layers {
        if err := uploadBlob(ctx, mp, layer, regOpts, fn); err != nil {
            log.Printf("error uploading blob: %v", err)
            if errors.Is(err, errUnauthorized) {
                return fmt.Errorf("unable to push %s, make sure this namespace exists and you are authorized to push to it", ParseModelPath(name).GetNamespaceRepository())
            }
            return err
        }
    }

@@ -1013,7 +991,7 @@ func PullModel(ctx context.Context, name string, regOpts *RegistryOptions, fn fu
    var noprune string

    // build deleteMap to prune unused layers
    deleteMap := make(map[string]bool)
    deleteMap := make(map[string]struct{})

    if noprune = os.Getenv("OLLAMA_NOPRUNE"); noprune == "" {
        manifest, _, err = GetManifest(mp)

@@ -1023,9 +1001,9 @@ func PullModel(ctx context.Context, name string, regOpts *RegistryOptions, fn fu

    if manifest != nil {
        for _, l := range manifest.Layers {
            deleteMap[l.Digest] = true
            deleteMap[l.Digest] = struct{}{}
        }
        deleteMap[manifest.Config.Digest] = true
        deleteMap[manifest.Config.Digest] = struct{}{}
    }
}

@@ -1165,7 +1143,10 @@ func GetSHA256Digest(r io.Reader) (string, int64) {
    return fmt.Sprintf("sha256:%x", h.Sum(nil)), n
}

var errUnauthorized = fmt.Errorf("unauthorized")

func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *RegistryOptions) (*http.Response, error) {
    lastErr := errMaxRetriesExceeded
    for try := 0; try < maxRetries; try++ {
        resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts)
        if err != nil {

@@ -1186,8 +1167,7 @@ func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.UR
            if body != nil {
                body.Seek(0, io.SeekStart)
            }

            continue
            lastErr = errUnauthorized
        case resp.StatusCode == http.StatusNotFound:
            return nil, os.ErrNotExist
        case resp.StatusCode >= http.StatusBadRequest:

@@ -1202,7 +1182,7 @@ func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.UR
        }
    }

    return nil, errMaxRetriesExceeded
    return nil, lastErr
}

func makeRequest(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.Reader, regOpts *RegistryOptions) (*http.Response, error) {
server/routes.go

@@ -2,6 +2,7 @@ package server

import (
    "context"
    "crypto/sha256"
    "encoding/json"
    "errors"
    "fmt"

@@ -26,6 +27,7 @@ import (

    "github.com/jmorganca/ollama/api"
    "github.com/jmorganca/ollama/llm"
    "github.com/jmorganca/ollama/parser"
    "github.com/jmorganca/ollama/version"
)

@@ -409,8 +411,31 @@ func CreateModelHandler(c *gin.Context) {
        return
    }

    if req.Name == "" || req.Path == "" {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "name and path are required"})
    if req.Name == "" {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "name is required"})
        return
    }

    if req.Path == "" && req.Modelfile == "" {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "path or modelfile are required"})
        return
    }

    var modelfile io.Reader = strings.NewReader(req.Modelfile)
    if req.Path != "" && req.Modelfile == "" {
        bin, err := os.Open(req.Path)
        if err != nil {
            c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("error reading modelfile: %s", err)})
            return
        }
        defer bin.Close()

        modelfile = bin
    }

    commands, err := parser.Parse(modelfile)
    if err != nil {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
        return
    }

@@ -424,7 +449,7 @@ func CreateModelHandler(c *gin.Context) {
        ctx, cancel := context.WithCancel(c.Request.Context())
        defer cancel()

        if err := CreateModel(ctx, req.Name, req.Path, fn); err != nil {
        if err := CreateModel(ctx, req.Name, commands, fn); err != nil {
            ch <- gin.H{"error": err.Error()}
        }
    }()

@@ -625,6 +650,60 @@ func CopyModelHandler(c *gin.Context) {
    }
}

func HeadBlobHandler(c *gin.Context) {
    path, err := GetBlobsPath(c.Param("digest"))
    if err != nil {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
        return
    }

    if _, err := os.Stat(path); err != nil {
        c.AbortWithStatusJSON(http.StatusNotFound, gin.H{"error": fmt.Sprintf("blob %q not found", c.Param("digest"))})
        return
    }

    c.Status(http.StatusOK)
}

func CreateBlobHandler(c *gin.Context) {
    hash := sha256.New()
    temp, err := os.CreateTemp("", c.Param("digest"))
    if err != nil {
        c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }
    defer temp.Close()
    defer os.Remove(temp.Name())

    if _, err := io.Copy(temp, io.TeeReader(c.Request.Body, hash)); err != nil {
        c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }

    if fmt.Sprintf("sha256:%x", hash.Sum(nil)) != c.Param("digest") {
        c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "digest does not match body"})
        return
    }

    if err := temp.Close(); err != nil {
        c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }

    targetPath, err := GetBlobsPath(c.Param("digest"))
    if err != nil {
        c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }

    if err := os.Rename(temp.Name(), targetPath); err != nil {
        c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }

    c.Status(http.StatusCreated)
}

var defaultAllowOrigins = []string{
    "localhost",
    "127.0.0.1",

@@ -684,6 +763,8 @@ func Serve(ln net.Listener, allowOrigins []string) error {
    r.POST("/api/copy", CopyModelHandler)
    r.DELETE("/api/delete", DeleteModelHandler)
    r.POST("/api/show", ShowModelHandler)
    r.POST("/api/blobs/:digest", CreateBlobHandler)
    r.HEAD("/api/blobs/:digest", HeadBlobHandler)

    for _, method := range []string{http.MethodGet, http.MethodHead} {
        r.Handle(method, "/", func(c *gin.Context) {
server/upload.go

@@ -55,7 +55,7 @@ func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *Reg
    if b.From != "" {
        values := requestURL.Query()
        values.Add("mount", b.Digest)
        values.Add("from", b.From)
        values.Add("from", ParseModelPath(b.From).GetNamespaceRepository())
        requestURL.RawQuery = values.Encode()
    }

@@ -77,6 +77,14 @@ func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *Reg

    b.Total = fi.Size()

    // http.StatusCreated indicates a blob has been mounted
    // ref: https://distribution.github.io/distribution/spec/api/#cross-repository-blob-mount
    if resp.StatusCode == http.StatusCreated {
        b.Completed.Store(b.Total)
        b.done = true
        return nil
    }

    var size = b.Total / numUploadParts
    switch {
    case size < minUploadPartSize:

@@ -260,7 +268,7 @@ func (b *blobUpload) uploadChunk(ctx context.Context, method string, requestURL
        return err
    }

    return fmt.Errorf("http status %d %s: %s", resp.StatusCode, resp.Status, body)
    return fmt.Errorf("http status %s: %s", resp.Status, body)
}

if method == http.MethodPatch {
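For reference, the query string this change produces for a cross-repository mount looks like the following; a sketch with an illustrative registry path and the digest reused from the docs examples:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	requestURL, _ := url.Parse("https://registry.ollama.ai/v2/example/mymodel/blobs/uploads/")

	values := requestURL.Query()
	values.Add("mount", "sha256:29fdb92e57cf0827ded04ae6461b5931d01fa595843f55d36f5b275a52087dd2")
	// after this change "from" carries the source namespace/repository
	// (e.g. "library/llama2") rather than the full model path
	values.Add("from", "library/llama2")
	requestURL.RawQuery = values.Encode()

	fmt.Println(requestURL.String())
	// a 201 Created response to this upload request means the registry
	// mounted the existing blob and no bytes need to be transferred
}
```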