Mirror of https://github.com/ollama/ollama.git (synced 2026-02-08 06:34:22 -05:00)

Compare commits: 38 commits, v0.1.39-rc ... language_s
| SHA1 |
|---|
| c79fd5c168 |
| 73fb9ea36e |
| 646371f56d |
| 1f5008544b |
| 45cbfc5aee |
| 6d423b383b |
| ad897080a2 |
| b7d316d98d |
| d7339fad52 |
| 92c81e8117 |
| 9db0996ed4 |
| 6f43898b17 |
| 7487229c34 |
| 8a8e7afa96 |
| c79f8c9c39 |
| 485016bfbb |
| 0165ba1651 |
| c4209d6d21 |
| 6adca97f37 |
| 9a3c8003c8 |
| d51f15257c |
| 8f440d579a |
| 4cc3be3035 |
| db2ffa79f1 |
| afd2b058b4 |
| fd5971be0b |
| 89bf98bcf2 |
| 1b2d156094 |
| 714adb8bd1 |
| 95b1133d0c |
| b37b496a12 |
| d6f692ad1a |
| 38255d2af1 |
| 73630a7e85 |
| 955c317cab |
| 9f18b88a06 |
| 353f83a9c7 |
| 3bade04e10 |
1 .github/workflows/release.yaml (vendored)

@@ -28,6 +28,7 @@ jobs:
          security unlock-keychain -p password build.keychain
          security import certificate.p12 -k build.keychain -P $MACOS_SIGNING_KEY_PASSWORD -T /usr/bin/codesign
          security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k password build.keychain
          security set-keychain-settings -lut 3600 build.keychain
      - uses: actions/setup-go@v5
        with:
          go-version-file: go.mod
30 README.md

@@ -51,15 +51,17 @@ Here are some example models that can be downloaded:
| ------------------ | ---------- | ----- | ------------------------------ |
| Llama 3 | 8B | 4.7GB | `ollama run llama3` |
| Llama 3 | 70B | 40GB | `ollama run llama3:70b` |
| Phi-3 | 3.8B | 2.3GB | `ollama run phi3` |
| Phi 3 Mini | 3.8B | 2.3GB | `ollama run phi3` |
| Phi 3 Medium | 14B | 7.9GB | `ollama run phi3:medium` |
| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
| Mistral | 7B | 4.1GB | `ollama run mistral` |
| Moondream 2 | 1.4B | 829MB | `ollama run moondream` |
| Neural Chat | 7B | 4.1GB | `ollama run neural-chat` |
| Starling | 7B | 4.1GB | `ollama run starling-lm` |
| Code Llama | 7B | 3.8GB | `ollama run codellama` |
| Llama 2 Uncensored | 7B | 3.8GB | `ollama run llama2-uncensored` |
| LLaVA | 7B | 4.5GB | `ollama run llava` |
| Gemma | 2B | 1.4GB | `ollama run gemma:2b` |
| Gemma | 7B | 4.8GB | `ollama run gemma:7b` |
| Solar | 10.7B | 6.1GB | `ollama run solar` |

> Note: You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.

@@ -192,25 +194,7 @@ ollama list

## Building

Install `cmake` and `go`:

```
brew install cmake go
```

Then generate dependencies:

```
go generate ./...
```

Then build the binary:

```
go build .
```

More detailed instructions can be found in the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)
See the [developer guide](https://github.com/ollama/ollama/blob/main/docs/development.md)

### Running local builds

@@ -299,6 +283,8 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [Ollama RAG Chatbot](https://github.com/datvodinh/rag-chatbot.git) (Local Chat with multiple PDFs using Ollama and RAG)
- [BrainSoup](https://www.nurgo-software.com/products/brainsoup) (Flexible native client with RAG & multi-agent automation)
- [macai](https://github.com/Renset/macai) (macOS client for Ollama, ChatGPT, and other compatible API back-ends)
- [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
- [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)

### Terminal

@@ -6,7 +6,7 @@ import (
    "os"
    "path/filepath"

    "github.com/ollama/ollama/server/envconfig"
    "github.com/ollama/ollama/envconfig"
)

func InitLogging() {
41 cmd/cmd.go

@@ -34,6 +34,7 @@ import (

    "github.com/ollama/ollama/api"
    "github.com/ollama/ollama/auth"
    "github.com/ollama/ollama/envconfig"
    "github.com/ollama/ollama/format"
    "github.com/ollama/ollama/parser"
    "github.com/ollama/ollama/progress"
@@ -1079,12 +1080,7 @@ func versionHandler(cmd *cobra.Command, _ []string) {
    }
}

type EnvironmentVar struct {
    Name string
    Description string
}

func appendEnvDocs(cmd *cobra.Command, envs []EnvironmentVar) {
func appendEnvDocs(cmd *cobra.Command, envs []envconfig.EnvVar) {
    if len(envs) == 0 {
        return
    }
@@ -1093,7 +1089,7 @@ func appendEnvDocs(cmd *cobra.Command, envs []EnvironmentVar) {
Environment Variables:
`
    for _, e := range envs {
        envUsage += fmt.Sprintf(" %-16s %s\n", e.Name, e.Description)
        envUsage += fmt.Sprintf(" %-24s %s\n", e.Name, e.Description)
    }

    cmd.SetUsageTemplate(cmd.UsageTemplate() + envUsage)
@@ -1172,15 +1168,6 @@ func NewCLI() *cobra.Command {
        Args: cobra.ExactArgs(0),
        RunE: RunServer,
    }
    serveCmd.SetUsageTemplate(serveCmd.UsageTemplate() + `
Environment Variables:

    OLLAMA_HOST The host:port to bind to (default "127.0.0.1:11434")
    OLLAMA_ORIGINS A comma separated list of allowed origins
    OLLAMA_MODELS The path to the models directory (default "~/.ollama/models")
    OLLAMA_KEEP_ALIVE The duration that models stay loaded in memory (default "5m")
    OLLAMA_DEBUG Set to 1 to enable additional debug logging
`)

    pullCmd := &cobra.Command{
        Use: "pull MODEL",
@@ -1233,9 +1220,9 @@ Environment Variables:
        RunE: DeleteHandler,
    }

    ollamaHostEnv := EnvironmentVar{"OLLAMA_HOST", "The host:port or base URL of the Ollama server (e.g. http://localhost:11434)"}
    ollamaNoHistoryEnv := EnvironmentVar{"OLLAMA_NOHISTORY", "Disable readline history"}
    envs := []EnvironmentVar{ollamaHostEnv}
    envVars := envconfig.AsMap()

    envs := []envconfig.EnvVar{envVars["OLLAMA_HOST"]}

    for _, cmd := range []*cobra.Command{
        createCmd,
@@ -1247,10 +1234,24 @@ Environment Variables:
        psCmd,
        copyCmd,
        deleteCmd,
        serveCmd,
    } {
        switch cmd {
        case runCmd:
            appendEnvDocs(cmd, []EnvironmentVar{ollamaHostEnv, ollamaNoHistoryEnv})
            appendEnvDocs(cmd, []envconfig.EnvVar{envVars["OLLAMA_HOST"], envVars["OLLAMA_NOHISTORY"]})
        case serveCmd:
            appendEnvDocs(cmd, []envconfig.EnvVar{
                envVars["OLLAMA_DEBUG"],
                envVars["OLLAMA_HOST"],
                envVars["OLLAMA_KEEP_ALIVE"],
                envVars["OLLAMA_MAX_LOADED_MODELS"],
                envVars["OLLAMA_MAX_QUEUE"],
                envVars["OLLAMA_MODELS"],
                envVars["OLLAMA_NUM_PARALLEL"],
                envVars["OLLAMA_NOPRUNE"],
                envVars["OLLAMA_ORIGINS"],
                envVars["OLLAMA_TMPDIR"],
            })
        default:
            appendEnvDocs(cmd, envs)
        }

@@ -15,6 +15,7 @@ import (
    "golang.org/x/exp/slices"

    "github.com/ollama/ollama/api"
    "github.com/ollama/ollama/envconfig"
    "github.com/ollama/ollama/progress"
    "github.com/ollama/ollama/readline"
    "github.com/ollama/ollama/types/errtypes"
@@ -138,6 +139,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
    fmt.Fprintln(os.Stderr, " Alt + f Move forward (right) one word")
    fmt.Fprintln(os.Stderr, " Ctrl + k Delete the sentence after the cursor")
    fmt.Fprintln(os.Stderr, " Ctrl + u Delete the sentence before the cursor")
    fmt.Fprintln(os.Stderr, " Ctrl + w Delete the word before the cursor")
    fmt.Fprintln(os.Stderr, "")
    fmt.Fprintln(os.Stderr, " Ctrl + l Clear the screen")
    fmt.Fprintln(os.Stderr, " Ctrl + c Stop the model from responding")
@@ -182,7 +184,7 @@ func generateInteractive(cmd *cobra.Command, opts runOptions) error {
        return err
    }

    if os.Getenv("OLLAMA_NOHISTORY") != "" {
    if envconfig.NoHistory {
        scanner.HistoryDisable()
    }

@@ -18,7 +18,7 @@ type Tokenizer struct {
    Model TokenizerModel `json:"model"`

    PreTokenizer struct {
        PreTokenziers []struct {
        PreTokenizers []struct {
            Type string `json:"type"`
            Pattern struct {
                Regex string `json:"Regex"`
@@ -84,7 +84,7 @@ func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, e
    }

    sha256sum := sha256.New()
    for _, pt := range t.PreTokenizer.PreTokenziers {
    for _, pt := range t.PreTokenizer.PreTokenizers {
        switch pt.Type {
        case "Split":
            if pt.Pattern.Regex != "" {

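If the renamed field carries no explicit json tag (the tag, if any, is not visible in this hunk), encoding/json matches the field name against the JSON key case-insensitively, so the old misspelling would silently decode to an empty slice. A self-contained sketch of that behaviour — the `pre_tokenizer`/`pretokenizers` key names are assumptions about the tokenizer.json layout, used only for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// doc mimics the relevant slice of a tokenizer.json file; the key names are
// assumptions made for this illustration.
const doc = `{"pre_tokenizer": {"pretokenizers": [{"type": "Split"}]}}`

type broken struct {
	PreTokenizer struct {
		PreTokenziers []struct{ Type string } // misspelled: never matches "pretokenizers"
	} `json:"pre_tokenizer"`
}

type fixed struct {
	PreTokenizer struct {
		PreTokenizers []struct{ Type string } // untagged fields match JSON keys case-insensitively
	} `json:"pre_tokenizer"`
}

func main() {
	var b broken
	var f fixed
	_ = json.Unmarshal([]byte(doc), &b)
	_ = json.Unmarshal([]byte(doc), &f)
	fmt.Println(len(b.PreTokenizer.PreTokenziers), len(f.PreTokenizer.PreTokenizers)) // prints: 0 1
}
```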
@@ -6,6 +6,8 @@ Install required tools:
- go version 1.22 or higher
- gcc version 11.4.0 or higher

### MacOS

```bash
brew install go cmake gcc
```

93 docs/faq.md

@@ -6,7 +6,7 @@ Ollama on macOS and Windows will automatically download updates. Click on the ta

On Linux, re-run the install script:

```
```shell
curl -fsSL https://ollama.com/install.sh | sh
```

@@ -30,7 +30,7 @@ To change this when using `ollama run`, use `/set parameter`:

When using the API, specify the `num_ctx` parameter:

```
```shell
curl http://localhost:11434/api/generate -d '{
  "model": "llama3",
  "prompt": "Why is the sky blue?",
@@ -40,6 +40,21 @@ curl http://localhost:11434/api/generate -d '{
}'
```

## How can I tell if my model was loaded onto the GPU?

Use the `ollama ps` command to see what models are currently loaded into memory.

```shell
ollama ps
NAME        ID            SIZE   PROCESSOR  UNTIL
llama3:70b  bcfb190ca3a7  42 GB  100% GPU   4 minutes from now
```

The `Processor` column will show which memory the model was loaded into:
* `100% GPU` means the model was loaded entirely into the GPU
* `100% CPU` means the model was loaded entirely in system memory
* `48%/52% CPU/GPU` means the model was loaded partially onto both the GPU and into system memory

## How do I configure Ollama server?

Ollama server can be configured with environment variables.
@@ -94,6 +109,34 @@ On Windows, Ollama inherits your user and system environment variables.

6. Start the Ollama application from the Windows Start menu.

## How do I use Ollama behind a proxy?

Ollama is compatible with proxy servers if `HTTP_PROXY` or `HTTPS_PROXY` are configured. When using either variable, ensure it is set where `ollama serve` can access the value. When using `HTTPS_PROXY`, ensure the proxy certificate is installed as a system certificate. Refer to the section above for how to use environment variables on your platform.

### How do I use Ollama behind a proxy in Docker?

The Ollama Docker container image can be configured to use a proxy by passing `-e HTTPS_PROXY=https://proxy.example.com` when starting the container.

Alternatively, the Docker daemon can be configured to use a proxy. Instructions are available for Docker Desktop on [macOS](https://docs.docker.com/desktop/settings/mac/#proxies), [Windows](https://docs.docker.com/desktop/settings/windows/#proxies), and [Linux](https://docs.docker.com/desktop/settings/linux/#proxies), and Docker [daemon with systemd](https://docs.docker.com/config/daemon/systemd/#httphttps-proxy).

Ensure the certificate is installed as a system certificate when using HTTPS. This may require a new Docker image when using a self-signed certificate.

```dockerfile
FROM ollama/ollama
COPY my-ca.pem /usr/local/share/ca-certificates/my-ca.crt
RUN update-ca-certificates
```

Build and run this image:

```shell
docker build -t ollama-with-ca .
docker run -d -e HTTPS_PROXY=https://my.proxy.example.com -p 11434:11434 ollama-with-ca
```
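Because the Ollama server is a Go program, its outbound HTTP requests typically pick up `HTTP_PROXY`/`HTTPS_PROXY` through Go's standard proxy resolution, which is why the variables must be visible to the `ollama serve` process itself. A minimal sketch of that standard-library behaviour (not Ollama code):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// http.ProxyFromEnvironment is what Go's default HTTP transport consults;
	// it reads HTTP_PROXY, HTTPS_PROXY, and NO_PROXY from the process environment.
	req, _ := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	proxyURL, err := http.ProxyFromEnvironment(req)
	if err != nil {
		fmt.Println("invalid proxy configuration:", err)
		return
	}
	if proxyURL == nil {
		fmt.Println("no proxy configured for this request")
		return
	}
	fmt.Println("requests would go via:", proxyURL)
}
```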

## Does Ollama send my prompts and answers back to ollama.com?

No. Ollama runs locally, and conversation data does not leave your machine.

## How can I expose Ollama on my network?

@@ -120,7 +163,7 @@ server {

Ollama can be accessed using a range of tunneling tools. For example, with Ngrok:

```
```shell
ngrok http 11434 --host-header="localhost:11434"
```

@@ -128,7 +171,7 @@ ngrok http 11434 --host-header="localhost:11434"

To use Ollama with Cloudflare Tunnel, use the `--url` and `--http-host-header` flags:

```
```shell
cloudflared tunnel --url http://localhost:11434 --http-host-header="localhost:11434"
```

@@ -150,39 +193,10 @@ If a different directory needs to be used, set the environment variable `OLLAMA_

Refer to the section [above](#how-do-i-configure-ollama-server) for how to set environment variables on your platform.

## How can I use Ollama in Visual Studio Code?

There is already a large collection of plugins available for VSCode as well as other editors that leverage Ollama. See the list of [extensions & plugins](https://github.com/ollama/ollama#extensions--plugins) at the bottom of the main repository readme.

## How do I use Ollama with GPU acceleration in Docker?

The Ollama Docker container can be configured with GPU acceleration in Linux or Windows (with WSL2). This requires the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit). See [ollama/ollama](https://hub.docker.com/r/ollama/ollama) for more details.
@@ -197,7 +211,7 @@ Open `Control Panel > Networking and Internet > View network status and tasks` a
Click on `Configure` and open the `Advanced` tab. Search through each of the properties until you find `Large Send Offload Version 2 (IPv4)` and `Large Send Offload Version 2 (IPv6)`. *Disable* both of these properties.

## How can I pre-load a model to get faster response times?
## How can I preload a model into Ollama to get faster response times?

If you are using the API you can preload a model by sending the Ollama server an empty request. This works with both the `/api/generate` and `/api/chat` API endpoints.

@@ -211,6 +225,11 @@ To use the chat completions endpoint, use:
```shell
curl http://localhost:11434/api/chat -d '{"model": "mistral"}'
```

To preload a model using the CLI, use the command:
```shell
ollama run llama3 ""
```

## How do I keep a model loaded in memory or make it unload immediately?

By default models are kept in memory for 5 minutes before being unloaded. This allows for quicker response times if you are making numerous requests to the LLM. You may, however, want to free up the memory before the 5 minutes have elapsed or keep the model loaded indefinitely. Use the `keep_alive` parameter with either the `/api/generate` or `/api/chat` API endpoint to control how long the model is left in memory.
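For programmatic callers, the same `keep_alive` field can be sent in the request body. A minimal Go sketch against the JSON API described above; the model name and the `"10m"` value (same duration format as `OLLAMA_KEEP_ALIVE`, default `"5m"`) are just sample values:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// An empty prompt just loads the model and returns; keep_alive controls
	// how long it then stays resident in memory.
	body, _ := json.Marshal(map[string]any{
		"model":      "llama3",
		"prompt":     "",
		"keep_alive": "10m",
		"stream":     false,
	})
	resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```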
@@ -235,8 +254,6 @@ Alternatively, you can change the amount of time all models are loaded into memo

If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` API parameter with the `/api/generate` or `/api/chat` API endpoints.

## How do I manage the maximum number of requests the server can queue
## How do I manage the maximum number of requests the Ollama server can queue?

If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queued by setting `OLLAMA_MAX_QUEUE`.
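Clients that might exceed the queue limit can watch for that 503 response and retry after a delay. A small illustrative Go sketch; the endpoint, model, and back-off are placeholders:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

func main() {
	payload := []byte(`{"model": "llama3", "prompt": "hello", "stream": false}`)
	for attempt := 1; attempt <= 5; attempt++ {
		resp, err := http.Post("http://localhost:11434/api/generate", "application/json", bytes.NewReader(payload))
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		if resp.StatusCode != http.StatusServiceUnavailable {
			fmt.Println("final status:", resp.Status)
			return
		}
		// 503 means the server's queue (OLLAMA_MAX_QUEUE) is full; back off and retry.
		fmt.Println("server overloaded, retrying...")
		time.Sleep(time.Duration(attempt) * time.Second)
	}
	fmt.Println("giving up after repeated 503 responses")
}
```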
@@ -1,104 +1,86 @@
# How to troubleshoot issues

Sometimes Ollama may not perform as expected. One of the best ways to figure out what happened is to take a look at the logs. Find the logs on **Mac** by running the command:

```shell
cat ~/.ollama/logs/server.log
```

On **Linux** systems with systemd, the logs can be found with this command:

```shell
journalctl -u ollama
```

When you run Ollama in a **container**, the logs go to stdout/stderr in the container:

```shell
docker logs <container-name>
```
(Use `docker ps` to find the container name)

If manually running `ollama serve` in a terminal, the logs will be on that terminal.

When you run Ollama on **Windows**, there are a few different locations. You can view them in the explorer window by hitting `<cmd>+R` and typing in:
- `explorer %LOCALAPPDATA%\Ollama` to view logs
- `explorer %LOCALAPPDATA%\Programs\Ollama` to browse the binaries (the installer adds this to your user PATH)
- `explorer %HOMEPATH%\.ollama` to browse where models and configuration are stored
- `explorer %TEMP%` where temporary executable files are stored in one or more `ollama*` directories

To enable additional debug logging to help troubleshoot problems, first **quit the running app from the tray menu**, then run the following in a PowerShell terminal:
```powershell
$env:OLLAMA_DEBUG="1"
& "ollama app.exe"
```

Join the [Discord](https://discord.gg/ollama) for help interpreting the logs.

## LLM libraries

Ollama includes multiple LLM libraries compiled for different GPUs and CPU vector features. Ollama tries to pick the best one based on the capabilities of your system. If this autodetection has problems, or you run into other problems (e.g. crashes in your GPU), you can work around this by forcing a specific LLM library. `cpu_avx2` will perform the best, followed by `cpu_avx`; the slowest but most compatible is `cpu`. Rosetta emulation under MacOS will work with the `cpu` library.

In the server log, you will see a message that looks something like this (varies from release to release):

```
Dynamic LLM libraries [rocm_v6 cpu cpu_avx cpu_avx2 cuda_v11 rocm_v5]
```

**Experimental LLM Library Override**

You can set OLLAMA_LLM_LIBRARY to any of the available LLM libraries to bypass autodetection, so for example, if you have a CUDA card but want to force the CPU LLM library with AVX2 vector support, use:

```
OLLAMA_LLM_LIBRARY="cpu_avx2" ollama serve
```

You can see what features your CPU has with the following:
```
cat /proc/cpuinfo | grep flags | head -1
```

## Installing older or pre-release versions on Linux

If you run into problems on Linux and want to install an older version, or you'd like to try out a pre-release before it's officially released, you can tell the install script which version to install.

```sh
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
```

## Linux tmp noexec

If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as, for example OLLAMA_TMPDIR=/usr/share/ollama/.

## Container fails to run on NVIDIA GPU

Make sure you've set up the container runtime first as described in [docker.md](./docker.md)

Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem:

- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
- Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
- Try rebooting
- Make sure you're running the latest nvidia drivers

If none of those resolve the problem, gather additional information and file an issue:
- Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
- Check dmesg for any errors: `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`

@@ -15,6 +15,10 @@ var (
    AllowOrigins []string
    // Set via OLLAMA_DEBUG in the environment
    Debug bool
    // Experimental flash attention
    FlashAttention bool
    // Set via OLLAMA_KEEP_ALIVE in the environment
    KeepAlive string
    // Set via OLLAMA_LLM_LIBRARY in the environment
    LLMLibrary string
    // Set via OLLAMA_MAX_LOADED_MODELS in the environment
@@ -23,6 +27,8 @@ var (
    MaxQueuedRequests int
    // Set via OLLAMA_MAX_VRAM in the environment
    MaxVRAM uint64
    // Set via OLLAMA_NOHISTORY in the environment
    NoHistory bool
    // Set via OLLAMA_NOPRUNE in the environment
    NoPrune bool
    // Set via OLLAMA_NUM_PARALLEL in the environment
@@ -33,21 +39,40 @@ var (
    TmpDir string
)

func AsMap() map[string]string {
    return map[string]string{
        "OLLAMA_ORIGINS": fmt.Sprintf("%v", AllowOrigins),
        "OLLAMA_DEBUG": fmt.Sprintf("%v", Debug),
        "OLLAMA_LLM_LIBRARY": fmt.Sprintf("%v", LLMLibrary),
        "OLLAMA_MAX_LOADED_MODELS": fmt.Sprintf("%v", MaxRunners),
        "OLLAMA_MAX_QUEUE": fmt.Sprintf("%v", MaxQueuedRequests),
        "OLLAMA_MAX_VRAM": fmt.Sprintf("%v", MaxVRAM),
        "OLLAMA_NOPRUNE": fmt.Sprintf("%v", NoPrune),
        "OLLAMA_NUM_PARALLEL": fmt.Sprintf("%v", NumParallel),
        "OLLAMA_RUNNERS_DIR": fmt.Sprintf("%v", RunnersDir),
        "OLLAMA_TMPDIR": fmt.Sprintf("%v", TmpDir),
type EnvVar struct {
    Name string
    Value any
    Description string
}

func AsMap() map[string]EnvVar {
    return map[string]EnvVar{
        "OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
        "OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
        "OLLAMA_HOST": {"OLLAMA_HOST", "", "IP Address for the ollama server (default 127.0.0.1:11434)"},
        "OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
        "OLLAMA_LLM_LIBRARY": {"OLLAMA_ORIGINS", LLMLibrary, ""},
        "OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
        "OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
        "OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, ""},
        "OLLAMA_MODELS": {"OLLAMA_MODELS", "", "The path to the models directory"},
        "OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
        "OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
        "OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},
        "OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
        "OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, ""},
        "OLLAMA_TMPDIR": {"OLLAMA_TMPDIR", TmpDir, "Location for temporary files"},
    }
}

func Values() map[string]string {
    vals := make(map[string]string)
    for k, v := range AsMap() {
        vals[k] = fmt.Sprintf("%v", v.Value)
    }
    return vals
}

var defaultAllowOrigins = []string{
    "localhost",
    "127.0.0.1",
@@ -78,6 +103,13 @@ func LoadConfig() {
        }
    }

    if fa := clean("OLLAMA_FLASH_ATTENTION"); fa != "" {
        d, err := strconv.ParseBool(fa)
        if err == nil {
            FlashAttention = d
        }
    }

    RunnersDir = clean("OLLAMA_RUNNERS_DIR")
    if runtime.GOOS == "windows" && RunnersDir == "" {
        // On Windows we do not carry the payloads inside the main executable
@@ -137,6 +169,10 @@ func LoadConfig() {
        }
    }

    if nohistory := clean("OLLAMA_NOHISTORY"); nohistory != "" {
        NoHistory = true
    }

    if noprune := clean("OLLAMA_NOPRUNE"); noprune != "" {
        NoPrune = true
    }
@@ -171,4 +207,6 @@ func LoadConfig() {
            MaxQueuedRequests = p
        }
    }

    KeepAlive = clean("OLLAMA_KEEP_ALIVE")
}
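The switch from `map[string]string` to structured `EnvVar` values lets callers render the name, current value, and help text together, which is what the `appendEnvDocs` change above consumes. A small self-contained sketch of that pattern — the variable values below are sample data, not Ollama defaults:

```go
package main

import (
	"fmt"
	"sort"
)

// EnvVar mirrors the shape introduced in envconfig: a name, the current value,
// and a description used for CLI help text.
type EnvVar struct {
	Name        string
	Value       any
	Description string
}

func main() {
	vars := map[string]EnvVar{
		"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", 512, "Maximum number of queued requests"},
		"OLLAMA_TMPDIR":    {"OLLAMA_TMPDIR", "", "Location for temporary files"},
	}

	keys := make([]string, 0, len(vars))
	for k := range vars {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		v := vars[k]
		// %-24s matches the wider column appendEnvDocs now uses for help output.
		fmt.Printf("  %-24s %s (current: %v)\n", v.Name, v.Description, v.Value)
	}
}
```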
@@ -17,4 +17,7 @@ func TestConfig(t *testing.T) {
    t.Setenv("OLLAMA_DEBUG", "1")
    LoadConfig()
    require.True(t, Debug)
    t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
    LoadConfig()
    require.True(t, FlashAttention)
}
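The new `OLLAMA_NOHISTORY` handling added to `LoadConfig` above (any non-empty value disables history) could be covered with the same Setenv-then-LoadConfig pattern. A sketch, not part of this diff, assuming the test file's `envconfig` package and the `testify/require` import already used above:

```go
package envconfig

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// LoadConfig treats any non-empty OLLAMA_NOHISTORY value as "disable history".
func TestNoHistory(t *testing.T) {
	t.Setenv("OLLAMA_NOHISTORY", "1")
	LoadConfig()
	require.True(t, NoHistory)
}
```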
@@ -9,6 +9,7 @@ def chat(messages):
    r = requests.post(
        "http://0.0.0.0:11434/api/chat",
        json={"model": model, "messages": messages, "stream": True},
        stream=True
    )
    r.raise_for_status()
    output = ""

@@ -13,7 +13,7 @@ import (
    "syscall"
    "time"

    "github.com/ollama/ollama/server/envconfig"
    "github.com/ollama/ollama/envconfig"
)

var (

128
gpu/gpu.go
128
gpu/gpu.go
@@ -16,18 +16,20 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ollama/ollama/format"
|
||||
"github.com/ollama/ollama/server/envconfig"
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
)
|
||||
|
||||
type handles struct {
|
||||
deviceCount int
|
||||
cudart *C.cudart_handle_t
|
||||
nvcuda *C.nvcuda_handle_t
|
||||
oneapi *C.oneapi_handle_t
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -80,6 +82,15 @@ var NvcudaWindowsGlobs = []string{
|
||||
"c:\\windows\\system*\\nvcuda.dll",
|
||||
}
|
||||
|
||||
var OneapiWindowsGlobs = []string{
|
||||
"c:\\Windows\\System32\\DriverStore\\FileRepository\\*\\ze_intel_gpu64.dll",
|
||||
}
|
||||
|
||||
var OneapiLinuxGlobs = []string{
|
||||
"/usr/lib/x86_64-linux-gnu/libze_intel_gpu.so*",
|
||||
"/usr/lib*/libze_intel_gpu.so*",
|
||||
}
|
||||
|
||||
// Jetson devices have JETSON_JETPACK="x.y.z" factory set to the Jetpack version installed.
|
||||
// Included to drive logic for reducing Ollama-allocated overhead on L4T/Jetson devices.
|
||||
var CudaTegra string = os.Getenv("JETSON_JETPACK")
|
||||
@@ -94,6 +105,8 @@ func initGPUHandles() *handles {
|
||||
var cudartMgmtPatterns []string
|
||||
var nvcudaMgmtName string
|
||||
var nvcudaMgmtPatterns []string
|
||||
var oneapiMgmtName string
|
||||
var oneapiMgmtPatterns []string
|
||||
|
||||
tmpDir, _ := PayloadsDir()
|
||||
switch runtime.GOOS {
|
||||
@@ -105,6 +118,8 @@ func initGPUHandles() *handles {
|
||||
// Aligned with driver, we can't carry as payloads
|
||||
nvcudaMgmtName = "nvcuda.dll"
|
||||
nvcudaMgmtPatterns = NvcudaWindowsGlobs
|
||||
oneapiMgmtName = "ze_intel_gpu64.dll"
|
||||
oneapiMgmtPatterns = OneapiWindowsGlobs
|
||||
case "linux":
|
||||
cudartMgmtName = "libcudart.so*"
|
||||
if tmpDir != "" {
|
||||
@@ -115,6 +130,8 @@ func initGPUHandles() *handles {
|
||||
// Aligned with driver, we can't carry as payloads
|
||||
nvcudaMgmtName = "libcuda.so*"
|
||||
nvcudaMgmtPatterns = NvcudaLinuxGlobs
|
||||
oneapiMgmtName = "libze_intel_gpu.so"
|
||||
oneapiMgmtPatterns = OneapiLinuxGlobs
|
||||
default:
|
||||
return gpuHandles
|
||||
}
|
||||
@@ -141,6 +158,18 @@ func initGPUHandles() *handles {
|
||||
return gpuHandles
|
||||
}
|
||||
}
|
||||
|
||||
oneapiLibPaths := FindGPULibs(oneapiMgmtName, oneapiMgmtPatterns)
|
||||
if len(oneapiLibPaths) > 0 {
|
||||
deviceCount, oneapi, libPath := LoadOneapiMgmt(oneapiLibPaths)
|
||||
if oneapi != nil {
|
||||
slog.Debug("detected Intel GPUs", "library", libPath, "count", deviceCount)
|
||||
gpuHandles.oneapi = oneapi
|
||||
gpuHandles.deviceCount = deviceCount
|
||||
return gpuHandles
|
||||
}
|
||||
}
|
||||
|
||||
return gpuHandles
|
||||
}
|
||||
|
||||
@@ -181,39 +210,53 @@ func GetGPUInfo() GpuInfoList {
|
||||
if cpuVariant == "" && runtime.GOARCH == "amd64" {
|
||||
continue
|
||||
}
|
||||
gpuInfo := GpuInfo{
|
||||
Library: "cuda",
|
||||
}
|
||||
var driverMajor int
|
||||
var driverMinor int
|
||||
if gpuHandles.cudart != nil {
|
||||
C.cudart_check_vram(*gpuHandles.cudart, C.int(i), &memInfo)
|
||||
} else {
|
||||
C.nvcuda_check_vram(*gpuHandles.nvcuda, C.int(i), &memInfo)
|
||||
driverMajor = int(gpuHandles.nvcuda.driver_major)
|
||||
driverMinor = int(gpuHandles.nvcuda.driver_minor)
|
||||
}
|
||||
if memInfo.err != nil {
|
||||
slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
|
||||
C.free(unsafe.Pointer(memInfo.err))
|
||||
continue
|
||||
}
|
||||
if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
|
||||
slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
|
||||
continue
|
||||
}
|
||||
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||
gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
|
||||
gpuInfo.MinimumMemory = cudaMinimumMemory
|
||||
gpuInfo.DependencyPath = depPath
|
||||
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
|
||||
gpuInfo.DriverMajor = int(driverMajor)
|
||||
gpuInfo.DriverMinor = int(driverMinor)
|
||||
if gpuHandles.cudart != nil || gpuHandles.nvcuda != nil {
|
||||
gpuInfo := GpuInfo{
|
||||
Library: "cuda",
|
||||
}
|
||||
var driverMajor int
|
||||
var driverMinor int
|
||||
if gpuHandles.cudart != nil {
|
||||
C.cudart_check_vram(*gpuHandles.cudart, C.int(i), &memInfo)
|
||||
} else {
|
||||
C.nvcuda_check_vram(*gpuHandles.nvcuda, C.int(i), &memInfo)
|
||||
driverMajor = int(gpuHandles.nvcuda.driver_major)
|
||||
driverMinor = int(gpuHandles.nvcuda.driver_minor)
|
||||
}
|
||||
if memInfo.err != nil {
|
||||
slog.Info("error looking up nvidia GPU memory", "error", C.GoString(memInfo.err))
|
||||
C.free(unsafe.Pointer(memInfo.err))
|
||||
continue
|
||||
}
|
||||
if memInfo.major < CudaComputeMin[0] || (memInfo.major == CudaComputeMin[0] && memInfo.minor < CudaComputeMin[1]) {
|
||||
slog.Info(fmt.Sprintf("[%d] CUDA GPU is too old. Compute Capability detected: %d.%d", i, memInfo.major, memInfo.minor))
|
||||
continue
|
||||
}
|
||||
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||
gpuInfo.ID = C.GoString(&memInfo.gpu_id[0])
|
||||
gpuInfo.Compute = fmt.Sprintf("%d.%d", memInfo.major, memInfo.minor)
|
||||
gpuInfo.MinimumMemory = cudaMinimumMemory
|
||||
gpuInfo.DependencyPath = depPath
|
||||
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
|
||||
gpuInfo.DriverMajor = int(driverMajor)
|
||||
gpuInfo.DriverMinor = int(driverMinor)
|
||||
|
||||
// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
|
||||
resp = append(resp, gpuInfo)
|
||||
// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
|
||||
resp = append(resp, gpuInfo)
|
||||
}
|
||||
if gpuHandles.oneapi != nil {
|
||||
gpuInfo := GpuInfo{
|
||||
Library: "oneapi",
|
||||
}
|
||||
C.oneapi_check_vram(*gpuHandles.oneapi, &memInfo)
|
||||
var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
|
||||
memInfo.free = C.uint64_t(totalFreeMem)
|
||||
gpuInfo.TotalMemory = uint64(memInfo.total)
|
||||
gpuInfo.FreeMemory = uint64(memInfo.free)
|
||||
gpuInfo.ID = strconv.Itoa(i)
|
||||
resp = append(resp, gpuInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// Then AMD
|
||||
@@ -348,6 +391,23 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
|
||||
return 0, nil, ""
|
||||
}
|
||||
|
||||
func LoadOneapiMgmt(oneapiLibPaths []string) (int, *C.oneapi_handle_t, string) {
|
||||
var resp C.oneapi_init_resp_t
|
||||
resp.oh.verbose = getVerboseState()
|
||||
for _, libPath := range oneapiLibPaths {
|
||||
lib := C.CString(libPath)
|
||||
defer C.free(unsafe.Pointer(lib))
|
||||
C.oneapi_init(lib, &resp)
|
||||
if resp.err != nil {
|
||||
slog.Debug("Unable to load oneAPI management library", "library", libPath, "error", C.GoString(resp.err))
|
||||
C.free(unsafe.Pointer(resp.err))
|
||||
} else {
|
||||
return int(resp.num_devices), &resp.oh, libPath
|
||||
}
|
||||
}
|
||||
return 0, nil, ""
|
||||
}
|
||||
|
||||
func getVerboseState() C.uint16_t {
|
||||
if envconfig.Debug {
|
||||
return C.uint16_t(1)
|
||||
@@ -368,6 +428,8 @@ func (l GpuInfoList) GetVisibleDevicesEnv() (string, string) {
|
||||
return cudaGetVisibleDevicesEnv(l)
|
||||
case "rocm":
|
||||
return rocmGetVisibleDevicesEnv(l)
|
||||
case "oneapi":
|
||||
return oneapiGetVisibleDevicesEnv(l)
|
||||
default:
|
||||
slog.Debug("no filter required for library " + l[0].Library)
|
||||
return "", ""
|
||||
|
||||
@@ -62,6 +62,7 @@ void cpu_check_ram(mem_info_t *resp);

#include "gpu_info_cudart.h"
#include "gpu_info_nvcuda.h"
#include "gpu_info_oneapi.h"

#endif // __GPU_INFO_H__
#endif // __APPLE__
214
gpu/gpu_info_oneapi.c
Normal file
214
gpu/gpu_info_oneapi.c
Normal file
@@ -0,0 +1,214 @@
|
||||
#ifndef __APPLE__
|
||||
|
||||
#include "gpu_info_oneapi.h"
|
||||
|
||||
#include <string.h>
|
||||
|
||||
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp)
|
||||
{
|
||||
ze_result_t ret;
|
||||
resp->err = NULL;
|
||||
const int buflen = 256;
|
||||
char buf[buflen + 1];
|
||||
int i;
|
||||
struct lookup
|
||||
{
|
||||
char *s;
|
||||
void **p;
|
||||
} l[] = {
|
||||
{"zesInit", (void *)&resp->oh.zesInit},
|
||||
{"zesDriverGet", (void *)&resp->oh.zesDriverGet},
|
||||
{"zesDeviceGet", (void *)&resp->oh.zesDeviceGet},
|
||||
{"zesDeviceGetProperties", (void *)&resp->oh.zesDeviceGetProperties},
|
||||
{"zesDeviceEnumMemoryModules",
|
||||
(void *)&resp->oh.zesDeviceEnumMemoryModules},
|
||||
{"zesMemoryGetProperties", (void *)&resp->oh.zesMemoryGetProperties},
|
||||
{"zesMemoryGetState", (void *)&resp->oh.zesMemoryGetState},
|
||||
{NULL, NULL},
|
||||
};
|
||||
|
||||
resp->oh.handle = LOAD_LIBRARY(oneapi_lib_path, RTLD_LAZY);
|
||||
if (!resp->oh.handle)
|
||||
{
|
||||
char *msg = LOAD_ERR();
|
||||
snprintf(buf, buflen,
|
||||
"Unable to load %s library to query for Intel GPUs: %s\n",
|
||||
oneapi_lib_path, msg);
|
||||
free(msg);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO once we've squashed the remaining corner cases remove this log
|
||||
LOG(resp->oh.verbose,
|
||||
"wiring Level-Zero management library functions in %s\n",
|
||||
oneapi_lib_path);
|
||||
|
||||
for (i = 0; l[i].s != NULL; i++)
|
||||
{
|
||||
// TODO once we've squashed the remaining corner cases remove this log
|
||||
LOG(resp->oh.verbose, "dlsym: %s\n", l[i].s);
|
||||
|
||||
*l[i].p = LOAD_SYMBOL(resp->oh.handle, l[i].s);
|
||||
if (!l[i].p)
|
||||
{
|
||||
resp->oh.handle = NULL;
|
||||
char *msg = LOAD_ERR();
|
||||
LOG(resp->oh.verbose, "dlerr: %s\n", msg);
|
||||
UNLOAD_LIBRARY(resp->oh.handle);
|
||||
snprintf(buf, buflen, "symbol lookup for %s failed: %s", l[i].s, msg);
|
||||
free(msg);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
ret = (*resp->oh.zesInit)(0);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
LOG(resp->oh.verbose, "zesInit err: %d\n", ret);
|
||||
UNLOAD_LIBRARY(resp->oh.handle);
|
||||
resp->oh.handle = NULL;
|
||||
snprintf(buf, buflen, "oneapi vram init failure: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
}
|
||||
|
||||
(*resp->oh.zesDriverGet)(&resp->num_devices, NULL);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
void oneapi_check_vram(oneapi_handle_t h, mem_info_t *resp)
|
||||
{
|
||||
ze_result_t ret;
|
||||
resp->err = NULL;
|
||||
uint64_t totalMem = 0;
|
||||
uint64_t usedMem = 0;
|
||||
const int buflen = 256;
|
||||
char buf[buflen + 1];
|
||||
int i, d, m;
|
||||
|
||||
if (h.handle == NULL)
|
||||
{
|
||||
resp->err = strdup("Level-Zero handle not initialized");
|
||||
return;
|
||||
}
|
||||
|
||||
uint32_t driversCount = 0;
|
||||
ret = (*h.zesDriverGet)(&driversCount, NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get driver count: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
return;
|
||||
}
|
||||
LOG(h.verbose, "discovered %d Level-Zero drivers\n", driversCount);
|
||||
|
||||
zes_driver_handle_t *allDrivers =
|
||||
malloc(driversCount * sizeof(zes_driver_handle_t));
|
||||
(*h.zesDriverGet)(&driversCount, allDrivers);
|
||||
|
||||
resp->total = 0;
|
||||
resp->free = 0;
|
||||
|
||||
for (d = 0; d < driversCount; d++)
|
||||
{
|
||||
uint32_t deviceCount = 0;
|
||||
ret = (*h.zesDeviceGet)(allDrivers[d], &deviceCount, NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get device count: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG(h.verbose, "discovered %d Level-Zero devices\n", deviceCount);
|
||||
|
||||
zes_device_handle_t *devices =
|
||||
malloc(deviceCount * sizeof(zes_device_handle_t));
|
||||
(*h.zesDeviceGet)(allDrivers[d], &deviceCount, devices);
|
||||
|
||||
for (i = 0; i < deviceCount; i++)
|
||||
{
|
||||
zes_device_ext_properties_t ext_props;
|
||||
ext_props.stype = ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES;
|
||||
ext_props.pNext = NULL;
|
||||
|
||||
zes_device_properties_t props;
|
||||
props.stype = ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES;
|
||||
props.pNext = &ext_props;
|
||||
|
||||
ret = (*h.zesDeviceGetProperties)(devices[i], &props);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get device properties: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
free(devices);
|
||||
return;
|
||||
}
|
||||
|
||||
if (h.verbose)
|
||||
{
|
||||
// When in verbose mode, report more information about
|
||||
// the card we discover.
|
||||
LOG(h.verbose, "[%d] oneAPI device name: %s\n", i,
|
||||
props.modelName);
|
||||
LOG(h.verbose, "[%d] oneAPI brand: %s\n", i,
|
||||
props.brandName);
|
||||
LOG(h.verbose, "[%d] oneAPI vendor: %s\n", i,
|
||||
props.vendorName);
|
||||
LOG(h.verbose, "[%d] oneAPI S/N: %s\n", i,
|
||||
props.serialNumber);
|
||||
LOG(h.verbose, "[%d] oneAPI board number: %s\n", i,
|
||||
props.boardNumber);
|
||||
}
|
||||
|
||||
uint32_t memCount = 0;
|
||||
ret = (*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, NULL);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen,
|
||||
"unable to enumerate Level-Zero memory modules: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
free(devices);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG(h.verbose, "discovered %d Level-Zero memory modules\n", memCount);
|
||||
|
||||
zes_mem_handle_t *mems = malloc(memCount * sizeof(zes_mem_handle_t));
|
||||
(*h.zesDeviceEnumMemoryModules)(devices[i], &memCount, mems);
|
||||
|
||||
for (m = 0; m < memCount; m++)
|
||||
{
|
||||
zes_mem_state_t state;
|
||||
state.stype = ZES_STRUCTURE_TYPE_MEM_STATE;
|
||||
state.pNext = NULL;
|
||||
ret = (*h.zesMemoryGetState)(mems[m], &state);
|
||||
if (ret != ZE_RESULT_SUCCESS)
|
||||
{
|
||||
snprintf(buf, buflen, "unable to get memory state: %d", ret);
|
||||
resp->err = strdup(buf);
|
||||
free(allDrivers);
|
||||
free(devices);
|
||||
free(mems);
|
||||
return;
|
||||
}
|
||||
|
||||
resp->total += state.size;
|
||||
resp->free += state.free;
|
||||
}
|
||||
|
||||
free(mems);
|
||||
}
|
||||
|
||||
free(devices);
|
||||
}
|
||||
|
||||
free(allDrivers);
|
||||
}
|
||||
|
||||
#endif // __APPLE__
|
||||
211
gpu/gpu_info_oneapi.h
Normal file
211
gpu/gpu_info_oneapi.h
Normal file
@@ -0,0 +1,211 @@
|
||||
#ifndef __APPLE__
|
||||
#ifndef __GPU_INFO_ONEAPI_H__
|
||||
#define __GPU_INFO_ONEAPI_H__
|
||||
#include "gpu_info.h"
|
||||
|
||||
#define ZE_MAX_DEVICE_NAME 256
|
||||
#define ZE_MAX_DEVICE_UUID_SIZE 16
|
||||
#define ZES_STRING_PROPERTY_SIZE 64
|
||||
#define ZE_BIT(_i) (1 << _i)
|
||||
|
||||
// Just enough typedef's to dlopen/dlsym for memory information
|
||||
typedef enum ze_result_t
|
||||
{
|
||||
ZE_RESULT_SUCCESS = 0,
|
||||
// Other values omitted for now...
|
||||
} ze_result_t;
|
||||
|
||||
typedef uint8_t ze_bool_t;
|
||||
typedef struct _zes_driver_handle_t *zes_driver_handle_t;
|
||||
typedef struct _zes_device_handle_t *zes_device_handle_t;
|
||||
typedef struct _zes_mem_handle_t *zes_mem_handle_t;
|
||||
|
||||
typedef enum _ze_structure_type_t
|
||||
{
|
||||
ZE_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} ze_structure_type_t;
|
||||
|
||||
typedef enum _zes_structure_type_t
|
||||
{
|
||||
ZES_STRUCTURE_TYPE_DEVICE_PROPERTIES = 0x1,
|
||||
ZES_STRUCTURE_TYPE_MEM_PROPERTIES = 0xb,
|
||||
ZES_STRUCTURE_TYPE_MEM_STATE = 0x1e,
|
||||
ZES_STRUCTURE_TYPE_DEVICE_EXT_PROPERTIES = 0x2d,
|
||||
ZES_STRUCTURE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_structure_type_t;
|
||||
|
||||
typedef enum _zes_mem_type_t
|
||||
{
|
||||
ZES_MEM_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_mem_type_t;
|
||||
|
||||
typedef enum _zes_mem_loc_t
|
||||
{
|
||||
ZES_MEM_LOC_SYSTEM = 0,
|
||||
ZES_MEM_LOC_DEVICE = 1,
|
||||
ZES_MEM_LOC_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_mem_loc_t;
|
||||
|
||||
typedef enum _zes_mem_health_t
|
||||
{
|
||||
ZES_MEM_HEALTH_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_mem_health_t;
|
||||
|
||||
typedef struct _ze_device_uuid_t
|
||||
{
|
||||
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||
} ze_device_uuid_t;
|
||||
|
||||
typedef struct _zes_uuid_t
|
||||
{
|
||||
uint8_t id[ZE_MAX_DEVICE_UUID_SIZE];
|
||||
} zes_uuid_t;
|
||||
|
||||
typedef enum _ze_device_type_t
|
||||
{
|
||||
ZE_DEVICE_TYPE_GPU = 1,
|
||||
ZE_DEVICE_TYPE_CPU = 2,
|
||||
ZE_DEVICE_TYPE_FPGA = 3,
|
||||
ZE_DEVICE_TYPE_MCA = 4,
|
||||
ZE_DEVICE_TYPE_VPU = 5,
|
||||
ZE_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} ze_device_type_t;
|
||||
|
||||
typedef enum _zes_device_type_t
|
||||
{
|
||||
ZES_DEVICE_TYPE_GPU = 1,
|
||||
ZES_DEVICE_TYPE_CPU = 2,
|
||||
ZES_DEVICE_TYPE_FPGA = 3,
|
||||
ZES_DEVICE_TYPE_MCA = 4,
|
||||
ZES_DEVICE_TYPE_VPU = 5,
|
||||
ZES_DEVICE_TYPE_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_device_type_t;
|
||||
|
||||
typedef uint32_t ze_device_property_flags_t;
|
||||
typedef enum _ze_device_property_flag_t
|
||||
{
|
||||
ZE_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||
ZE_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||
ZE_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||
ZE_DEVICE_PROPERTY_FLAG_ONDEMANDPAGING = ZE_BIT(3),
|
||||
ZE_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff
|
||||
} ze_device_property_flag_t;
|
||||
|
||||
typedef uint32_t zes_device_property_flags_t;
|
||||
typedef enum _zes_device_property_flag_t
|
||||
{
|
||||
ZES_DEVICE_PROPERTY_FLAG_INTEGRATED = ZE_BIT(0),
|
||||
ZES_DEVICE_PROPERTY_FLAG_SUBDEVICE = ZE_BIT(1),
|
||||
ZES_DEVICE_PROPERTY_FLAG_ECC = ZE_BIT(2),
|
||||
ZES_DEVICE_PROPERTY_FLAG_ONDEMANDPAGING = ZE_BIT(3),
|
||||
ZES_DEVICE_PROPERTY_FLAG_FORCE_UINT32 = 0x7fffffff
|
||||
} zes_device_property_flag_t;
|
||||
|
||||
typedef struct _ze_device_properties_t
|
||||
{
|
||||
ze_structure_type_t stype;
|
||||
void *pNext;
|
||||
ze_device_type_t type;
|
||||
uint32_t vendorId;
|
||||
uint32_t deviceId;
|
||||
ze_device_property_flags_t flags;
|
||||
uint32_t subdeviceId;
|
||||
uint32_t coreClockRate;
|
||||
uint64_t maxMemAllocSize;
|
||||
uint32_t maxHardwareContexts;
|
||||
uint32_t maxCommandQueuePriority;
|
||||
uint32_t numThreadsPerEU;
|
||||
uint32_t physicalEUSimdWidth;
|
||||
uint32_t numEUsPerSubslice;
|
||||
uint32_t numSubslicesPerSlice;
|
||||
uint32_t numSlices;
|
||||
uint64_t timerResolution;
|
||||
uint32_t timestampValidBits;
|
||||
uint32_t kernelTimestampValidBits;
|
||||
ze_device_uuid_t uuid;
|
||||
char name[ZE_MAX_DEVICE_NAME];
|
||||
} ze_device_properties_t;
|
||||
|
||||
typedef struct _zes_device_properties_t
|
||||
{
|
||||
zes_structure_type_t stype;
|
||||
void *pNext;
|
||||
ze_device_properties_t core;
|
||||
uint32_t numSubdevices;
|
||||
char serialNumber[ZES_STRING_PROPERTY_SIZE];
|
||||
char boardNumber[ZES_STRING_PROPERTY_SIZE];
|
||||
char brandName[ZES_STRING_PROPERTY_SIZE];
|
||||
char modelName[ZES_STRING_PROPERTY_SIZE];
|
||||
char vendorName[ZES_STRING_PROPERTY_SIZE];
|
||||
char driverVersion[ZES_STRING_PROPERTY_SIZE];
|
||||
} zes_device_properties_t;
|
||||
|
||||
typedef struct _zes_device_ext_properties_t
|
||||
{
|
||||
zes_structure_type_t stype;
|
||||
void *pNext;
|
||||
zes_uuid_t uuid;
|
||||
zes_device_type_t type;
|
||||
zes_device_property_flags_t flags;
|
||||
} zes_device_ext_properties_t;
|
||||
|
||||
typedef struct _zes_mem_properties_t
|
||||
{
|
||||
zes_structure_type_t stype;
|
||||
void *pNext;
|
||||
zes_mem_type_t type;
|
||||
ze_bool_t onSubdevice;
|
||||
uint32_t subdeviceId;
|
||||
zes_mem_loc_t location;
|
||||
uint64_t physicalSize;
|
||||
int32_t busWidth;
|
||||
int32_t numChannels;
|
||||
} zes_mem_properties_t;
|
||||
|
||||
typedef struct _zes_mem_state_t
|
||||
{
|
||||
zes_structure_type_t stype;
|
||||
const void *pNext;
|
||||
zes_mem_health_t health;
|
||||
uint64_t free;
|
||||
uint64_t size;
|
||||
} zes_mem_state_t;
|
||||
|
||||
typedef struct oneapi_handle
|
||||
{
|
||||
void *handle;
|
||||
uint16_t verbose;
|
||||
ze_result_t (*zesInit)(int);
|
||||
ze_result_t (*zesDriverGet)(uint32_t *pCount, zes_driver_handle_t *phDrivers);
|
||||
ze_result_t (*zesDeviceGet)(zes_driver_handle_t hDriver, uint32_t *pCount,
|
||||
zes_device_handle_t *phDevices);
|
||||
ze_result_t (*zesDeviceGetProperties)(zes_device_handle_t hDevice,
|
||||
zes_device_properties_t *pProperties);
|
||||
ze_result_t (*zesDeviceEnumMemoryModules)(zes_device_handle_t hDevice,
|
||||
uint32_t *pCount,
|
||||
zes_mem_handle_t *phMemory);
|
||||
ze_result_t (*zesMemoryGetProperties)(zes_mem_handle_t hMemory,
|
||||
zes_mem_properties_t *pProperties);
|
||||
ze_result_t (*zesMemoryGetState)(zes_mem_handle_t hMemory,
|
||||
zes_mem_state_t *pState);
|
||||
|
||||
} oneapi_handle_t;
|
||||
|
||||
typedef struct oneapi_init_resp
|
||||
{
|
||||
char *err; // If err is non-null handle is invalid
|
||||
int num_devices;
|
||||
oneapi_handle_t oh;
|
||||
} oneapi_init_resp_t;
|
||||
|
||||
typedef struct oneapi_version_resp
|
||||
{
|
||||
ze_result_t status;
|
||||
char *str; // Contains version or error string if status != 0
|
||||
} oneapi_version_resp_t;
|
||||
|
||||
void oneapi_init(char *oneapi_lib_path, oneapi_init_resp_t *resp);
|
||||
void oneapi_check_vram(oneapi_handle_t rh, mem_info_t *resp);
|
||||
|
||||
#endif // __GPU_INFO_INTEL_H__
|
||||
#endif // __APPLE__
|
||||
21 gpu/gpu_oneapi.go (new file)

@@ -0,0 +1,21 @@
//go:build linux || windows

package gpu

import (
    "log/slog"
    "strings"
)

func oneapiGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
    ids := []string{}
    for _, info := range gpuInfo {
        if info.Library != "oneapi" {
            // TODO shouldn't happen if things are wired correctly...
            slog.Debug("oneapiGetVisibleDevicesEnv skipping over non-sycl device", "library", info.Library)
            continue
        }
        ids = append(ids, info.ID)
    }
    return "ONEAPI_DEVICE_SELECTOR", "level_zero:" + strings.Join(ids, ",")
}
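The function returns an environment variable name and value rather than setting anything itself. A hedged sketch of how such a pair might be applied when spawning a subprocess — the helper and its wiring are illustrative only; the real scheduler code is not shown in this diff:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// applyVisibleDevices appends a KEY=VALUE pair, such as the
// ("ONEAPI_DEVICE_SELECTOR", "level_zero:0,1") pair returned above, to a
// child process environment.
func applyVisibleDevices(cmd *exec.Cmd, key, value string) {
	if key == "" {
		return
	}
	cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", key, value))
}

func main() {
	cmd := exec.Command("env")
	applyVisibleDevices(cmd, "ONEAPI_DEVICE_SELECTOR", "level_zero:0")
	out, err := cmd.Output()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Print(string(out))
}
```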
18 llm/ext_server/server.cpp vendored
@@ -334,6 +334,7 @@ struct server_metrics {
struct llama_server_context
{
llama_model *model = nullptr;
float modelProgress = 0.0;
llama_context *ctx = nullptr;

clip_ctx *clp_ctx = nullptr;
@@ -737,7 +738,7 @@ struct llama_server_context
sampler_names.emplace_back(sampler_name);
}
}
slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
}
else
{
@@ -1095,7 +1096,7 @@ struct llama_server_context
std::vector<std::string> samplers_sequence;
for (const auto &sampler_type : slot.sparams.samplers_sequence)
{
samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type));
samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
}

return json {
@@ -2779,6 +2780,12 @@ inline void signal_handler(int signal) {
shutdown_handler(signal);
}

static bool update_load_progress(float progress, void *data)
{
((llama_server_context*)data)->modelProgress = progress;
return true;
}

#if defined(_WIN32)
char* wchar_to_char(const wchar_t* wstr) {
if (wstr == nullptr) return nullptr;
@@ -2884,7 +2891,9 @@ int main(int argc, char **argv) {
break;
}
case SERVER_STATE_LOADING_MODEL:
res.set_content(R"({"status": "loading model"})", "application/json");
char buf[128];
snprintf(&buf[0], 128, R"({"status": "loading model", "progress": %0.2f})", llama.modelProgress);
res.set_content(buf, "application/json");
res.status = 503; // HTTP Service Unavailable
break;
case SERVER_STATE_ERROR:
@@ -3079,6 +3088,9 @@ int main(int argc, char **argv) {
});

// load the model
params.progress_callback = update_load_progress;
params.progress_callback_user_data = (void*)&llama;

if (!llama.load_model(params))
{
state.store(SERVER_STATE_ERROR);
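The hunks above register a llama.cpp load-progress callback in the ext server and surface it as a `progress` field on the status response while the state is "loading model". A small client-side sketch of polling that response; the URL is an assumption based on a default local runner and is not taken from the diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

type healthResp struct {
	Status   string  `json:"status"`
	Progress float32 `json:"progress"`
}

// pollHealth polls the runner's health endpoint until the model stops
// reporting "loading model". Illustrative only.
func pollHealth(url string) error {
	for {
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		var h healthResp
		err = json.NewDecoder(resp.Body).Decode(&h)
		resp.Body.Close()
		if err != nil {
			return err
		}
		fmt.Printf("status=%q progress=%0.2f\n", h.Status, h.Progress)
		if h.Status != "loading model" {
			return nil
		}
		time.Sleep(250 * time.Millisecond)
	}
}

func main() {
	_ = pollHealth("http://127.0.0.1:8080/health")
}
```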
@@ -27,8 +27,16 @@ const (
fileTypeIQ2_XXS
fileTypeIQ2_XS
fileTypeQ2_K_S
fileTypeQ3_K_XS
fileTypeIQ3_XS
fileTypeIQ3_XXS
fileTypeIQ1_S
fileTypeIQ4_NL
fileTypeIQ3_S
fileTypeIQ2_S
fileTypeIQ4_XS
fileTypeIQ2_M
fileTypeIQ1_M
fileTypeBF16

fileTypeUnknown
)
@@ -75,10 +83,26 @@ func ParseFileType(s string) (fileType, error) {
return fileTypeIQ2_XS, nil
case "Q2_K_S":
return fileTypeQ2_K_S, nil
case "Q3_K_XS":
return fileTypeQ3_K_XS, nil
case "IQ3_XS":
return fileTypeIQ3_XS, nil
case "IQ3_XXS":
return fileTypeIQ3_XXS, nil
case "IQ1_S":
return fileTypeIQ1_S, nil
case "IQ4_NL":
return fileTypeIQ4_NL, nil
case "IQ3_S":
return fileTypeIQ3_S, nil
case "IQ2_S":
return fileTypeIQ2_S, nil
case "IQ4_XS":
return fileTypeIQ4_XS, nil
case "IQ2_M":
return fileTypeIQ2_M, nil
case "IQ1_M":
return fileTypeIQ1_M, nil
case "BF16":
return fileTypeBF16, nil
default:
return fileTypeUnknown, fmt.Errorf("unknown fileType: %s", s)
}
@@ -126,10 +150,26 @@ func (t fileType) String() string {
return "IQ2_XS"
case fileTypeQ2_K_S:
return "Q2_K_S"
case fileTypeQ3_K_XS:
return "Q3_K_XS"
case fileTypeIQ3_XS:
return "IQ3_XS"
case fileTypeIQ3_XXS:
return "IQ3_XXS"
case fileTypeIQ1_S:
return "IQ1_S"
case fileTypeIQ4_NL:
return "IQ4_NL"
case fileTypeIQ3_S:
return "IQ3_S"
case fileTypeIQ2_S:
return "IQ2_S"
case fileTypeIQ4_XS:
return "IQ4_XS"
case fileTypeIQ2_M:
return "IQ2_M"
case fileTypeIQ1_M:
return "IQ1_M"
case fileTypeBF16:
return "BF16"
default:
return "unknown"
}
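The two hunks above extend ParseFileType and fileType.String in lockstep; every new quantization name has to appear in both so the mapping round-trips. A standalone sketch of that invariant using a single table; the map literal is illustrative, not the llm package's real representation:

```go
package main

import "fmt"

// fileTypeNames pairs a few of the newly added types with their names.
// Deriving both directions from one table keeps Parse and String in sync.
var fileTypeNames = map[int]string{
	0: "IQ4_NL",
	1: "IQ2_M",
	2: "BF16",
}

func parseFileType(s string) (int, error) {
	for t, name := range fileTypeNames {
		if name == s {
			return t, nil
		}
	}
	return -1, fmt.Errorf("unknown fileType: %s", s)
}

func main() {
	for t, name := range fileTypeNames {
		got, err := parseFileType(name)
		if err != nil || got != t {
			panic("String/Parse mapping out of sync")
		}
	}
	fmt.Println("round-trip ok for", len(fileTypeNames), "types")
}
```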
@@ -206,6 +206,36 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then

fi

if [ -z "${ONEAPI_ROOT}" ]; then
# Try the default location in case it exists
ONEAPI_ROOT=/opt/intel/oneapi
fi

if [ -d "${ONEAPI_ROOT}" ]; then
echo "OneAPI libraries detected - building dynamic OneAPI library"
init_vars
source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI
CC=icx
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL=ON -DLLAMA_SYCL_F16=OFF"
BUILD_DIR="../build/linux/${ARCH}/oneapi"
EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb"
DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it
build

# copy oneAPI dependencies
for dep in $(ldd "${BUILD_DIR}/bin/ollama_llama_server" | grep "=>" | cut -f2 -d= | cut -f2 -d' ' | grep -e sycl -e mkl -e tbb); do
cp "${dep}" "${BUILD_DIR}/bin/"
done
cp "${ONEAPI_ROOT}/compiler/latest/lib/libOpenCL.so" "${BUILD_DIR}/bin/"
cp "${ONEAPI_ROOT}/compiler/latest/lib/libimf.so" "${BUILD_DIR}/bin/"
cp "${ONEAPI_ROOT}/compiler/latest/lib/libintlc.so.5" "${BUILD_DIR}/bin/"
cp "${ONEAPI_ROOT}/compiler/latest/lib/libirng.so" "${BUILD_DIR}/bin/"
cp "${ONEAPI_ROOT}/compiler/latest/lib/libpi_level_zero.so" "${BUILD_DIR}/bin/"
cp "${ONEAPI_ROOT}/compiler/latest/lib/libsvml.so" "${BUILD_DIR}/bin/"
cp "${ONEAPI_ROOT}/compiler/latest/lib/libur_loader.so.0" "${BUILD_DIR}/bin/"
compress
fi

if [ -z "${ROCM_PATH}" ]; then
# Try the default location in case it exists
ROCM_PATH=/opt/rocm
@@ -289,6 +289,49 @@ function build_cuda() {
}
}

function build_oneapi() {
if ((-not "${env:OLLAMA_SKIP_CUDA_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
# Get oneAPI version
$script:ONEAPI_VERSION = icpx --version
$script:ONEAPI_VERSION = [regex]::Match($script:ONEAPI_VERSION, '(?<=oneAPI DPC\+\+/C\+\+ Compiler )(?<version>\d+\.\d+\.\d+)').Value
if ($null -ne $script:ONEAPI_VERSION) {
$script:ONEAPI_VARIANT = "_v" + $script:ONEAPI_VERSION
}
init_vars
$script:buildDir = "../build/windows/${script:ARCH}/oneapi$script:ONEAPI_VARIANT"
$script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT"
$script:cmakeDefs += @(
"-G", "MinGW Makefiles",
"-DLLAMA_SYCL=ON",
"-DCMAKE_C_COMPILER=icx",
"-DCMAKE_CXX_COMPILER=icx",
"-DCMAKE_BUILD_TYPE=Release"
)

Write-Host "Building oneAPI"
build
# Ninja doesn't prefix with config name
if ($null -ne $script:DUMPBIN) {
& "$script:DUMPBIN" /dependents "${script:buildDir}/bin/ollama_llama_server.exe" | Select-String ".dll"
}
sign
install

cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libirngmd.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\libmmd.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_level_zero.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_unified_runtime.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\pi_win_proxy_loader.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\svml_dispmd.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\compiler\latest\bin\sycl7.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_core.2.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_sycl_blas.4.dll" "${script:distDir}"
cp "${env:ONEAPI_ROOT}\mkl\latest\bin\mkl_tbb_thread.2.dll" "${script:distDir}"
} else {
Write-Host "Skipping oneAPI generation step"
}
}

function build_rocm() {
if ((-not "${env:OLLAMA_SKIP_ROCM_GENERATE}") -and ("${env:HIP_PATH}")) {
$script:ROCM_VERSION=(get-item $env:HIP_PATH).Basename
@@ -356,6 +399,7 @@ if ($($args.count) -eq 0) {
build_cpu_avx
build_cpu_avx2
build_cuda
build_oneapi
build_rocm
}

32 llm/ggml.go
@@ -124,12 +124,12 @@ type Tensor struct {
}

func (t Tensor) blockSize() uint64 {
switch {
case t.Kind < 2:
switch t.Kind {
case 0, 1, 24, 25, 26, 27, 28, 30: // F32, F16, I8, I16, I32, I64, F64, BF16
return 1
case t.Kind < 10:
case 2, 3, 4, 5, 6, 7, 8, 9, 20: // Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1, IQ4_NL
return 32
default:
default: // All others
return 256
}
}
@@ -171,7 +171,29 @@ func (t Tensor) typeSize() uint64 {
case 17: // IQ2_XS
return 2 + 2*blockSize/8 + blockSize/32
case 18: // IQ3_XXS
return 2 + 3*blockSize/8
return 2 + blockSize/4 + blockSize/8
case 19: // IQ1_S
return 2 + blockSize/8 + blockSize/16
case 20: // IQ4_NL
return 2 + blockSize/2
case 21: // IQ3_S
return 2 + blockSize/4 + blockSize/8 + blockSize/32 + 4
case 22: // IQ2_S
return 2 + blockSize/4 + blockSize/16
case 23: // IQ4_XS
return 2 + 2 + blockSize/2 + blockSize/64
case 24: // I8
return 1
case 25: // I16
return 2
case 26: // I32
return 4
case 27: // I64
return 8
case 28: // F64
return 8
case 29: // IQ1_M
return blockSize/8 + blockSize/16 + blockSize/32
default:
return 0
}
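For reference, blockSize and typeSize together determine a quantized tensor's storage: elements are grouped into blocks and each block occupies typeSize bytes, so the total is roughly elements / blockSize * typeSize. A hedged, standalone sketch of that arithmetic for IQ4_NL, which per the hunk above uses blocks of 32 elements and 2 + blockSize/2 = 18 bytes per block:

```go
package main

import "fmt"

// tensorBytes computes block-packed storage: (elements / blockSize) * typeSize.
// Standalone sketch; the real accounting lives on llm.Tensor.
func tensorBytes(elements, blockSize, typeSize uint64) uint64 {
	return elements / blockSize * typeSize
}

func main() {
	// Example: a 4096 x 4096 weight stored as IQ4_NL-style blocks of 32
	// elements, 18 bytes per block (2-byte scale plus 32 four-bit values).
	elements := uint64(4096 * 4096)
	fmt.Println(tensorBytes(elements, 32, 18), "bytes") // 9437184 bytes, i.e. 9 MiB
}
```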
Submodule llm/llama.cpp updated: 614d3b914e...74f33adf5f
@@ -7,7 +7,7 @@ import (
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/server/envconfig"
"github.com/ollama/ollama/envconfig"
)

// This algorithm looks for a complete fit to determine if we need to unload other models

31 llm/patches/01-load-progress.diff Normal file
@@ -0,0 +1,31 @@
diff --git a/common/common.cpp b/common/common.cpp
index ba1ecf0e..cead57cc 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1836,6 +1836,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.use_mmap = params.use_mmap;
mparams.use_mlock = params.use_mlock;
mparams.check_tensors = params.check_tensors;
+ mparams.progress_callback = params.progress_callback;
+ mparams.progress_callback_user_data = params.progress_callback_user_data;
if (params.kv_overrides.empty()) {
mparams.kv_overrides = NULL;
} else {
diff --git a/common/common.h b/common/common.h
index d80344f2..71e84834 100644
--- a/common/common.h
+++ b/common/common.h
@@ -174,6 +174,13 @@ struct gpt_params {
// multimodal models (see examples/llava)
std::string mmproj = ""; // path to multimodal projector
std::vector<std::string> image; // path to image file(s)
+
+ // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
+ // If the provided progress_callback returns true, model loading continues.
+ // If it returns false, model loading is immediately aborted.
+ llama_progress_callback progress_callback = NULL;
+ // context pointer passed to the progress callback
+ void * progress_callback_user_data;
};

void gpt_params_handle_model_default(gpt_params & params);
@@ -1,8 +1,17 @@
From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 23 May 2024 11:18:45 -0700
Subject: [PATCH] throw exception on load errors

---
llama.cpp | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 4225f955..7b762f86 100644
index 15c66077..8ba90b6a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4756,7 +4756,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
}
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
@@ -11,10 +20,10 @@ index 4225f955..7b762f86 100644
}

return 0;
@@ -12102,16 +12102,22 @@ struct llama_model * llama_load_model_from_file(
};
@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file(
}
model->rpc_servers.push_back(servers);
}

- int status = llama_model_load(path_model, *model, params);
- GGML_ASSERT(status <= 0);
- if (status < 0) {
@@ -22,6 +31,7 @@ index 4225f955..7b762f86 100644
- LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
- } else if (status == -2) {
- LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+
+ try {
+ int status = llama_model_load(path_model, *model, params);
+ GGML_ASSERT(status <= 0);
@@ -42,3 +52,6 @@ index 4225f955..7b762f86 100644
}

return model;
--
2.45.1

35 llm/patches/05-default-pretokenizer.diff Normal file
@@ -0,0 +1,35 @@
From d02a06f3f45a09255ace8684a66590e06ce44605 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Thu, 23 May 2024 11:33:20 -0700
Subject: [PATCH] default pretokenizer on unrecognized type

---
llama.cpp | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 15c66077..af1aede3 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4504,9 +4504,6 @@ static void llm_load_vocab(
LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
LLAMA_LOG_WARN("%s: \n", __func__);
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
- } else if (
- tokenizer_pre == "default") {
- vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
} else if (
tokenizer_pre == "llama3" ||
tokenizer_pre == "llama-v3" ||
@@ -4553,7 +4550,7 @@ static void llm_load_vocab(
tokenizer_pre == "dbrx") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
} else {
- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
}
} else {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
--
2.45.1

@@ -24,9 +24,9 @@ import (
"golang.org/x/sync/semaphore"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/server/envconfig"
)

type LlamaServer interface {
@@ -55,6 +55,7 @@ type llmServer struct {
totalLayers uint64
gpuCount int
loadDuration time.Duration // Record how long it took the model to load
loadProgress float32

sem *semaphore.Weighted
}
@@ -200,20 +201,20 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
params = append(params, "--numa")
}

flashAttnSupported := true
flashAttnEnabled := envconfig.FlashAttention

// partial offloading does not support flash attention
if uint64(opts.NumGPU) < ggml.KV().BlockCount() + 1 {
flashAttnSupported = false
if uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
flashAttnEnabled = false
}

// only cuda (compute capability 7+) and metal support flash attention
for _, g := range gpus {
if g.Library != "metal" && (g.Library != "cuda" || g.DriverMajor < 7) {
flashAttnSupported = false
flashAttnEnabled = false
}
}
if flashAttnSupported {
if flashAttnEnabled {
params = append(params, "--flash-attn")
}

@@ -242,7 +243,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
gpuCount = 0
}

// Find an availableServers port, retry on each iterration in case the failure was a port conflict race
// Find an availableServers port, retry on each iteration in case the failure was a port conflict race
port := 0
if a, err := net.ResolveTCPAddr("tcp", "localhost:0"); err == nil {
var l *net.TCPListener
@@ -425,10 +426,11 @@ func (s ServerStatus) ToString() string {
}

type ServerStatusResp struct {
Status string `json:"status"`
SlotsIdle int `json:"slots_idle"`
SlotsProcessing int `json:"slots_processing"`
Error string `json:"error"`
Status string `json:"status"`
SlotsIdle int `json:"slots_idle"`
SlotsProcessing int `json:"slots_processing"`
Error string `json:"error"`
Progress float32 `json:"progress"`
}

func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
@@ -476,6 +478,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
case "no slot available":
return ServerStatusNoSlotsAvailable, nil
case "loading model":
s.loadProgress = status.Progress
return ServerStatusLoadingModel, nil
default:
return ServerStatusError, fmt.Errorf("server error: %+v", status)
@@ -516,15 +519,18 @@ func (s *llmServer) Ping(ctx context.Context) error {

func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
start := time.Now()
expiresAt := time.Now().Add(10 * time.Minute) // be generous with timeout, large models can take a while to load
stallDuration := 5 * time.Minute // If no progress happens
finalLoadDuration := 5 * time.Minute // After we hit 100%, give the runner more time to come online
stallTimer := time.Now().Add(stallDuration) // give up if we stall

slog.Info("waiting for llama runner to start responding")
var lastStatus ServerStatus = -1
fullyLoaded := false

for {
select {
case <-ctx.Done():
slog.Info("context expired before server started")
slog.Warn("client connection closed before server finished loading, aborting load")
return fmt.Errorf("timed out waiting for llama runner to start: %w", ctx.Err())
case err := <-s.done:
msg := ""
@@ -534,13 +540,13 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
default:
}
if time.Now().After(expiresAt) {
if time.Now().After(stallTimer) {
// timeout
msg := ""
if s.status != nil && s.status.LastErrMsg != "" {
msg = s.status.LastErrMsg
}
return fmt.Errorf("timed out waiting for llama runner to start: %s", msg)
return fmt.Errorf("timed out waiting for llama runner to start - progress %0.2f - %s", s.loadProgress, msg)
}
if s.cmd.ProcessState != nil {
msg := ""
@@ -551,6 +557,7 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
}
ctx, cancel := context.WithTimeout(ctx, 200*time.Millisecond)
defer cancel()
priorProgress := s.loadProgress
status, _ := s.getServerStatus(ctx)
if lastStatus != status && status != ServerStatusReady {
// Only log on status changes
@@ -563,6 +570,15 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
return nil
default:
lastStatus = status
// Reset the timer as long as we're making forward progress on the load
if priorProgress != s.loadProgress {
slog.Debug(fmt.Sprintf("model load progress %0.2f", s.loadProgress))
stallTimer = time.Now().Add(stallDuration)
} else if !fullyLoaded && int(s.loadProgress*100.0) >= 100 {
slog.Debug("model load completed, waiting for server to become available", "status", status.ToString())
stallTimer = time.Now().Add(finalLoadDuration)
fullyLoaded = true
}
time.Sleep(time.Millisecond * 250)
continue
}
@@ -746,7 +762,7 @@ func (s *llmServer) Completion(ctx context.Context, req CompletionRequest, fn fu

var c completion
if err := json.Unmarshal(evt, &c); err != nil {
return fmt.Errorf("error unmarshaling llm prediction response: %v", err)
return fmt.Errorf("error unmarshalling llm prediction response: %v", err)
}

switch {

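The WaitUntilRunning change above replaces a single hard deadline with a stall timer that is pushed out whenever load progress advances, plus a shorter grace period once progress reaches 100%. A compact sketch of that pattern in isolation; the function name, signature and durations here are illustrative, not the exact ones in the file:

```go
package main

import (
	"errors"
	"fmt"
	"math"
	"time"
)

// waitWithStallTimer gives up only when progress stops advancing for
// stallDuration, instead of after a fixed overall deadline. poll is assumed
// to return the current load progress in [0, 1] and a readiness flag.
func waitWithStallTimer(poll func() (float32, bool), stallDuration, finalDuration time.Duration) error {
	deadline := time.Now().Add(stallDuration)
	prior := float32(-1)
	fullyLoaded := false
	for {
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for runner")
		}
		p, ready := poll()
		if ready {
			return nil
		}
		if p != prior {
			// forward progress: push the stall deadline out again
			deadline = time.Now().Add(stallDuration)
		} else if !fullyLoaded && p >= 1.0 {
			// weights are in; give the server a shorter grace period to come up
			deadline = time.Now().Add(finalDuration)
			fullyLoaded = true
		}
		prior = p
		time.Sleep(50 * time.Millisecond)
	}
}

func main() {
	start := time.Now()
	err := waitWithStallTimer(func() (float32, bool) {
		// Simulated loader: progress ramps over one second, ready shortly after.
		elapsed := time.Since(start).Seconds()
		return float32(math.Min(elapsed, 1.0)), elapsed > 1.2
	}, 3*time.Second, time.Second)
	fmt.Println("result:", err)
}
```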
@@ -5,16 +5,20 @@ import (
"os"

"github.com/emirpasic/gods/lists/arraylist"
"github.com/mattn/go-runewidth"
"golang.org/x/term"
)

type Buffer struct {
Pos int
Buf *arraylist.List
Prompt *Prompt
LineWidth int
Width int
Height int
DisplayPos int
Pos int
Buf *arraylist.List
//LineHasSpace is an arraylist of bools to keep track of whether a line has a space at the end
LineHasSpace *arraylist.List
Prompt *Prompt
LineWidth int
Width int
Height int
}

func NewBuffer(prompt *Prompt) (*Buffer, error) {
@@ -27,25 +31,57 @@ func NewBuffer(prompt *Prompt) (*Buffer, error) {
lwidth := width - len(prompt.prompt())

b := &Buffer{
Pos: 0,
Buf: arraylist.New(),
Prompt: prompt,
Width: width,
Height: height,
LineWidth: lwidth,
DisplayPos: 0,
Pos: 0,
Buf: arraylist.New(),
LineHasSpace: arraylist.New(),
Prompt: prompt,
Width: width,
Height: height,
LineWidth: lwidth,
}

return b, nil
}

func (b *Buffer) GetLineSpacing(line int) bool {
hasSpace, _ := b.LineHasSpace.Get(line)

if hasSpace == nil {
return false
}

return hasSpace.(bool)

}

func (b *Buffer) MoveLeft() {
if b.Pos > 0 {
if b.Pos%b.LineWidth == 0 {
fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width))
} else {
fmt.Print(CursorLeft)
//asserts that we retrieve a rune
if e, ok := b.Buf.Get(b.Pos - 1); ok {
if r, ok := e.(rune); ok {
rLength := runewidth.RuneWidth(r)

if b.DisplayPos%b.LineWidth == 0 {
fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width))
if rLength == 2 {
fmt.Print(CursorLeft)
}

line := b.DisplayPos/b.LineWidth - 1
hasSpace := b.GetLineSpacing(line)
if hasSpace {
b.DisplayPos -= 1
fmt.Print(CursorLeft)
}
} else {
fmt.Print(cursorLeftN(rLength))
}

b.Pos -= 1
b.DisplayPos -= rLength
}
}
b.Pos -= 1
}
}

@@ -71,18 +107,35 @@ func (b *Buffer) MoveLeftWord() {
}

func (b *Buffer) MoveRight() {
if b.Pos < b.Size() {
b.Pos += 1
if b.Pos%b.LineWidth == 0 {
fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())))
} else {
fmt.Print(CursorRight)
if b.Pos < b.Buf.Size() {
if e, ok := b.Buf.Get(b.Pos); ok {
if r, ok := e.(rune); ok {
rLength := runewidth.RuneWidth(r)
b.Pos += 1
hasSpace := b.GetLineSpacing(b.DisplayPos / b.LineWidth)
b.DisplayPos += rLength

if b.DisplayPos%b.LineWidth == 0 {
fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())))

} else if (b.DisplayPos-rLength)%b.LineWidth == b.LineWidth-1 && hasSpace {
fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())+rLength))
b.DisplayPos += 1

} else if b.LineHasSpace.Size() > 0 && b.DisplayPos%b.LineWidth == b.LineWidth-1 && hasSpace {
fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())))
b.DisplayPos += 1

} else {
fmt.Print(cursorRightN(rLength))
}
}
}
}
}

func (b *Buffer) MoveRightWord() {
if b.Pos < b.Size() {
if b.Pos < b.Buf.Size() {
for {
b.MoveRight()
v, _ := b.Buf.Get(b.Pos)
@@ -90,7 +143,7 @@ func (b *Buffer) MoveRightWord() {
break
}

if b.Pos == b.Size() {
if b.Pos == b.Buf.Size() {
break
}
}
@@ -99,7 +152,7 @@ func (b *Buffer) MoveRightWord() {

func (b *Buffer) MoveToStart() {
if b.Pos > 0 {
currLine := b.Pos / b.LineWidth
currLine := b.DisplayPos / b.LineWidth
if currLine > 0 {
for cnt := 0; cnt < currLine; cnt++ {
fmt.Print(CursorUp)
@@ -107,81 +160,195 @@ func (b *Buffer) MoveToStart() {
}
fmt.Printf(CursorBOL + cursorRightN(len(b.Prompt.prompt())))
b.Pos = 0
b.DisplayPos = 0
}
}

func (b *Buffer) MoveToEnd() {
if b.Pos < b.Size() {
currLine := b.Pos / b.LineWidth
totalLines := b.Size() / b.LineWidth
if b.Pos < b.Buf.Size() {
currLine := b.DisplayPos / b.LineWidth
totalLines := b.DisplaySize() / b.LineWidth
if currLine < totalLines {
for cnt := 0; cnt < totalLines-currLine; cnt++ {
fmt.Print(CursorDown)
}
remainder := b.Size() % b.LineWidth
remainder := b.DisplaySize() % b.LineWidth
fmt.Printf(CursorBOL + cursorRightN(len(b.Prompt.prompt())+remainder))
} else {
fmt.Print(cursorRightN(b.Size() - b.Pos))
fmt.Print(cursorRightN(b.DisplaySize() - b.DisplayPos))
}

b.Pos = b.Size()
b.Pos = b.Buf.Size()
b.DisplayPos = b.DisplaySize()
}
}

func (b *Buffer) Size() int {
return b.Buf.Size()
func (b *Buffer) DisplaySize() int {
sum := 0
for i := 0; i < b.Buf.Size(); i++ {
if e, ok := b.Buf.Get(i); ok {
if r, ok := e.(rune); ok {
sum += runewidth.RuneWidth(r)
}
}
}

return sum
}

func (b *Buffer) Add(r rune) {

if b.Pos == b.Buf.Size() {
fmt.Printf("%c", r)
b.Buf.Add(r)
b.Pos += 1
if b.Pos > 0 && b.Pos%b.LineWidth == 0 {
b.AddChar(r, false)
} else {
b.AddChar(r, true)
}
}

func (b *Buffer) AddChar(r rune, insert bool) {
rLength := runewidth.RuneWidth(r)
b.DisplayPos += rLength

if b.Pos > 0 {

if b.DisplayPos%b.LineWidth == 0 {
fmt.Printf("%c", r)
fmt.Printf("\n%s", b.Prompt.AltPrompt)

if insert {
b.LineHasSpace.Set(b.DisplayPos/b.LineWidth-1, false)
} else {
b.LineHasSpace.Add(false)
}

// this case occurs when a double-width rune crosses the line boundary
} else if b.DisplayPos%b.LineWidth < (b.DisplayPos-rLength)%b.LineWidth {
if insert {
fmt.Print(ClearToEOL)
}
fmt.Printf("\n%s", b.Prompt.AltPrompt)
b.DisplayPos += 1
fmt.Printf("%c", r)

if insert {
b.LineHasSpace.Set(b.DisplayPos/b.LineWidth-1, true)
} else {
b.LineHasSpace.Add(true)
}

} else {
fmt.Printf("%c", r)
}
} else {
fmt.Printf("%c", r)
}

if insert {
b.Buf.Insert(b.Pos, r)
b.Pos += 1
if b.Pos > 0 && b.Pos%b.LineWidth == 0 {
fmt.Printf("\n%s", b.Prompt.AltPrompt)
}
} else {
b.Buf.Add(r)
}

b.Pos += 1

if insert {
b.drawRemaining()
}
}

func (b *Buffer) countRemainingLineWidth(place int) int {
var sum int
counter := -1
var prevLen int

for place <= b.LineWidth {
counter += 1
sum += prevLen
if e, ok := b.Buf.Get(b.Pos + counter); ok {
if r, ok := e.(rune); ok {
place += runewidth.RuneWidth(r)
prevLen = len(string(r))
}
} else {
break
}
}

return sum
}

func (b *Buffer) drawRemaining() {
var place int
remainingText := b.StringN(b.Pos)
if b.Pos > 0 {
place = b.Pos % b.LineWidth
place = b.DisplayPos % b.LineWidth
}
fmt.Print(CursorHide)

// render the rest of the current line
currLine := remainingText[:min(b.LineWidth-place, len(remainingText))]
currLineLength := b.countRemainingLineWidth(place)

currLine := remainingText[:min(currLineLength, len(remainingText))]
currLineSpace := runewidth.StringWidth(currLine)
remLength := runewidth.StringWidth(remainingText)

if len(currLine) > 0 {
fmt.Printf(ClearToEOL + currLine)
fmt.Print(cursorLeftN(len(currLine)))
fmt.Print(cursorLeftN(currLineSpace))
} else {
fmt.Print(ClearToEOL)
}

if currLineSpace != b.LineWidth-place && currLineSpace != remLength {
b.LineHasSpace.Set(b.DisplayPos/b.LineWidth, true)
} else if currLineSpace != b.LineWidth-place {
b.LineHasSpace.Remove(b.DisplayPos / b.LineWidth)
} else {
b.LineHasSpace.Set(b.DisplayPos/b.LineWidth, false)
}

if (b.DisplayPos+currLineSpace)%b.LineWidth == 0 && currLine == remainingText {
fmt.Print(cursorRightN(currLineSpace))
fmt.Printf("\n%s", b.Prompt.AltPrompt)
fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width-currLineSpace))
}

// render the other lines
if len(remainingText) > len(currLine) {
remaining := []rune(remainingText[len(currLine):])
if remLength > currLineSpace {
remaining := (remainingText[len(currLine):])
var totalLines int
for i, c := range remaining {
if i%b.LineWidth == 0 {
var displayLength int
var lineLength int = currLineSpace

for _, c := range remaining {
if displayLength == 0 || (displayLength+runewidth.RuneWidth(c))%b.LineWidth < displayLength%b.LineWidth {
fmt.Printf("\n%s", b.Prompt.AltPrompt)
totalLines += 1

if displayLength != 0 {
if lineLength == b.LineWidth {
b.LineHasSpace.Set(b.DisplayPos/b.LineWidth+totalLines-1, false)
} else {
b.LineHasSpace.Set(b.DisplayPos/b.LineWidth+totalLines-1, true)
}
}

lineLength = 0
}

displayLength += runewidth.RuneWidth(c)
lineLength += runewidth.RuneWidth(c)
fmt.Printf("%c", c)
}
fmt.Print(ClearToEOL)
fmt.Print(cursorUpN(totalLines))
fmt.Printf(CursorBOL + cursorRightN(b.Width-len(currLine)))
fmt.Printf(CursorBOL + cursorRightN(b.Width-currLineSpace))

hasSpace := b.GetLineSpacing(b.DisplayPos / b.LineWidth)

if hasSpace && b.DisplayPos%b.LineWidth != b.LineWidth-1 {
fmt.Print(CursorLeft)
}
}

fmt.Print(CursorShow)
@@ -189,46 +356,84 @@ func (b *Buffer) drawRemaining() {

func (b *Buffer) Remove() {
if b.Buf.Size() > 0 && b.Pos > 0 {
if b.Pos%b.LineWidth == 0 {
// if the user backspaces over the word boundary, do this magic to clear the line
// and move to the end of the previous line
fmt.Printf(CursorBOL + ClearToEOL)
fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width) + " " + CursorLeft)
} else {
fmt.Printf(CursorLeft + " " + CursorLeft)
}

var eraseExtraLine bool
if (b.Size()-1)%b.LineWidth == 0 {
eraseExtraLine = true
}
if e, ok := b.Buf.Get(b.Pos - 1); ok {
if r, ok := e.(rune); ok {
rLength := runewidth.RuneWidth(r)
hasSpace := b.GetLineSpacing(b.DisplayPos/b.LineWidth - 1)

b.Pos -= 1
b.Buf.Remove(b.Pos)
if b.DisplayPos%b.LineWidth == 0 {
// if the user backspaces over the word boundary, do this magic to clear the line
// and move to the end of the previous line
fmt.Printf(CursorBOL + ClearToEOL)
fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width))

if b.Pos < b.Size() {
b.drawRemaining()
// this erases a line which is left over when backspacing in the middle of a line and there
// are trailing characters which go over the line width boundary
if eraseExtraLine {
remainingLines := (b.Size() - b.Pos) / b.LineWidth
fmt.Printf(cursorDownN(remainingLines+1) + CursorBOL + ClearToEOL)
place := b.Pos % b.LineWidth
fmt.Printf(cursorUpN(remainingLines+1) + cursorRightN(place+len(b.Prompt.prompt())))
if b.DisplaySize()%b.LineWidth < (b.DisplaySize()-rLength)%b.LineWidth {
b.LineHasSpace.Remove(b.DisplayPos/b.LineWidth - 1)
}

if hasSpace {
b.DisplayPos -= 1
fmt.Print(CursorLeft)
}

if rLength == 2 {
fmt.Print(CursorLeft + " " + cursorLeftN(2))
} else {
fmt.Print(" " + CursorLeft)
}

} else if (b.DisplayPos-rLength)%b.LineWidth == 0 && hasSpace {
fmt.Printf(CursorBOL + ClearToEOL)
fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width))

if b.Pos == b.Buf.Size() {
b.LineHasSpace.Remove(b.DisplayPos/b.LineWidth - 1)
}
b.DisplayPos -= 1

} else {
fmt.Print(cursorLeftN(rLength))
for i := 0; i < rLength; i++ {
fmt.Print(" ")
}
fmt.Print(cursorLeftN(rLength))
}

var eraseExtraLine bool
if (b.DisplaySize()-1)%b.LineWidth == 0 || (rLength == 2 && ((b.DisplaySize()-2)%b.LineWidth == 0)) || b.DisplaySize()%b.LineWidth == 0 {
eraseExtraLine = true
}

b.Pos -= 1
b.DisplayPos -= rLength
b.Buf.Remove(b.Pos)

if b.Pos < b.Buf.Size() {
b.drawRemaining()
// this erases a line which is left over when backspacing in the middle of a line and there
// are trailing characters which go over the line width boundary
if eraseExtraLine {
remainingLines := (b.DisplaySize() - b.DisplayPos) / b.LineWidth
fmt.Printf(cursorDownN(remainingLines+1) + CursorBOL + ClearToEOL)
place := b.DisplayPos % b.LineWidth
fmt.Printf(cursorUpN(remainingLines+1) + cursorRightN(place+len(b.Prompt.prompt())))
}
}
}
}
}
}

func (b *Buffer) Delete() {
if b.Size() > 0 && b.Pos < b.Size() {
if b.Buf.Size() > 0 && b.Pos < b.Buf.Size() {
b.Buf.Remove(b.Pos)
b.drawRemaining()
if b.Size()%b.LineWidth == 0 {
if b.Pos != b.Size() {
remainingLines := (b.Size() - b.Pos) / b.LineWidth
if b.DisplaySize()%b.LineWidth == 0 {
if b.DisplayPos != b.DisplaySize() {
remainingLines := (b.DisplaySize() - b.DisplayPos) / b.LineWidth
fmt.Printf(cursorDownN(remainingLines) + CursorBOL + ClearToEOL)
place := b.Pos % b.LineWidth
place := b.DisplayPos % b.LineWidth
fmt.Printf(cursorUpN(remainingLines) + cursorRightN(place+len(b.Prompt.prompt())))
}
}
@@ -244,8 +449,8 @@ func (b *Buffer) DeleteBefore() {
}

func (b *Buffer) DeleteRemaining() {
if b.Size() > 0 && b.Pos < b.Size() {
charsToDel := b.Size() - b.Pos
if b.DisplaySize() > 0 && b.Pos < b.DisplaySize() {
charsToDel := b.Buf.Size() - b.Pos
for cnt := 0; cnt < charsToDel; cnt++ {
b.Delete()
}
@@ -281,8 +486,10 @@ func (b *Buffer) ClearScreen() {
ph := b.Prompt.placeholder()
fmt.Printf(ColorGrey + ph + cursorLeftN(len(ph)) + ColorDefault)
} else {
currPos := b.Pos
currPos := b.DisplayPos
currIndex := b.Pos
b.Pos = 0
b.DisplayPos = 0
b.drawRemaining()
fmt.Printf(CursorReset + cursorRightN(len(b.Prompt.prompt())))
if currPos > 0 {
@@ -300,7 +507,8 @@ func (b *Buffer) ClearScreen() {
fmt.Printf(CursorBOL + b.Prompt.AltPrompt)
}
}
b.Pos = currPos
b.Pos = currIndex
b.DisplayPos = currPos
}
}

@@ -309,9 +517,20 @@ func (b *Buffer) IsEmpty() bool {
}

func (b *Buffer) Replace(r []rune) {
b.DisplayPos = 0
b.Pos = 0
lineNums := b.DisplaySize() / b.LineWidth

b.Buf.Clear()
fmt.Printf(ClearLine + CursorBOL + b.Prompt.prompt())

fmt.Printf(CursorBOL + ClearToEOL)

for i := 0; i < lineNums; i++ {
fmt.Print(CursorUp + CursorBOL + ClearToEOL)
}

fmt.Printf(CursorBOL + b.Prompt.prompt())

for _, c := range r {
b.Add(c)
}
@@ -328,7 +547,7 @@ func (b *Buffer) StringN(n int) string {
func (b *Buffer) StringNM(n, m int) string {
var s string
if m == 0 {
m = b.Size()
m = b.Buf.Size()
}
for cnt := n; cnt < m; cnt++ {
c, _ := b.Buf.Get(cnt)

@@ -150,7 +150,7 @@ func (i *Instance) Readline() (string, error) {
i.Pasting = false
}
case KeyDel:
if buf.Size() > 0 {
if buf.DisplaySize() > 0 {
buf.Delete()
}
metaDel = true
@@ -202,7 +202,7 @@ func (i *Instance) Readline() (string, error) {
buf.Add(' ')
}
case CharDelete:
if buf.Size() > 0 {
if buf.DisplaySize() > 0 {
buf.Delete()
} else {
return "", io.EOF

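The readline changes above separate the rune index (Pos) from the on-screen column (DisplayPos) because some characters, such as CJK text, occupy two terminal cells. A small illustration of the distinction using the same go-runewidth package the diff relies on:

```go
package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	input := "ab漢字c" // 5 runes, but the two CJK runes are double-width

	runes := 0
	cells := 0
	for _, r := range input {
		runes++                         // what Buffer.Pos tracks
		cells += runewidth.RuneWidth(r) // what Buffer.DisplayPos tracks
	}
	fmt.Println("runes:", runes, "display cells:", cells) // runes: 5 display cells: 7
}
```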
@@ -33,9 +33,11 @@ case "$ARCH" in
*) error "Unsupported architecture: $ARCH" ;;
esac

IS_WSL2=false

KERN=$(uname -r)
case "$KERN" in
*icrosoft*WSL2 | *icrosoft*wsl2) ;;
*icrosoft*WSL2 | *icrosoft*wsl2) IS_WSL2=true;;
*icrosoft) error "Microsoft WSL1 is not currently supported. Please upgrade to WSL2 with 'wsl --set-version <distro> 2'" ;;
*) ;;
esac
@@ -72,7 +74,7 @@ status "Installing ollama to $BINDIR..."
$SUDO install -o0 -g0 -m755 -d $BINDIR
$SUDO install -o0 -g0 -m755 $TEMP_DIR/ollama $BINDIR/ollama

install_success() {
install_success() {
status 'The Ollama API is now available at 127.0.0.1:11434.'
status 'Install complete. Run "ollama" from the command line.'
}
@@ -131,6 +133,17 @@ if available systemctl; then
configure_systemd
fi

# WSL2 only supports GPUs via nvidia passthrough
# so check for nvidia-smi to determine if GPU is available
if [ "$IS_WSL2" = true ]; then
if available nvidia-smi && [ -n "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\.[0-9]*")" ]; then
status "Nvidia GPU detected."
fi
install_success
exit 0
fi

# Install GPU dependencies on Linux
if ! available lspci && ! available lshw; then
warning "Unable to detect NVIDIA/AMD GPU. Install lspci or lshw to automatically detect and install GPU dependencies."
exit 0
@@ -139,12 +152,12 @@ fi
check_gpu() {
# Look for devices based on vendor ID for NVIDIA and AMD
case $1 in
lspci)
lspci)
case $2 in
nvidia) available lspci && lspci -d '10de:' | grep -q 'NVIDIA' || return 1 ;;
amdgpu) available lspci && lspci -d '1002:' | grep -q 'AMD' || return 1 ;;
esac ;;
lshw)
lshw)
case $2 in
nvidia) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[10DE\]' || return 1 ;;
amdgpu) available lshw && $SUDO lshw -c display -numeric | grep -q 'vendor: .* \[1002\]' || return 1 ;;
@@ -181,7 +194,7 @@ if check_gpu lspci amdgpu || check_gpu lshw amdgpu; then
curl --fail --show-error --location --progress-bar "https://ollama.com/download/ollama-linux-amd64-rocm.tgz${VER_PARAM}" \
| $SUDO tar zx --owner ollama --group ollama -C /usr/share/ollama/lib/rocm .
install_success
status "AMD GPU dependencies installed."
status "AMD GPU ready."
exit 0
fi

@@ -274,7 +287,7 @@ if ! check_gpu nvidia-smi || [ -z "$(nvidia-smi | grep -o "CUDA Version: [0-9]*\
esac
fi

if ! lsmod | grep -q nvidia; then
if ! lsmod | grep -q nvidia || ! lsmod | grep -q nvidia_uvm; then
KERNEL_RELEASE="$(uname -r)"
case $OS_NAME in
rocky) $SUDO $PACKAGE_MANAGER -y install kernel-devel kernel-headers ;;
@@ -295,7 +308,19 @@ if ! lsmod | grep -q nvidia; then
fi

$SUDO modprobe nvidia
$SUDO modprobe nvidia_uvm
fi

# make sure the NVIDIA modules are loaded on boot with nvidia-persistenced
if command -v nvidia-persistenced > /dev/null 2>&1; then
$SUDO touch /etc/modules-load.d/nvidia.conf
MODULES="nvidia nvidia-uvm"
for MODULE in $MODULES; do
if ! grep -qxF "$MODULE" /etc/modules-load.d/nvidia.conf; then
echo "$MODULE" | sudo tee -a /etc/modules-load.d/nvidia.conf > /dev/null
fi
done
fi

status "NVIDIA CUDA drivers installed."
status "NVIDIA GPU ready."
install_success

@@ -221,7 +221,7 @@ func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w
}
defer resp.Body.Close()

n, err := io.CopyN(w, io.TeeReader(resp.Body, part), part.Size)
n, err := io.CopyN(w, io.TeeReader(resp.Body, part), part.Size-part.Completed)
if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.ErrUnexpectedEOF) {
// rollback progress
b.Completed.Add(-n)

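The one-line fix above matters when a chunk download is resumed: the retried request only carries the remaining bytes of the part, so the copy limit must be the remaining size (part.Size - part.Completed) rather than the full part size. A standalone sketch of the same idea:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyRemaining resumes a partially completed part: only size-completed bytes
// are expected from the (range-requested) body, so that is the CopyN limit.
func copyRemaining(dst *bytes.Buffer, body io.Reader, size, completed int64) (int64, error) {
	return io.CopyN(dst, body, size-completed)
}

func main() {
	// A 10-byte part of which 6 bytes were already written before the retry;
	// the resumed response body therefore only carries the last 4 bytes.
	var dst bytes.Buffer
	n, err := copyRemaining(&dst, strings.NewReader("6789"), 10, 6)
	fmt.Println(n, err, dst.String()) // 4 <nil> 6789
}
```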
@@ -28,7 +28,7 @@ import (
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/parser"
"github.com/ollama/ollama/server/envconfig"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/types/errtypes"
"github.com/ollama/ollama/types/model"
"github.com/ollama/ollama/version"

@@ -26,11 +26,11 @@ import (
"golang.org/x/exp/slices"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/openai"
"github.com/ollama/ollama/parser"
"github.com/ollama/ollama/server/envconfig"
"github.com/ollama/ollama/types/errtypes"
"github.com/ollama/ollama/types/model"
"github.com/ollama/ollama/version"
@@ -315,10 +315,10 @@ func (s *Server) GenerateHandler(c *gin.Context) {
}

func getDefaultSessionDuration() time.Duration {
if t, exists := os.LookupEnv("OLLAMA_KEEP_ALIVE"); exists {
v, err := strconv.Atoi(t)
if envconfig.KeepAlive != "" {
v, err := strconv.Atoi(envconfig.KeepAlive)
if err != nil {
d, err := time.ParseDuration(t)
d, err := time.ParseDuration(envconfig.KeepAlive)
if err != nil {
return defaultSessionDuration
}
@@ -1025,7 +1025,7 @@ func Serve(ln net.Listener) error {
level = slog.LevelDebug
}

slog.Info("server config", "env", envconfig.AsMap())
slog.Info("server config", "env", envconfig.Values())
handler := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
Level: level,
AddSource: true,

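The getDefaultSessionDuration change above keeps the existing fallback order, strconv.Atoi first and time.ParseDuration second, but reads the value from the envconfig package instead of looking up OLLAMA_KEEP_ALIVE directly. A hedged sketch of that parse order; treating a bare integer as seconds is an assumption here, since the conversion line sits outside the hunk:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseKeepAlive mirrors the fallback order shown above: try an integer,
// then a Go duration string, then a default. Illustrative sketch only.
func parseKeepAlive(value string, fallback time.Duration) time.Duration {
	if value == "" {
		return fallback
	}
	if v, err := strconv.Atoi(value); err == nil {
		return time.Duration(v) * time.Second // assumption: integers are seconds
	}
	if d, err := time.ParseDuration(value); err == nil {
		return d
	}
	return fallback
}

func main() {
	fmt.Println(parseKeepAlive("300", 5*time.Minute))   // 5m0s
	fmt.Println(parseKeepAlive("24h", 5*time.Minute))   // 24h0m0s
	fmt.Println(parseKeepAlive("bogus", 5*time.Minute)) // 5m0s
}
```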
@@ -209,14 +209,14 @@ func Test_Routes(t *testing.T) {
},
}

t.Setenv("OLLAMA_MODELS", t.TempDir())

s := &Server{}
router := s.GenerateRoutes()

httpSrv := httptest.NewServer(router)
t.Cleanup(httpSrv.Close)

t.Setenv("OLLAMA_MODELS", t.TempDir())

for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
u := httpSrv.URL + tc.Path

@@ -16,7 +16,7 @@ import (
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/server/envconfig"
"github.com/ollama/ollama/envconfig"
"golang.org/x/exp/slices"
)

@@ -15,7 +15,7 @@ import (
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/server/envconfig"
"github.com/ollama/ollama/envconfig"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -151,7 +151,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
}

func TestRequests(t *testing.T) {
ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
ctx, done := context.WithTimeout(context.Background(), time.Second)
defer done()

// Same model, same request

@@ -10,6 +10,7 @@ import (
"log/slog"
"path/filepath"
"strings"
"unicode"
)

// Errors
@@ -318,14 +319,14 @@ func isValidPart(kind partKind, s string) bool {
if !isValidLen(kind, s) {
return false
}
for i := range s {
for i, c := range s {
if i == 0 {
if !isAlphanumericOrUnderscore(s[i]) {
if !isAlphanumericOrUnderscore(c) {
return false
}
continue
}
switch s[i] {
switch c {
case '_', '-':
case '.':
if kind == kindNamespace {
@@ -336,7 +337,7 @@ func isValidPart(kind partKind, s string) bool {
return false
}
default:
if !isAlphanumericOrUnderscore(s[i]) {
if !isAlphanumericOrUnderscore(c) {
return false
}
}
@@ -344,8 +345,8 @@ func isValidPart(kind partKind, s string) bool {
return true
}

func isAlphanumericOrUnderscore(c byte) bool {
return c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z' || c >= '0' && c <= '9' || c == '_'
func isAlphanumericOrUnderscore(c rune) bool {
return unicode.IsLetter(c) || unicode.IsDigit(c) || c == '_'
}

func cutLast(s, sep string) (before, after string, ok bool) {

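Switching isValidPart from byte indexing to `for i, c := range s` means each character is examined as a whole rune, and unicode.IsLetter/IsDigit accept letters and digits beyond ASCII. A small sketch of the behavioral difference between the old byte-based check and the new rune-based one:

```go
package main

import (
	"fmt"
	"unicode"
)

// old byte-based check: only ASCII letters, digits, and '_' pass.
func isAlphanumericOrUnderscoreByte(c byte) bool {
	return c >= 'A' && c <= 'Z' || c >= 'a' && c <= 'z' || c >= '0' && c <= '9' || c == '_'
}

// new rune-based check: any Unicode letter or digit, plus '_'.
func isAlphanumericOrUnderscoreRune(c rune) bool {
	return unicode.IsLetter(c) || unicode.IsDigit(c) || c == '_'
}

func main() {
	name := "modèle"
	for i, c := range name {
		// The byte-based check only ever saw the first byte of a multi-byte
		// rune, so 'è' (0xC3 0xA8 in UTF-8) was always rejected.
		fmt.Printf("%q byte-check=%v rune-check=%v\n",
			c, isAlphanumericOrUnderscoreByte(name[i]), isAlphanumericOrUnderscoreRune(c))
	}
}
```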