Compare commits

...

8 Commits

- e2389b63aa (Matt Williams, 2023-09-14 07:12:09 -07:00)
  add examples of streaming in python and node
  Signed-off-by: Matt Williams <m@technovangelist.com>
- f89c23764b (Michael Yang, 2023-09-13 15:08:47 -07:00)
  Merge pull request #525 from jmorganca/mxyng/falcon-decode
  fix: add falcon.go
- d028853879 (Michael Yang, 2023-09-13 14:47:37 -07:00)
  fix: add falcon.go
- 949553db23 (Michael Yang, 2023-09-13 12:43:57 -07:00)
  Merge pull request #519 from jmorganca/mxyng/decode
  Mxyng/decode
- 0c5a454361 (Michael Yang, 2023-09-12 15:12:59 -07:00)
  fix model type for 70b
- f59c4d03f7 (Bruce MacDonald, 2023-09-12 17:06:48 -04:00)
  fix ggml arm64 cuda build (#520)
- 7dee25a07f (Michael Yang, 2023-09-12 12:34:53 -07:00)
  fix falcon decode
  get model and file type from bin file
- f221637053 (Bruce MacDonald, 2023-09-12 11:04:35 -04:00)
  first pass at linux gpu support (#454)
  * linux gpu support
  * handle multiple gpus
  * add cuda docker image (#488)
  Co-authored-by: Michael Yang <mxyng@pm.me>
17 changed files with 390 additions and 183 deletions

Dockerfile.cuda (new file, 22 lines added)

@@ -0,0 +1,22 @@
FROM nvidia/cuda:12.2.0-devel-ubuntu22.04
WORKDIR /go/src/github.com/jmorganca/ollama
RUN apt-get update && apt-get install -y git build-essential cmake
ADD https://dl.google.com/go/go1.21.1.linux-amd64.tar.gz /tmp/go1.21.1.tar.gz
RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.1.tar.gz
COPY . .
RUN /usr/local/go/bin/go generate ./... && /usr/local/go/bin/go build -ldflags '-linkmode external -extldflags "-static"' .
FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04
ENV OLLAMA_HOST 0.0.0.0
ARG USER=ollama
ARG GROUP=ollama
RUN groupadd $GROUP && useradd -m -g $GROUP $USER
COPY --from=0 /go/src/github.com/jmorganca/ollama/ollama /bin/ollama
USER $USER:$GROUP
ENTRYPOINT ["/bin/ollama"]
CMD ["serve"]

Changed file

@@ -291,7 +291,7 @@ func DefaultOptions() Options {
NumCtx: 2048,
NumKeep: -1,
NumBatch: 512,
NumGPU: 1,
NumGPU: -1, // -1 here indicates that NumGPU should be set dynamically
NumGQA: 1,
LowVRAM: false,
F16KV: true,
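Note on the new default: NumGPU is now a sentinel ("decide at runtime") rather than a fixed layer count. Below is a minimal sketch of how it is meant to be consumed, using only functions that appear in this change set (api.DefaultOptions and the new llm.NumGPU, under the module path github.com/jmorganca/ollama seen in the Dockerfile); the wiring is illustrative, not code from these commits:

```go
package main

import (
    "fmt"

    "github.com/jmorganca/ollama/api"
    "github.com/jmorganca/ollama/llm"
)

func main() {
    // NumGPU defaults to -1, i.e. "decide how many layers to offload at runtime".
    opts := api.DefaultOptions()

    // llm.NumGPU (added later in this diff) resolves the sentinel: it probes
    // VRAM via nvidia-smi on Linux and falls back to 1 (Metal) on macOS.
    fmt.Println("offloading", llm.NumGPU(opts), "GPU layers")
}
```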

Changed file

@@ -20,6 +20,10 @@ Model names follow a `model:tag` format. Some examples are `orca-mini:3b-q4_1` a
All durations are returned in nanoseconds.
### Streams
Many API responses are streams of JSON objects showing the current status. For examples of working with streams in various languages, see [streaming.md](./streaming.md)
## Generate a completion
```

Changed file

@@ -6,6 +6,10 @@
Install required tools:
- cmake version 3.24 or higher
- go version 1.20 or higher
- gcc version 11.4.0 or higher
```
brew install go cmake gcc
```
@@ -27,3 +31,9 @@ Now you can run `ollama`:
```
./ollama
```
## Building on Linux with GPU support
- Install cmake and nvidia-cuda-toolkit
- run `go generate ./...`
- run `go build .`

docs/streaming.md (new file, 35 lines added)

@@ -0,0 +1,35 @@
# Streaming responses in the Ollama Client API
## JavaScript / TypeScript / Deno
```javascript
const pull = async () => {
  const request = await fetch("http://localhost:11434/api/pull", {
    method: "POST",
    body: JSON.stringify({ name: "llama2:7b-q5_0" }),
  });
  const reader = await request.body?.pipeThrough(new TextDecoderStream());
  if (!reader) throw new Error("No reader");
  for await (const chunk of reader) {
    const out = JSON.parse(chunk);
    if (out.status.startsWith("downloading")) {
      console.log(`${out.status} - ${(out.completed / out.total) * 100}%`);
    }
  }
}
pull();
```
## Python
```python
import requests
import json
response = requests.post("http://localhost:11434/api/pull", json={"name": "llama2:7b-q5_0"}, stream=True)
for data in response.iter_lines():
    out = json.loads(data)
    if "completed" in out:
        print(out["completed"] / out["total"] * 100)
```

llm/falcon.go (new file, 22 lines added)

@@ -0,0 +1,22 @@
package llm

const ModelFamilyFalcon = "falcon"

const (
    falconModelType7B   = 32
    falconModelType40B  = 60
    falconModelType180B = 80
)

func falconModelType(numLayer uint32) string {
    switch numLayer {
    case 32:
        return "7B"
    case 60:
        return "40B"
    case 80:
        return "180B"
    default:
        return "Unknown"
    }
}
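The new mapping is pure data, so a hypothetical table-driven test (not part of these commits) is enough to pin it down; note that 80 blocks means "180B" for falcon but "65B" for llama, which is why the GGUF decoder below dispatches on model family before looking at block count:

```go
package llm

import "testing"

// Hypothetical test, not included in this change set: checks the
// block-count to size-label mapping added in falcon.go.
func TestFalconModelType(t *testing.T) {
    cases := map[uint32]string{32: "7B", 60: "40B", 80: "180B", 48: "Unknown"}
    for blocks, want := range cases {
        if got := falconModelType(blocks); got != want {
            t.Errorf("falconModelType(%d) = %q, want %q", blocks, got, want)
        }
    }
}
```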

Changed file

@@ -8,54 +8,77 @@ import (
"sync"
)
type ModelFamily string
const ModelFamilyUnknown ModelFamily = "unknown"
type ModelType uint32
const (
ModelType3B ModelType = 26
ModelType7B ModelType = 32
ModelType13B ModelType = 40
ModelType34B ModelType = 48
ModelType30B ModelType = 60
ModelType65B ModelType = 80
)
func (mt ModelType) String() string {
switch mt {
case ModelType3B:
return "3B"
case ModelType7B:
return "7B"
case ModelType13B:
return "13B"
case ModelType34B:
return "34B"
case ModelType30B:
return "30B"
case ModelType65B:
return "65B"
default:
return "Unknown"
}
}
type FileType interface {
String() string
}
type GGML struct {
magic uint32
container
model
}
const (
fileTypeF32 uint32 = iota
fileTypeF16
fileTypeQ4_0
fileTypeQ4_1
fileTypeQ4_1_F16
fileTypeQ8_0 uint32 = iota + 2
fileTypeQ5_0
fileTypeQ5_1
fileTypeQ2_K
fileTypeQ3_K_S
fileTypeQ3_K_M
fileTypeQ3_K_L
fileTypeQ4_K_S
fileTypeQ4_K_M
fileTypeQ5_K_S
fileTypeQ5_K_M
fileTypeQ6_K
)
func fileType(fileType uint32) string {
switch fileType {
case fileTypeF32:
return "F32"
case fileTypeF16:
return "F16"
case fileTypeQ4_0:
return "Q4_0"
case fileTypeQ4_1:
return "Q4_1"
case fileTypeQ4_1_F16:
return "Q4_1_F16"
case fileTypeQ8_0:
return "Q8_0"
case fileTypeQ5_0:
return "Q5_0"
case fileTypeQ5_1:
return "Q5_1"
case fileTypeQ2_K:
return "Q2_K"
case fileTypeQ3_K_S:
return "Q3_K_S"
case fileTypeQ3_K_M:
return "Q3_K_M"
case fileTypeQ3_K_L:
return "Q3_K_L"
case fileTypeQ4_K_S:
return "Q4_K_S"
case fileTypeQ4_K_M:
return "Q4_K_M"
case fileTypeQ5_K_S:
return "Q5_K_S"
case fileTypeQ5_K_M:
return "Q5_K_M"
case fileTypeQ6_K:
return "Q6_K"
default:
return "Unknown"
}
}
type model interface {
ModelFamily() ModelFamily
ModelType() ModelType
FileType() FileType
ModelFamily() string
ModelType() string
FileType() string
}
type container interface {
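One subtlety in the new fileType constants: the second `iota + 2` restart skips values 5 and 6 (in llama.cpp's file-type enum those slots belonged to the removed Q4_2/Q4_3 formats; that reading comes from upstream, not from this diff), so fileTypeQ8_0 lands on 7 as the model files expect. A standalone check of the arithmetic, mirroring a prefix of the constant block above:

```go
package main

import "fmt"

const (
    fileTypeF32 uint32 = iota // 0
    fileTypeF16               // 1
    fileTypeQ4_0              // 2
    fileTypeQ4_1              // 3
    fileTypeQ4_1_F16          // 4
    // iota is 5 on the next line; adding 2 skips the two unused slots.
    fileTypeQ8_0 uint32 = iota + 2 // 7
    fileTypeQ5_0                   // 8
    fileTypeQ5_1                   // 9
)

func main() {
    fmt.Println(fileTypeQ8_0, fileTypeQ5_0, fileTypeQ5_1) // 7 8 9
}
```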

Changed file

@@ -6,7 +6,6 @@ import (
"errors"
"fmt"
"io"
"log"
"path"
"sync"
)
@@ -87,38 +86,43 @@ func (llm *ggufModel) NumKV() uint64 {
return llm.V2.NumKV
}
func (llm *ggufModel) ModelFamily() ModelFamily {
func (llm *ggufModel) ModelFamily() string {
t, ok := llm.kv["general.architecture"].(string)
if ok {
return ModelFamily(t)
return t
}
log.Printf("unknown model family: %T", t)
return ModelFamilyUnknown
return "unknown"
}
func (llm *ggufModel) ModelType() ModelType {
func (llm *ggufModel) ModelType() string {
switch llm.ModelFamily() {
case ModelFamilyLlama:
blocks, ok := llm.kv["llama.block_count"].(uint32)
if ok {
return ModelType(blocks)
case "llama":
if blocks, ok := llm.kv["llama.block_count"].(uint32); ok {
heads, headsOK := llm.kv["llama.head_count"].(uint32)
headKVs, headsKVsOK := llm.kv["llama.head_count_kv"].(uint32)
if headsOK && headsKVsOK && heads/headKVs == 8 {
return "70B"
}
return llamaModelType(blocks)
}
case "falcon":
if blocks, ok := llm.kv["falcon.block_count"].(uint32); ok {
return falconModelType(blocks)
}
}
return ModelType7B
return "Unknown"
}
func (llm *ggufModel) FileType() FileType {
switch llm.ModelFamily() {
case ModelFamilyLlama:
t, ok := llm.kv["general.file_type"].(uint32)
if ok {
return llamaFileType(t)
}
func (llm *ggufModel) FileType() string {
t, ok := llm.kv["general.file_type"].(uint32)
if ok {
return fileType(t)
}
return llamaFileTypeF16
return "Unknown"
}
func (llm *ggufModel) Decode(r io.Reader) error {
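The interesting part of the new ModelType is the 70B detection: Llama 2 70B has the same 80 blocks as Llama 65B, so block count alone is ambiguous, and the code disambiguates with the grouped-query-attention ratio head_count / head_count_kv == 8. A standalone sketch of that logic follows; the head counts in main are the published Llama 2 70B and Llama 65B values, used for illustration and not read from this diff:

```go
package main

import "fmt"

// detectLlamaSize mirrors the llama branch of ggufModel.ModelType: a GQA
// ratio of 8 marks 70B, otherwise the block count decides.
func detectLlamaSize(blocks, heads, headKVs uint32) string {
    if headKVs != 0 && heads/headKVs == 8 {
        return "70B"
    }
    switch blocks {
    case 26:
        return "3B"
    case 32:
        return "7B"
    case 40:
        return "13B"
    case 48:
        return "34B"
    case 60:
        return "30B"
    case 80:
        return "65B"
    }
    return "Unknown"
}

func main() {
    fmt.Println(detectLlamaSize(80, 64, 8))  // 70B: 80 blocks plus GQA ratio 8
    fmt.Println(detectLlamaSize(80, 64, 64)) // 65B: 80 blocks, no GQA grouping
}
```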

Changed file

@@ -4,12 +4,14 @@
package llm
//go:generate git submodule init
//go:generate git submodule update --force ggml gguf
//go:generate git -C ggml apply ../ggml_patch/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../ggml_patch/0002-34B-model-support.patch
//go:generate git -C ggml apply ../ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
//go:generate git -C ggml apply ../ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
//go:generate cmake --fresh -S ggml -B ggml/build/cpu -DLLAMA_K_QUANTS=on
//go:generate git submodule update --force ggml
//go:generate -command git-apply git -C ggml apply
//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
//go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cpu --target server --config Release
//go:generate git submodule update --force gguf
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on
//go:generate cmake --build gguf/build/cpu --target server --config Release
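The go:generate rewrites in this and the following generate files lean on a less common feature: `//go:generate -command NAME cmd args...` declares NAME as an alias for the rest of that source file, so `git -C ggml apply` is spelled out once and reused for each patch. A toy illustration (hypothetical file, not from the repo):

```go
// Hypothetical example of a go:generate command alias: running
// `go generate ./...` over this file executes `echo generating stubs`,
// because "say" is declared as an alias for "echo" one line earlier.
package example

//go:generate -command say echo
//go:generate say generating stubs
```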

Changed file

@@ -1,12 +1,16 @@
package llm
//go:generate git submodule init
//go:generate git submodule update --force ggml gguf
//go:generate git -C ggml apply ../ggml_patch/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../ggml_patch/0002-34B-model-support.patch
//go:generate git -C ggml apply ../ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
//go:generate git -C ggml apply ../ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
//go:generate cmake --fresh -S ggml -B ggml/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate git submodule update --force ggml
//go:generate -command git-apply git -C ggml apply
//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
//go:generate git-apply ../ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
//go:generate git-apply ../ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
//go:generate cmake -S ggml -B ggml/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build ggml/build/cpu --target server --config Release
//go:generate cmake --fresh -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate git submodule update --force gguf
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=x86_64 -DCMAKE_OSX_ARCHITECTURES=x86_64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build gguf/build/cpu --target server --config Release

Changed file

@@ -1,12 +1,16 @@
package llm
//go:generate git submodule init
//go:generate git submodule update --force ggml gguf
//go:generate git -C ggml apply ../ggml_patch/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../ggml_patch/0002-34B-model-support.patch
//go:generate git -C ggml apply ../ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
//go:generate git -C ggml apply ../ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
//go:generate cmake --fresh -S ggml -B ggml/build/gpu -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate git submodule update --force ggml
//go:generate -command git-apply git -C ggml apply
//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
//go:generate git-apply ../ggml_patch/0003-metal-fix-synchronization-in-new-matrix-multiplicati.patch
//go:generate git-apply ../ggml_patch/0004-metal-add-missing-barriers-for-mul-mat-2699.patch
//go:generate cmake -S ggml -B ggml/build/gpu -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build ggml/build/gpu --target server --config Release
//go:generate git submodule update --force gguf
//go:generate cmake -S gguf -B gguf/build/gpu -DLLAMA_METAL=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DCMAKE_SYSTEM_PROCESSOR=arm64 -DCMAKE_OSX_ARCHITECTURES=arm64 -DCMAKE_OSX_DEPLOYMENT_TARGET=11.0
//go:generate cmake --build gguf/build/gpu --target server --config Release

Changed file

@@ -0,0 +1,15 @@
package llm
//go:generate git submodule init
//go:generate git submodule update --force ggml
//go:generate -command git-apply git -C ggml apply
//go:generate git-apply ../ggml_patch/0001-add-detokenize-endpoint.patch
//go:generate git-apply ../ggml_patch/0002-34B-model-support.patch
//go:generate git-apply ../ggml_patch/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate cmake -S ggml -B ggml/build/gpu -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/gpu --target server --config Release
//go:generate git submodule update --force gguf
//go:generate cmake -S gguf -B gguf/build/gpu -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build gguf/build/gpu --target server --config Release

Changed file

@@ -0,0 +1,32 @@
From 1e3bc523d8053a77df3ac7126a84d0297ee97ef6 Mon Sep 17 00:00:00 2001
From: Kylin <56434533+KyL0N@users.noreply.github.com>
Date: Tue, 22 Aug 2023 15:14:23 +0800
Subject: [PATCH] ggml : support CUDA's half type for aarch64(#1455) (#2670)
* ggml: support CUDA's half type for aarch64(#1455)
support CUDA's half type for aarch64 in ggml_fp16_t definition
* ggml: use __CUDACC__ to recognise nvcc compiler
---
ggml.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/ggml.h b/ggml.h
index 544ad2d..0ec7ec5 100644
--- a/ggml.h
+++ b/ggml.h
@@ -259,8 +259,9 @@
extern "C" {
#endif
-#ifdef __ARM_NEON
- // we use the built-in 16-bit float type
+#if defined(__ARM_NEON) && defined(__CUDACC__)
+ typedef half ggml_fp16_t;
+#elif defined(__ARM_NEON)
typedef __fp16 ggml_fp16_t;
#else
typedef uint16_t ggml_fp16_t;
--
2.39.2 (Apple Git-143)

Changed file

@@ -58,6 +58,12 @@ func chooseRunner(gpuPath, cpuPath string) string {
if llamaPath == osPath(gpuPath) {
files = append(files, "ggml-metal.metal")
}
case "linux":
// check if there is a GPU available
if _, err := CheckVRAM(); errors.Is(err, errNoGPU) {
// this error was logged on start-up, so we don't need to log it again
llamaPath = osPath(cpuPath)
}
}
for _, f := range files {
@@ -89,38 +95,39 @@ func chooseRunner(gpuPath, cpuPath string) string {
return runPath
}
const ModelFamilyLlama ModelFamily = "llama"
type llamaModel struct {
hyperparameters llamaHyperparameters
}
func (llm *llamaModel) ModelFamily() ModelFamily {
return ModelFamilyLlama
func (llm *llamaModel) ModelFamily() string {
return "llama"
}
func (llm *llamaModel) ModelType() ModelType {
switch llm.hyperparameters.NumLayer {
func llamaModelType(numLayer uint32) string {
switch numLayer {
case 26:
return ModelType3B
return "3B"
case 32:
return ModelType7B
return "7B"
case 40:
return ModelType13B
return "13B"
case 48:
return ModelType34B
return "34B"
case 60:
return ModelType30B
return "30B"
case 80:
return ModelType65B
return "65B"
default:
return "Unknown"
}
// TODO: find a better default
return ModelType7B
}
func (llm *llamaModel) FileType() FileType {
return llm.hyperparameters.FileType
func (llm *llamaModel) ModelType() string {
return llamaModelType(llm.hyperparameters.NumLayer)
}
func (llm *llamaModel) FileType() string {
return fileType(llm.hyperparameters.FileType)
}
type llamaHyperparameters struct {
@@ -137,70 +144,7 @@ type llamaHyperparameters struct {
NumRot uint32
// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
FileType llamaFileType
}
type llamaFileType uint32
const (
llamaFileTypeF32 llamaFileType = iota
llamaFileTypeF16
llamaFileTypeQ4_0
llamaFileTypeQ4_1
llamaFileTypeQ4_1_F16
llamaFileTypeQ8_0 llamaFileType = iota + 2
llamaFileTypeQ5_0
llamaFileTypeQ5_1
llamaFileTypeQ2_K
llamaFileTypeQ3_K_S
llamaFileTypeQ3_K_M
llamaFileTypeQ3_K_L
llamaFileTypeQ4_K_S
llamaFileTypeQ4_K_M
llamaFileTypeQ5_K_S
llamaFileTypeQ5_K_M
llamaFileTypeQ6_K
)
func (ft llamaFileType) String() string {
switch ft {
case llamaFileTypeF32:
return "F32"
case llamaFileTypeF16:
return "F16"
case llamaFileTypeQ4_0:
return "Q4_0"
case llamaFileTypeQ4_1:
return "Q4_1"
case llamaFileTypeQ4_1_F16:
return "Q4_1_F16"
case llamaFileTypeQ8_0:
return "Q8_0"
case llamaFileTypeQ5_0:
return "Q5_0"
case llamaFileTypeQ5_1:
return "Q5_1"
case llamaFileTypeQ2_K:
return "Q2_K"
case llamaFileTypeQ3_K_S:
return "Q3_K_S"
case llamaFileTypeQ3_K_M:
return "Q3_K_M"
case llamaFileTypeQ3_K_L:
return "Q3_K_L"
case llamaFileTypeQ4_K_S:
return "Q4_K_S"
case llamaFileTypeQ4_K_M:
return "Q4_K_M"
case llamaFileTypeQ5_K_S:
return "Q5_K_S"
case llamaFileTypeQ5_K_M:
return "Q5_K_M"
case llamaFileTypeQ6_K:
return "Q6_K"
default:
return "Unknown"
}
FileType uint32
}
type Running struct {
@@ -218,6 +162,72 @@ type llama struct {
Running
}
var errNoGPU = errors.New("nvidia-smi command failed")
// CheckVRAM returns the available VRAM in MiB on Linux machines with NVIDIA GPUs
func CheckVRAM() (int, error) {
cmd := exec.Command("nvidia-smi", "--query-gpu=memory.total", "--format=csv,noheader,nounits")
var stdout bytes.Buffer
cmd.Stdout = &stdout
err := cmd.Run()
if err != nil {
return 0, errNoGPU
}
var total int
scanner := bufio.NewScanner(&stdout)
for scanner.Scan() {
line := scanner.Text()
vram, err := strconv.Atoi(line)
if err != nil {
return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
}
total += vram
}
return total, nil
}
func NumGPU(opts api.Options) int {
if opts.NumGPU != -1 {
return opts.NumGPU
}
n := 1 // default to enable metal on macOS
if runtime.GOOS == "linux" {
vram, err := CheckVRAM()
if err != nil {
if err.Error() != "nvidia-smi command failed" {
log.Print(err.Error())
}
// nvidia driver not installed or no nvidia GPU found
return 0
}
// TODO: this is a very rough heuristic, better would be to calculate this based on number of layers and context size
switch {
case vram < 500:
log.Printf("WARNING: Low VRAM detected, disabling GPU")
n = 0
case vram < 1000:
n = 4
case vram < 2000:
n = 8
case vram < 4000:
n = 12
case vram < 8000:
n = 16
case vram < 12000:
n = 24
case vram < 16000:
n = 32
default:
n = 48
}
log.Printf("%d MB VRAM available, loading %d GPU layers", vram, n)
}
return n
}
func newLlama(model string, adapters []string, runner ModelRunner, opts api.Options) (*llama, error) {
if _, err := os.Stat(model); err != nil {
return nil, err
@@ -237,7 +247,7 @@ func newLlama(model string, adapters []string, runner ModelRunner, opts api.Opti
"--rope-freq-base", fmt.Sprintf("%f", opts.RopeFrequencyBase),
"--rope-freq-scale", fmt.Sprintf("%f", opts.RopeFrequencyScale),
"--batch-size", fmt.Sprintf("%d", opts.NumBatch),
"--n-gpu-layers", fmt.Sprintf("%d", opts.NumGPU),
"--n-gpu-layers", fmt.Sprintf("%d", NumGPU(opts)),
"--embedding",
}
@@ -305,7 +315,7 @@ func newLlama(model string, adapters []string, runner ModelRunner, opts api.Opti
func waitForServer(llm *llama) error {
// wait for the server to start responding
start := time.Now()
expiresAt := time.Now().Add(30 * time.Second)
expiresAt := time.Now().Add(45 * time.Second)
ticker := time.NewTicker(200 * time.Millisecond)
log.Print("waiting for llama.cpp server to start responding")
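For quick reference, here is the VRAM-to-layer-count mapping from NumGPU above, lifted into a standalone snippet so the thresholds can be tried against sample values (MiB, as nvidia-smi reports them with --format=csv,noheader,nounits):

```go
package main

import "fmt"

// layersForVRAM copies the switch from NumGPU; as the TODO in the diff notes,
// it is a rough heuristic rather than a per-model calculation.
func layersForVRAM(vram int) int {
    switch {
    case vram < 500:
        return 0 // low VRAM: GPU offload disabled
    case vram < 1000:
        return 4
    case vram < 2000:
        return 8
    case vram < 4000:
        return 12
    case vram < 8000:
        return 16
    case vram < 12000:
        return 24
    case vram < 16000:
        return 32
    default:
        return 48
    }
}

func main() {
    for _, mib := range []int{4096, 8192, 24576} {
        fmt.Printf("%5d MiB -> %d layers\n", mib, layersForVRAM(mib))
    }
}
```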

Changed file

@@ -37,7 +37,7 @@ func New(model string, adapters []string, opts api.Options) (LLM, error) {
return nil, err
}
switch ggml.FileType().String() {
switch ggml.FileType() {
case "Q8_0":
if ggml.Name() != "gguf" && opts.NumGPU != 0 {
// GGML Q8_0 do not support Metal API and will
@@ -56,30 +56,36 @@ func New(model string, adapters []string, opts api.Options) (LLM, error) {
totalResidentMemory := memory.TotalMemory()
switch ggml.ModelType() {
case ModelType3B, ModelType7B:
if ggml.FileType().String() == "F16" && totalResidentMemory < 16*1024*1024 {
case "3B", "7B":
if ggml.FileType() == "F16" && totalResidentMemory < 16*1024*1024 {
return nil, fmt.Errorf("F16 model requires at least 16GB of memory")
} else if totalResidentMemory < 8*1024*1024 {
return nil, fmt.Errorf("model requires at least 8GB of memory")
}
case ModelType13B:
if ggml.FileType().String() == "F16" && totalResidentMemory < 32*1024*1024 {
case "13B":
if ggml.FileType() == "F16" && totalResidentMemory < 32*1024*1024 {
return nil, fmt.Errorf("F16 model requires at least 32GB of memory")
} else if totalResidentMemory < 16*1024*1024 {
return nil, fmt.Errorf("model requires at least 16GB of memory")
}
case ModelType30B, ModelType34B:
if ggml.FileType().String() == "F16" && totalResidentMemory < 64*1024*1024 {
case "30B", "34B", "40B":
if ggml.FileType() == "F16" && totalResidentMemory < 64*1024*1024 {
return nil, fmt.Errorf("F16 model requires at least 64GB of memory")
} else if totalResidentMemory < 32*1024*1024 {
return nil, fmt.Errorf("model requires at least 32GB of memory")
}
case ModelType65B:
if ggml.FileType().String() == "F16" && totalResidentMemory < 128*1024*1024 {
case "65B", "70B":
if ggml.FileType() == "F16" && totalResidentMemory < 128*1024*1024 {
return nil, fmt.Errorf("F16 model requires at least 128GB of memory")
} else if totalResidentMemory < 64*1024*1024 {
return nil, fmt.Errorf("model requires at least 64GB of memory")
}
case "180B":
if ggml.FileType() == "F16" && totalResidentMemory < 512*1024*1024 {
return nil, fmt.Errorf("F16 model requires at least 512GB of memory")
} else if totalResidentMemory < 128*1024*1024 {
return nil, fmt.Errorf("model requires at least 128GB of memory")
}
}
switch ggml.Name() {

Changed file

@@ -114,11 +114,11 @@ type LayerReader struct {
}
type ConfigV2 struct {
ModelFamily llm.ModelFamily `json:"model_family"`
ModelType string `json:"model_type"`
ModelFormat string `json:"model_format"`
FileType string `json:"file_type"`
RootFS RootFS `json:"rootfs"`
ModelFormat string `json:"model_format"`
ModelFamily string `json:"model_family"`
ModelType string `json:"model_type"`
FileType string `json:"file_type"`
RootFS RootFS `json:"rootfs"`
// required by spec
Architecture string `json:"architecture"`
@@ -357,10 +357,10 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
return err
}
config.ModelFamily = ggml.ModelFamily()
config.ModelType = ggml.ModelType().String()
config.ModelFormat = ggml.Name()
config.FileType = ggml.FileType().String()
config.ModelFamily = ggml.ModelFamily()
config.ModelType = ggml.ModelType()
config.FileType = ggml.FileType()
// reset the file
file.Seek(0, io.SeekStart)
@@ -498,6 +498,12 @@ func CreateModel(ctx context.Context, name string, path string, fn func(resp api
}
}
if config.ModelType == "65B" {
if numGQA, ok := formattedParams["num_gqa"].(int); ok && numGQA == 8 {
config.ModelType = "70B"
}
}
bts, err := json.Marshal(formattedParams)
if err != nil {
return err
@@ -815,14 +821,14 @@ func formatParams(params map[string][]string) (map[string]interface{}, error) {
return nil, fmt.Errorf("invalid float value %s", vals)
}
out[key] = floatVal
out[key] = float32(floatVal)
case reflect.Int:
intVal, err := strconv.ParseInt(vals[0], 10, 0)
intVal, err := strconv.ParseInt(vals[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid int value %s", vals)
}
out[key] = intVal
out[key] = int(intVal)
case reflect.Bool:
boolVal, err := strconv.ParseBool(vals[0])
if err != nil {
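One visible reason for the int cast in formatParams: the num_gqa check added earlier in this file asserts `.(int)`, and a type assertion only matches the exact dynamic type, so the int64 returned by strconv.ParseInt would never satisfy it (the float32 cast presumably keeps values aligned with api.Options' float32 fields in the same way). A minimal demonstration:

```go
package main

import "fmt"

func main() {
    // Values stored as int64 fail an .(int) assertion; converting to int
    // first, as formatParams now does, makes the num_gqa check work.
    asInt64 := map[string]interface{}{"num_gqa": int64(8)}
    asInt := map[string]interface{}{"num_gqa": 8}

    _, ok := asInt64["num_gqa"].(int)
    fmt.Println("int64 matches .(int):", ok) // false
    _, ok = asInt["num_gqa"].(int)
    fmt.Println("int matches .(int):", ok) // true
}
```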

Changed file

@@ -12,6 +12,7 @@ import (
"os/signal"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
@@ -548,6 +549,13 @@ func Serve(ln net.Listener, origins []string) error {
os.Exit(0)
}()
if runtime.GOOS == "linux" {
// check compatibility to log warnings
if _, err := llm.CheckVRAM(); err != nil {
log.Printf("Warning: GPU support not enabled, you may need to install GPU drivers: %v", err)
}
}
return s.Serve(ln)
}