Mirror of https://github.com/ollama/ollama.git

Compare commits: 20 commits (jyan/quant ... jyan/quant)
Commits in this range (SHA1):

- a548eb6003
- f92818d90d
- 1ef59057d0
- 106fe6b4ae
- 5fd359d117
- b0e4e8d76c
- e59453982d
- 369113970a
- 26ed829415
- 542134bf50
- 9e0b8f1fe2
- c498609ba3
- c800a67f1b
- dfc62648f3
- 24e8292e94
- c63b4ecbf7
- ee2b9b076c
- bec9100f32
- 1344843515
- e87eafe5cd
.gitattributes (vendored, 1 change)

```diff
@@ -1,2 +1 @@
-llm/ext_server/* linguist-vendored
 llm/*.h linguist-vendored
```
llm/llama.h (1227 changes): file diff suppressed because it is too large.
llm/llm.go (44 changes)

```diff
@@ -1,6 +1,6 @@
 package llm
 
-// #cgo CPPFLAGS: -Illama.cpp/ggml/include
+// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include
 // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread
 // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal
 // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src
@@ -9,20 +9,16 @@ package llm
 // #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src
 // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
 // #include <stdlib.h>
-// #include <stdatomic.h>
 // #include "llama.h"
 // bool update_quantize_progress(float progress, void* data) {
-//     atomic_int* atomicData = (atomic_int*)data;
-//     int intProgress = *((int*)&progress);
-//     atomic_store(atomicData, intProgress);
-//     return true;
+//     *((float*)data) = progress;
+//     return true;
 // }
 import "C"
 import (
 	"fmt"
-	"sync/atomic"
-	"time"
 	"unsafe"
+	"time"
 
 	"github.com/ollama/ollama/api"
 )
@@ -44,16 +40,16 @@ func Quantize(infile, outfile string, ftype fileType, fn func(resp api.ProgressR
 	params.ftype = ftype.Value()
 
 	// Initialize "global" to store progress
-	store := (*int32)(C.malloc(C.sizeof_int))
+	store := C.malloc(C.sizeof_float)
 	defer C.free(unsafe.Pointer(store))
 
 	// Initialize store value, e.g., setting initial progress to 0
-	atomic.StoreInt32(store, 0)
+	*(*C.float)(store) = 0.0
 
-	params.quantize_callback_data = unsafe.Pointer(store)
+	params.quantize_callback_data = store
 	params.quantize_callback = (C.llama_progress_callback)(C.update_quantize_progress)
 
-	ticker := time.NewTicker(30 * time.Millisecond)
+	ticker := time.NewTicker(60 * time.Millisecond)
 	done := make(chan struct{})
 	defer close(done)
 
@@ -62,18 +58,16 @@ func Quantize(infile, outfile string, ftype fileType, fn func(resp api.ProgressR
 	for {
 		select {
 		case <-ticker.C:
-			progressInt := atomic.LoadInt32(store)
-			progress := *(*float32)(unsafe.Pointer(&progressInt))
 			fn(api.ProgressResponse{
-				Status:   fmt.Sprintf("quantizing model tensors %d/%d", int(progress), tensorCount),
+				Status:   fmt.Sprintf("quantizing model %d/%d", int(*((*C.float)(store))), tensorCount),
 				Quantize: "quant",
 			})
-			fmt.Println("Progress: ", progress)
+			fmt.Println("Progress: ", *((*C.float)(store)))
 		case <-done:
 			fn(api.ProgressResponse{
-				Status:   fmt.Sprintf("quantizing model tensors %d/%d", tensorCount, tensorCount),
+				Status:   fmt.Sprintf("quantizing model %d/%d", tensorCount, tensorCount),
 				Quantize: "quant",
 			})
 			return
 		}
 	}
```
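Read as a whole, the new llm.go drops the atomic-int/bit-cast round trip: the cgo callback now just overwrites a plain C float, and the Go side polls it on a ticker. The sketch below is a minimal, self-contained reproduction of that pattern, not ollama code; simulate_work, its usleep pacing, and the tensor count are invented stand-ins, and the unsynchronized read of the float mirrors the diff, which likewise drops atomics.

```go
// poll_progress.go — a minimal sketch of the poll-a-C-float pattern above.
package main

/*
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

// Same shape as update_quantize_progress in llm.go: write the latest
// progress through the user-data pointer and return true to continue.
static bool report_progress(float progress, void* data) {
    *((float*)data) = progress;
    return true;
}

// Stand-in for the per-tensor quantize loop (POSIX usleep, sketch only).
static void simulate_work(int n_tensors, void* data) {
    for (int i = 0; i < n_tensors; i++) {
        usleep(100 * 1000); // pretend each tensor takes 100ms
        report_progress((float)(i + 1), data);
    }
}
*/
import "C"

import (
	"fmt"
	"time"
)

func main() {
	const tensorCount = 5

	// The shared store lives in C memory so the C side may write it freely.
	store := C.malloc(C.sizeof_float)
	defer C.free(store)
	*(*C.float)(store) = 0.0

	done := make(chan struct{})
	ticker := time.NewTicker(60 * time.Millisecond)
	defer ticker.Stop()

	go func() {
		for {
			select {
			case <-ticker.C:
				fmt.Printf("quantizing model %d/%d\n", int(*(*C.float)(store)), tensorCount)
			case <-done:
				fmt.Printf("quantizing model %d/%d\n", tensorCount, tensorCount)
				return
			}
		}
	}()

	C.simulate_work(C.int(tensorCount), store)
	close(done)
	time.Sleep(10 * time.Millisecond) // let the monitor print its final line
}
```

The trade-off is that a read can race a write; for a monotonically increasing counter that only feeds a status line, a stale value for one 60ms tick is harmless.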
Patch file (name not shown in this view)

```diff
@@ -1,32 +1,18 @@
-From ed941590d59fc07b1ad21d6aa458588e47d1e446 Mon Sep 17 00:00:00 2001
+From fa509abf281177eacdc71a2a14432c4e6ed74a47 Mon Sep 17 00:00:00 2001
 From: Josh Yan <jyan00017@gmail.com>
-Date: Wed, 10 Jul 2024 13:39:39 -0700
-Subject: [PATCH] quantize progress
+Date: Wed, 10 Jul 2024 12:58:31 -0700
+Subject: [PATCH] quantize callback
 
 ---
- include/llama.h | 3 +++
- src/llama.cpp   | 8 ++++++++
+ llama.cpp | 8 ++++++++
+ llama.h   | 3 +++
 2 files changed, 11 insertions(+)
 
-diff --git a/include/llama.h b/include/llama.h
-index bb4b05ba..613db68e 100644
---- a/include/llama.h
-+++ b/include/llama.h
-@@ -349,6 +349,9 @@ extern "C" {
-         bool keep_split;      // quantize to the same number of shards
-         void * imatrix;       // pointer to importance matrix data
-         void * kv_overrides;  // pointer to vector containing overrides
-+
-+        llama_progress_callback quantize_callback; // callback to report quantization progress
-+        void * quantize_callback_data;             // user data for the callback
-     } llama_model_quantize_params;
- 
-     // grammar types
-diff --git a/src/llama.cpp b/src/llama.cpp
-index 2b9ace28..ac640c02 100644
---- a/src/llama.cpp
-+++ b/src/llama.cpp
-@@ -18252,6 +18252,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+diff --git a/llama.cpp b/llama.cpp
+index 61948751..d3126510 100644
+--- a/llama.cpp
++++ b/llama.cpp
+@@ -15586,6 +15586,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     const auto tn = LLM_TN(model.arch);
     new_ofstream(0);
     for (int i = 0; i < ml.n_tensors; ++i) {
@@ -39,7 +25,7 @@ index 2b9ace28..ac640c02 100644
         auto weight = ml.get_weight(i);
         struct ggml_tensor * tensor = weight->tensor;
         if (weight->idx != cur_split && params->keep_split) {
-@@ -18789,6 +18795,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
+@@ -16119,6 +16125,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
         /*.keep_split   =*/ false,
         /*.imatrix      =*/ nullptr,
         /*.kv_overrides =*/ nullptr,
@@ -48,5 +34,20 @@ index 2b9ace28..ac640c02 100644
     };
 
     return result;
+diff --git a/llama.h b/llama.h
+index da310ffa..3cbe6023 100644
+--- a/llama.h
++++ b/llama.h
+@@ -337,6 +337,9 @@ extern "C" {
+        bool keep_split;      // quantize to the same number of shards
+        void * imatrix;       // pointer to importance matrix data
+        void * kv_overrides;  // pointer to vector containing overrides
++
++        llama_progress_callback quantize_callback; // callback to report quantization progress
++        void * quantize_callback_data;             // user data for the callback
+     } llama_model_quantize_params;
+ 
+     // grammar types
 --
 2.39.3 (Apple Git-146)
+
```
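The patch adds only two fields and a per-tensor invocation, but the contract they imply is worth spelling out: llama_progress_callback is typedef'd in llama.h as bool (*)(float progress, void * user_data), and in llama.cpp's existing use of that typedef for model loading a false return aborts the operation. Below is a hedged cgo sketch of that contract; progress_cb, params_sketch, run_quantize, and print_progress are illustrative names, not symbols from llama.cpp or from this patch.

```go
// callback_contract.go — a sketch of the callback contract, not the patch body.
package main

/*
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef bool (*progress_cb)(float progress, void * user_data);

typedef struct {
    progress_cb cb;       // like quantize_callback: may be NULL
    void      * cb_data;  // like quantize_callback_data: opaque user state
} params_sketch;

// Stand-in for the per-tensor loop in llama_model_quantize_internal:
// honor a NULL callback and treat a false return as cancellation.
static int run_quantize(params_sketch * p, int n_tensors) {
    for (int i = 0; i < n_tensors; i++) {
        if (p->cb && !p->cb((float)i, p->cb_data)) {
            return i; // cancelled by the callback
        }
    }
    return n_tensors;
}

static bool print_progress(float progress, void * data) {
    (void)data;
    printf("tensor %.0f\n", progress);
    return true; // keep quantizing
}
*/
import "C"

import "fmt"

func main() {
	params := C.params_sketch{
		cb:      C.progress_cb(C.print_progress),
		cb_data: nil,
	}
	fmt.Println("tensors processed:", C.run_quantize(&params, 4))
}
```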
File containing CreateModel (name not shown in this view)

```diff
@@ -422,17 +422,12 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 			if err != nil {
 				return err
 			}
 
-			tensorCount := len(baseLayer.GGML.Tensors())
-
 			ft := baseLayer.GGML.KV().FileType()
 			if !slices.Contains([]string{"F16", "F32"}, ft.String()) {
 				return errors.New("quantization is only supported for F16 and F32 models")
 			} else if want != ft {
-				fn(api.ProgressResponse{
-					Status:   "quantizing model tensors",
-					Quantize: "quant",
-				})
+
 				blob, err := GetBlobsPath(baseLayer.Digest)
 				if err != nil {
 					return err
@@ -478,6 +473,7 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 
 				layers = append(layers, baseLayer.Layer)
 			}
+
 		case "license", "template", "system":
 			if c.Name != "license" {
 				// replace
```
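On the server side, the hunks above leave CreateModel with just the F16/F32 gate; per-tensor status now comes from the quantize step itself. Here is a self-contained sketch of that flow; ProgressResponse, quantize, and maybeQuantize are local stand-ins for the ollama api and llm packages, not the real types.

```go
// createmodel_flow.go — a sketch of the caller-side gate and progress fn.
package main

import (
	"errors"
	"fmt"
	"slices"
)

type ProgressResponse struct {
	Status   string
	Quantize string
}

// quantize stands in for llm.Quantize: it reports per-tensor progress
// through fn, as the polling goroutine in llm.go does.
func quantize(tensorCount int, fn func(ProgressResponse)) {
	for i := 1; i <= tensorCount; i++ {
		fn(ProgressResponse{
			Status:   fmt.Sprintf("quantizing model %d/%d", i, tensorCount),
			Quantize: "quant",
		})
	}
}

func maybeQuantize(ft, want string, tensorCount int, fn func(ProgressResponse)) error {
	// Only F16/F32 sources may be quantized, mirroring the check in the diff.
	if !slices.Contains([]string{"F16", "F32"}, ft) {
		return errors.New("quantization is only supported for F16 and F32 models")
	}
	if want != ft {
		quantize(tensorCount, fn)
	}
	return nil
}

func main() {
	fn := func(resp ProgressResponse) { fmt.Println(resp.Status) }
	if err := maybeQuantize("F16", "Q4_K_M", 4, fn); err != nil {
		fmt.Println("error:", err)
	}
}
```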