Compare commits


7 Commits

Author      SHA1          Message             Date
Josh Yan    8476ef2bd8    atomic for race     2024-07-15 10:44:35 -07:00
Josh Yan    4c9a160a08    race                2024-07-12 11:52:10 -07:00
Josh Yan    657a1102fc    lint complained     2024-07-11 09:26:17 -07:00
Josh Yan    d352c68ffc    move llama.h        2024-07-10 14:20:04 -07:00
Josh Yan    d82d25d70c    patched             2024-07-10 13:57:28 -07:00
Josh Yan    60be9e2840    patch               2024-07-10 13:46:38 -07:00
Josh Yan    a083852eb5    quantize progress   2024-07-10 13:21:22 -07:00
5 changed files with 1285 additions and 48 deletions

.gitattributes vendored (1 addition)

@@ -1 +1,2 @@
llm/ext_server/* linguist-vendored
llm/*.h linguist-vendored

llm/llama.h vendored, new file (1227 additions)
File diff suppressed because it is too large

llm/llm.go

@@ -1,6 +1,6 @@
 package llm
-// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include
+// #cgo CPPFLAGS: -Illama.cpp/ggml/include
 // #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread
 // #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal
 // #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src
@@ -9,16 +9,20 @@ package llm
 // #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src
 // #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
 // #include <stdlib.h>
+// #include <stdatomic.h>
 // #include "llama.h"
 // bool update_quantize_progress(float progress, void* data) {
-// *((float*)data) = progress;
-// return true;
+// atomic_int* atomicData = (atomic_int*)data;
+// int intProgress = *((int*)&progress);
+// atomic_store(atomicData, intProgress);
+// return true;
 // }
 import "C"
 import (
 "fmt"
-"unsafe"
+"sync/atomic"
 "time"
+"unsafe"
 "github.com/ollama/ollama/api"
 )
@@ -40,16 +44,16 @@ func Quantize(infile, outfile string, ftype fileType, fn func(resp api.ProgressR
 params.ftype = ftype.Value()
 // Initialize "global" to store progress
-store := C.malloc(C.sizeof_float)
-defer C.free(unsafe.Pointer(store))
+store := (*int32)(C.malloc(C.sizeof_int))
+defer C.free(unsafe.Pointer(store))
-// Initialize store value, e.g., setting initial progress to 0
-*(*C.float)(store) = 0.0
+// Initialize store value, e.g., setting initial progress to 0
+atomic.StoreInt32(store, 0)
-params.quantize_callback_data = store
+params.quantize_callback_data = unsafe.Pointer(store)
 params.quantize_callback = (C.llama_progress_callback)(C.update_quantize_progress)
-ticker := time.NewTicker(60 * time.Millisecond)
+ticker := time.NewTicker(30 * time.Millisecond)
 done := make(chan struct{})
 defer close(done)
@@ -58,16 +62,18 @@ func Quantize(infile, outfile string, ftype fileType, fn func(resp api.ProgressR
 for {
 select {
 case <-ticker.C:
-fn(api.ProgressResponse{
-Status: fmt.Sprintf("quantizing model %d/%d", int(*((*C.float)(store))), tensorCount),
-Quantize: "quant",
-})
-fmt.Println("Progress: ", *((*C.float)(store)))
+progressInt := atomic.LoadInt32(store)
+progress := *(*float32)(unsafe.Pointer(&progressInt))
+fn(api.ProgressResponse{
+Status: fmt.Sprintf("quantizing model tensors %d/%d", int(progress), tensorCount),
+Quantize: "quant",
+})
+fmt.Println("Progress: ", progress)
 case <-done:
-fn(api.ProgressResponse{
-Status: fmt.Sprintf("quantizing model %d/%d", tensorCount, tensorCount),
-Quantize: "quant",
-})
+fn(api.ProgressResponse{
+Status: fmt.Sprintf("quantizing model tensors %d/%d", tensorCount, tensorCount),
+Quantize: "quant",
+})
 return
 }
 }
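
The handoff above, read on its own: the C callback publishes the float progress value as its raw bit pattern in an atomically written int, and the Go ticker loop polls that int and reinterprets the bits as a float32. Below is a minimal, self-contained Go sketch of the same pattern; it is illustrative only (names and structure are not from this change, which routes the write through the cgo callback rather than a Go goroutine):

package main

import (
    "fmt"
    "math"
    "sync/atomic"
    "time"
)

func main() {
    // Holds the bit pattern of a float32; written atomically by the producer,
    // polled by the consumer. Stands in for the C-allocated "store" above.
    var store int32

    // Producer: stands in for update_quantize_progress, publishing progress.
    go func() {
        for i := 0; i <= 100; i++ {
            atomic.StoreInt32(&store, int32(math.Float32bits(float32(i))))
            time.Sleep(10 * time.Millisecond)
        }
    }()

    // Consumer: stands in for the ticker loop in Quantize.
    ticker := time.NewTicker(30 * time.Millisecond)
    defer ticker.Stop()
    for range ticker.C {
        progress := math.Float32frombits(uint32(atomic.LoadInt32(&store)))
        fmt.Printf("quantizing model tensors %.0f/100\n", progress)
        if progress >= 100 {
            return
        }
    }
}

math.Float32bits/Float32frombits are the portable way to reinterpret the bits on the pure-Go side; the change itself does the equivalent with an unsafe.Pointer cast on a value written by C.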


@@ -1,18 +1,32 @@
-From fa509abf281177eacdc71a2a14432c4e6ed74a47 Mon Sep 17 00:00:00 2001
+From ed941590d59fc07b1ad21d6aa458588e47d1e446 Mon Sep 17 00:00:00 2001
 From: Josh Yan <jyan00017@gmail.com>
-Date: Wed, 10 Jul 2024 12:58:31 -0700
-Subject: [PATCH] quantize callback
+Date: Wed, 10 Jul 2024 13:39:39 -0700
+Subject: [PATCH] quantize progress
 ---
- llama.cpp | 8 ++++++++
- llama.h | 3 +++
+ include/llama.h | 3 +++
+ src/llama.cpp | 8 ++++++++
 2 files changed, 11 insertions(+)
-diff --git a/llama.cpp b/llama.cpp
-index 61948751..d3126510 100644
---- a/llama.cpp
-+++ b/llama.cpp
-@@ -15586,6 +15586,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
+diff --git a/include/llama.h b/include/llama.h
+index bb4b05ba..613db68e 100644
+--- a/include/llama.h
++++ b/include/llama.h
+@@ -349,6 +349,9 @@ extern "C" {
+ bool keep_split; // quantize to the same number of shards
+ void * imatrix; // pointer to importance matrix data
+ void * kv_overrides; // pointer to vector containing overrides
++
++ llama_progress_callback quantize_callback; // callback to report quantization progress
++ void * quantize_callback_data; // user data for the callback
+ } llama_model_quantize_params;
+ // grammar types
+diff --git a/src/llama.cpp b/src/llama.cpp
+index 2b9ace28..ac640c02 100644
+--- a/src/llama.cpp
++++ b/src/llama.cpp
+@@ -18252,6 +18252,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 const auto tn = LLM_TN(model.arch);
 new_ofstream(0);
 for (int i = 0; i < ml.n_tensors; ++i) {
@@ -25,7 +39,7 @@ index 61948751..d3126510 100644
 auto weight = ml.get_weight(i);
 struct ggml_tensor * tensor = weight->tensor;
 if (weight->idx != cur_split && params->keep_split) {
-@@ -16119,6 +16125,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
+@@ -18789,6 +18795,8 @@ struct llama_model_quantize_params llama_model_quantize_default_params() {
 /*.keep_split =*/ false,
 /*.imatrix =*/ nullptr,
 /*.kv_overrides =*/ nullptr,
@@ -34,20 +48,5 @@ index 61948751..d3126510 100644
 };
 return result;
-diff --git a/llama.h b/llama.h
-index da310ffa..3cbe6023 100644
---- a/llama.h
-+++ b/llama.h
-@@ -337,6 +337,9 @@ extern "C" {
- bool keep_split; // quantize to the same number of shards
- void * imatrix; // pointer to importance matrix data
- void * kv_overrides; // pointer to vector containing overrides
-+
-+ llama_progress_callback quantize_callback; // callback to report quantization progress
-+ void * quantize_callback_data; // user data for the callback
- } llama_model_quantize_params;
- // grammar types
 --
-2.39.3 (Apple Git-146)
+2.39.3 (Apple Git-146)

server/images.go

@@ -422,12 +422,17 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
 if err != nil {
 return err
 }
+tensorCount := len(baseLayer.GGML.Tensors())
 ft := baseLayer.GGML.KV().FileType()
 if !slices.Contains([]string{"F16", "F32"}, ft.String()) {
 return errors.New("quantization is only supported for F16 and F32 models")
 } else if want != ft {
+fn(api.ProgressResponse{
+Status: "quantizing model tensors",
+Quantize: "quant",
+})
 blob, err := GetBlobsPath(baseLayer.Digest)
 if err != nil {
 return err
@@ -473,7 +478,6 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
layers = append(layers, baseLayer.Layer)
}
case "license", "template", "system":
if c.Name != "license" {
// replace