Mirror of https://github.com/ollama/ollama.git (synced 2026-02-23 02:40:16 -05:00)

Compare commits: v0.1.44 ... bmizerany/ (1 commit)

Commit: 95af97b9f3
.github/workflows/test.yaml (vendored, 4 lines changed)
@@ -269,9 +269,9 @@ jobs:
mkdir -p llm/build/darwin/$ARCH/stub/bin
touch llm/build/darwin/$ARCH/stub/bin/ollama_llama_server
if: ${{ startsWith(matrix.os, 'macos-') }}
- uses: golangci/golangci-lint-action@v6
- uses: golangci/golangci-lint-action@v4
with:
args: --timeout 8m0s -v ${{ startsWith(matrix.os, 'windows-') && '' || '--disable gofmt --disable goimports' }}
args: --timeout 8m0s -v
test:
strategy:
matrix:
@@ -9,26 +9,9 @@ linters:
- contextcheck
- exportloopref
- gocheckcompilerdirectives
# conditionally enable this on linux/macos
# FIXME: for some reason this errors on windows
# - gofmt
# - goimports
- intrange
- misspell
- nilerr
- nolintlint
- nosprintfhostport
- testifylint
- unconvert
- unused
- wastedassign
- whitespace
- usestdlibvars
severity:
default-severity: error
rules:
- linters:
- gofmt
- goimports
- intrange
- usestdlibvars
severity: info
README.md (11 lines changed)
@@ -6,7 +6,7 @@

[](https://discord.gg/ollama)

Get up and running with large language models.
Get up and running with large language models locally.

### macOS

@@ -285,7 +285,6 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [macai](https://github.com/Renset/macai) (macOS client for Ollama, ChatGPT, and other compatible API back-ends)
- [Olpaka](https://github.com/Otacon/olpaka) (User-friendly Flutter Web App for Ollama)
- [OllamaSpring](https://github.com/CrazyNeil/OllamaSpring) (Ollama Client for macOS)
- [LLocal.in](https://github.com/kartikm7/llocal) (Easy to use Electron Desktop Client for Ollama)

### Terminal

@@ -308,7 +307,6 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [ShellOracle](https://github.com/djcopley/ShellOracle)
- [tlm](https://github.com/yusufcanb/tlm)
- [podman-ollama](https://github.com/ericcurtin/podman-ollama)
- [gollama](https://github.com/sammcj/gollama)

### Database

@@ -326,13 +324,11 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [LangChain](https://python.langchain.com/docs/integrations/llms/ollama) and [LangChain.js](https://js.langchain.com/docs/modules/model_io/models/llms/integrations/ollama) with [example](https://js.langchain.com/docs/use_cases/question_answering/local_retrieval_qa)
- [LangChainGo](https://github.com/tmc/langchaingo/) with [example](https://github.com/tmc/langchaingo/tree/main/examples/ollama-completion-example)
- [LangChain4j](https://github.com/langchain4j/langchain4j) with [example](https://github.com/langchain4j/langchain4j-examples/tree/main/ollama-examples/src/main/java)
- [LangChainRust](https://github.com/Abraxas-365/langchain-rust) with [example](https://github.com/Abraxas-365/langchain-rust/blob/main/examples/llm_ollama.rs)
- [LlamaIndex](https://gpt-index.readthedocs.io/en/stable/examples/llm/ollama.html)
- [LiteLLM](https://github.com/BerriAI/litellm)
- [OllamaSharp for .NET](https://github.com/awaescher/OllamaSharp)
- [Ollama for Ruby](https://github.com/gbaptista/ollama-ai)
- [Ollama-rs for Rust](https://github.com/pepperoni21/ollama-rs)
- [Ollama-hpp for C++](https://github.com/jmont-dev/ollama-hpp)
- [Ollama4j for Java](https://github.com/amithkoujalgi/ollama4j)
- [ModelFusion Typescript Library](https://modelfusion.dev/integration/model-provider/ollama)
- [OllamaKit for Swift](https://github.com/kevinhermawan/OllamaKit)

@@ -350,7 +346,6 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [Portkey](https://portkey.ai/docs/welcome/integration-guides/ollama)
- [PromptingTools.jl](https://github.com/svilupp/PromptingTools.jl) with an [example](https://svilupp.github.io/PromptingTools.jl/dev/examples/working_with_ollama)
- [LlamaScript](https://github.com/Project-Llama/llamascript)

### Mobile

- [Enchanted](https://github.com/AugustDev/enchanted)

@@ -383,9 +378,7 @@ See the [API documentation](./docs/api.md) for all endpoints.
- [AI ST Completion](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (Sublime Text 4 AI assistant plugin with Ollama support)
- [Discord-Ollama Chat Bot](https://github.com/kevinthedang/discord-ollama) (Generalized TypeScript Discord Bot w/ Tuning Documentation)
- [Discord AI chat/moderation bot](https://github.com/rapmd73/Companion) Chat/moderation bot written in python. Uses Ollama to create personalities.
- [Headless Ollama](https://github.com/nischalj10/headless-ollama) (Scripts to automatically install the ollama client & models on any OS for apps that depend on the ollama server)

### Supported backends

### Supported backends
- [llama.cpp](https://github.com/ggerganov/llama.cpp) project founded by Georgi Gerganov.
@@ -23,9 +23,11 @@ import (
"net"
"net/http"
"net/url"
"os"
"runtime"
"strconv"
"strings"

"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/version"
)

@@ -63,7 +65,10 @@ func checkError(resp *http.Response, body []byte) error {
// If the variable is not specified, a default ollama host and port will be
// used.
func ClientFromEnvironment() (*Client, error) {
ollamaHost := envconfig.Host
ollamaHost, err := GetOllamaHost()
if err != nil {
return nil, err
}

return &Client{
base: &url.URL{

@@ -74,6 +79,52 @@ func ClientFromEnvironment() (*Client, error) {
}, nil
}

type OllamaHost struct {
Scheme string
Host string
Port string
}

func GetOllamaHost() (OllamaHost, error) {
defaultPort := "11434"

hostVar := os.Getenv("OLLAMA_HOST")
hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))

scheme, hostport, ok := strings.Cut(hostVar, "://")
switch {
case !ok:
scheme, hostport = "http", hostVar
case scheme == "http":
defaultPort = "80"
case scheme == "https":
defaultPort = "443"
}

// trim trailing slashes
hostport = strings.TrimRight(hostport, "/")

host, port, err := net.SplitHostPort(hostport)
if err != nil {
host, port = "127.0.0.1", defaultPort
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
host = ip.String()
} else if hostport != "" {
host = hostport
}
}

if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
return OllamaHost{}, ErrInvalidHostPort
}

return OllamaHost{
Scheme: scheme,
Host: host,
Port: port,
}, nil
}
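To make the parsing rules above concrete, here is a minimal sketch of calling the new helper; the example value and output comment follow the switch/fallback logic in the function itself, not any documented behavior.

```go
package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"github.com/ollama/ollama/api"
)

func main() {
	// An https scheme switches the default port to 443; quotes and
	// surrounding whitespace are stripped before parsing.
	_ = os.Setenv("OLLAMA_HOST", ` "https://example.com" `)
	oh, err := api.GetOllamaHost()
	if err != nil {
		log.Fatal(err) // ErrInvalidHostPort for ports outside 0-65535
	}
	fmt.Println(oh.Scheme, net.JoinHostPort(oh.Host, oh.Port))
	// Output: https example.com:443
}
```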
func NewClient(base *url.URL, http *http.Client) *Client {
return &Client{
base: base,

@@ -304,8 +355,8 @@ func (c *Client) List(ctx context.Context) (*ListResponse, error) {
}

// List running models.
func (c *Client) ListRunning(ctx context.Context) (*ProcessResponse, error) {
var lr ProcessResponse
func (c *Client) ListRunning(ctx context.Context) (*ListResponse, error) {
var lr ListResponse
if err := c.do(ctx, http.MethodGet, "/api/ps", nil, &lr); err != nil {
return nil, err
}

@@ -1,9 +1,11 @@
package api

import (
"fmt"
"net"
"testing"

"github.com/ollama/ollama/envconfig"
"github.com/stretchr/testify/assert"
)

func TestClientFromEnvironment(t *testing.T) {

@@ -33,7 +35,6 @@ func TestClientFromEnvironment(t *testing.T) {
for k, v := range testCases {
t.Run(k, func(t *testing.T) {
t.Setenv("OLLAMA_HOST", v.value)
envconfig.LoadConfig()

client, err := ClientFromEnvironment()
if err != v.err {

@@ -45,4 +46,40 @@ func TestClientFromEnvironment(t *testing.T) {
}
})
}

hostTestCases := map[string]*testCase{
"empty": {value: "", expect: "127.0.0.1:11434"},
"only address": {value: "1.2.3.4", expect: "1.2.3.4:11434"},
"only port": {value: ":1234", expect: ":1234"},
"address and port": {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
"hostname": {value: "example.com", expect: "example.com:11434"},
"hostname and port": {value: "example.com:1234", expect: "example.com:1234"},
"zero port": {value: ":0", expect: ":0"},
"too large port": {value: ":66000", err: ErrInvalidHostPort},
"too small port": {value: ":-1", err: ErrInvalidHostPort},
"ipv6 localhost": {value: "[::1]", expect: "[::1]:11434"},
"ipv6 world open": {value: "[::]", expect: "[::]:11434"},
"ipv6 no brackets": {value: "::1", expect: "[::1]:11434"},
"ipv6 + port": {value: "[::1]:1337", expect: "[::1]:1337"},
"extra space": {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
"extra quotes": {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
"extra space+quotes": {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
"extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
}

for k, v := range hostTestCases {
t.Run(k, func(t *testing.T) {
t.Setenv("OLLAMA_HOST", v.value)

oh, err := GetOllamaHost()
if err != v.err {
t.Fatalf("expected %s, got %s", v.err, err)
}

if err == nil {
host := net.JoinHostPort(oh.Host, oh.Port)
assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
}
})
}
}
api/types.go (31 lines changed)
@@ -2,6 +2,7 @@ package api

import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"math"

@@ -281,33 +282,19 @@ type PushRequest struct {

// ListResponse is the response from [Client.List].
type ListResponse struct {
Models []ListModelResponse `json:"models"`
Models []ModelResponse `json:"models"`
}

// ProcessResponse is the response from [Client.Process].
type ProcessResponse struct {
Models []ProcessModelResponse `json:"models"`
}

// ListModelResponse is a single model description in [ListResponse].
type ListModelResponse struct {
// ModelResponse is a single model description in [ListResponse].
type ModelResponse struct {
Name string `json:"name"`
Model string `json:"model"`
ModifiedAt time.Time `json:"modified_at"`
ModifiedAt time.Time `json:"modified_at,omitempty"`
Size int64 `json:"size"`
Digest string `json:"digest"`
Details ModelDetails `json:"details,omitempty"`
}

// ProcessModelResponse is a single model description in [ProcessResponse].
type ProcessModelResponse struct {
Name string `json:"name"`
Model string `json:"model"`
Size int64 `json:"size"`
Digest string `json:"digest"`
Details ModelDetails `json:"details,omitempty"`
ExpiresAt time.Time `json:"expires_at"`
SizeVRAM int64 `json:"size_vram"`
ExpiresAt time.Time `json:"expires_at,omitempty"`
SizeVRAM int64 `json:"size_vram,omitempty"`
}
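For orientation, a small sketch (not part of the change) of decoding an /api/ps payload into the ProcessResponse side of this split; it relies only on the json tags shown above and the `encoding/json` import already in this file.

```go
// Sketch: unmarshal an /api/ps response body into the split type.
func decodePS(body []byte) ([]ProcessModelResponse, error) {
	var pr ProcessResponse
	if err := json.Unmarshal(body, &pr); err != nil {
		return nil, err
	}
	return pr.Models, nil
}
```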
type TokenResponse struct {

@@ -319,7 +306,7 @@ type GenerateResponse struct {
// Model is the model name that generated the response.
Model string `json:"model"`

// CreatedAt is the timestamp of the response.
//CreatedAt is the timestamp of the response.
CreatedAt time.Time `json:"created_at"`

// Response is the textual response itself.

@@ -376,6 +363,8 @@ func (m *Metrics) Summary() {
}
}

var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")

func (opts *Options) FromMap(m map[string]interface{}) error {
valueOpts := reflect.ValueOf(opts).Elem() // names of the fields in the options struct
typeOpts := reflect.TypeOf(opts).Elem() // types of the fields in the options struct
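A hypothetical call to FromMap, purely for illustration; "seed" and "temperature" are assumed option keys matching fields of Options, which is defined elsewhere in this file.

```go
// Hypothetical usage: map keys are matched to Options fields via
// reflection, so mismatched value types surface as errors.
func exampleFromMap() error {
	var opts Options
	return opts.FromMap(map[string]interface{}{
		"seed":        123,
		"temperature": 0.0,
	})
}
```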
@@ -72,13 +72,13 @@ func TestDurationMarshalUnmarshal(t *testing.T) {
},
{
"positive duration",
42 * time.Second,
42 * time.Second,
time.Duration(42 * time.Second),
time.Duration(42 * time.Second),
},
{
"another positive duration",
42 * time.Minute,
42 * time.Minute,
time.Duration(42 * time.Minute),
time.Duration(42 * time.Minute),
},
{
"zero duration",
@@ -69,6 +69,7 @@ func init() {
slog.Error(fmt.Sprintf("create ollama dir %s: %v", AppDataDir, err))
}
}

} else if runtime.GOOS == "darwin" {
// TODO
AppName += ".app"

@@ -15,7 +15,7 @@ import (
)

func getCLIFullPath(command string) string {
var cmdPath string
cmdPath := ""
appExe, err := os.Executable()
if err == nil {
cmdPath = filepath.Join(filepath.Dir(appExe), command)

@@ -65,6 +65,7 @@ func start(ctx context.Context, command string) (*exec.Cmd, error) {
if err != nil {
if !errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("stat ollama server log dir %s: %v", logDir, err)
}

if err := os.MkdirAll(logDir, 0o755); err != nil {

@@ -24,8 +24,7 @@ func terminate(cmd *exec.Cmd) error {
if err != nil {
return err
}
//nolint:errcheck
defer dll.Release()
defer dll.Release() // nolint: errcheck

pid := cmd.Process.Pid

@@ -74,8 +73,7 @@ func isProcessExited(pid int) (bool, error) {
if err != nil {
return false, fmt.Errorf("failed to open process: %v", err)
}
//nolint:errcheck
defer windows.CloseHandle(hProcess)
defer windows.CloseHandle(hProcess) // nolint: errcheck

var exitCode uint32
err = windows.GetExitCodeProcess(hProcess, &exitCode)

@@ -78,7 +78,7 @@ func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
}
defer resp.Body.Close()

if resp.StatusCode == http.StatusNoContent {
if resp.StatusCode == 204 {
slog.Debug("check update response 204 (current version is up to date)")
return false, updateResp
}

@@ -87,7 +87,7 @@ func IsNewReleaseAvailable(ctx context.Context) (bool, UpdateResponse) {
slog.Warn(fmt.Sprintf("failed to read body response: %s", err))
}

if resp.StatusCode != http.StatusOK {
if resp.StatusCode != 200 {
slog.Info(fmt.Sprintf("check update error %d - %.96s", resp.StatusCode, string(body)))
return false, updateResp
}

@@ -114,7 +114,7 @@ func DownloadNewRelease(ctx context.Context, updateResp UpdateResponse) error {
if err != nil {
return fmt.Errorf("error checking update: %w", err)
}
if resp.StatusCode != http.StatusOK {
if resp.StatusCode != 200 {
return fmt.Errorf("unexpected status attempting to download update %d", resp.StatusCode)
}
resp.Body.Close()
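These hunks toggle between bare status literals (204, 200) and the net/http constants; the `usestdlibvars` linter enabled in the config above is the check that flags the literal form. A minimal sketch of the constant style, assuming `fmt` and `net/http` are imported:

```go
func checkStatus(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusNoContent: // 204: nothing new
		return nil
	case http.StatusOK: // 200: proceed with the body
		return nil
	default:
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
}
```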
@@ -4,5 +4,5 @@ write-host "Welcome to Ollama!"
write-host ""
write-host "Run your first model:"
write-host ""
write-host "`tollama run llama3"
write-host "`tollama run llama2"
write-host ""

@@ -29,6 +29,7 @@ func GetID() string {
initStore()
}
return store.ID

}

func GetFirstTimeRun() bool {

@@ -47,6 +47,7 @@ func nativeLoop() {
default:
pTranslateMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck
pDispatchMessage.Call(uintptr(unsafe.Pointer(m))) //nolint:errcheck

}
}
}

@@ -159,8 +160,8 @@ func (t *winTray) wndProc(hWnd windows.Handle, message uint32, wParam, lParam ui
lResult, _, _ = pDefWindowProc.Call(
uintptr(hWnd),
uintptr(message),
wParam,
lParam,
uintptr(wParam),
uintptr(lParam),
)
}
return

@@ -186,7 +186,7 @@ func (t *winTray) initInstance() error {
t.muNID.Lock()
defer t.muNID.Unlock()
t.nid = &notifyIconData{
Wnd: t.window,
Wnd: windows.Handle(t.window),
ID: 100,
Flags: NIF_MESSAGE,
CallbackMessage: t.wmSystrayMessage,

@@ -197,6 +197,7 @@ func (t *winTray) initInstance() error {
}

func (t *winTray) createMenu() error {

menuHandle, _, err := pCreatePopupMenu.Call()
if menuHandle == 0 {
return err

@@ -245,7 +246,7 @@ func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title
mi := menuItemInfo{
Mask: MIIM_FTYPE | MIIM_STRING | MIIM_ID | MIIM_STATE,
Type: MFT_STRING,
ID: menuItemId,
ID: uint32(menuItemId),
TypeData: titlePtr,
Cch: uint32(len(title)),
}

@@ -301,10 +302,11 @@ func (t *winTray) addOrUpdateMenuItem(menuItemId uint32, parentId uint32, title
}

func (t *winTray) addSeparatorMenuItem(menuItemId, parentId uint32) error {

mi := menuItemInfo{
Mask: MIIM_FTYPE | MIIM_ID | MIIM_STATE,
Type: MFT_SEPARATOR,
ID: menuItemId,
ID: uint32(menuItemId),
}

mi.Size = uint32(unsafe.Sizeof(mi))

@@ -424,6 +426,7 @@ func iconBytesToFilePath(iconBytes []byte) (string, error) {
// Loads an image from file and shows it in tray.
// Shell_NotifyIcon: https://msdn.microsoft.com/en-us/library/windows/desktop/bb762159(v=vs.85).aspx
func (t *winTray) setIcon(src string) error {

h, err := t.loadIconFrom(src)
if err != nil {
return err

@@ -441,6 +444,7 @@ func (t *winTray) setIcon(src string) error {
// Loads an image from file to be shown in tray or menu item.
// LoadImage: https://msdn.microsoft.com/en-us/library/windows/desktop/ms648045(v=vs.85).aspx
func (t *winTray) loadIconFrom(src string) (windows.Handle, error) {

// Save and reuse handles of loaded images
t.muLoadedImages.RLock()
h, ok := t.loadedImages[src]
cmd/cmd.go (32 lines changed)
@@ -6,7 +6,6 @@ import (
"context"
"crypto/ed25519"
"crypto/rand"
"crypto/sha256"
"encoding/pem"
"errors"
"fmt"

@@ -20,16 +19,18 @@ import (
"path/filepath"
"regexp"
"runtime"
"slices"
"strings"
"syscall"
"time"

"github.com/minio/sha256-simd"

"github.com/containerd/console"
"github.com/mattn/go-runewidth"
"github.com/olekukonko/tablewriter"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh"
"golang.org/x/exp/slices"
"golang.org/x/term"

"github.com/ollama/ollama/api"

@@ -746,6 +747,7 @@ func displayResponse(content string, wordWrap bool, state *displayResponseState)
if wordWrap && termWidth >= 10 {
for _, ch := range content {
if state.lineLength+1 > termWidth-5 {

if runewidth.StringWidth(state.wordBuffer) > termWidth-10 {
fmt.Printf("%s%c", state.wordBuffer, ch)
state.wordBuffer = ""

@@ -960,11 +962,17 @@ func generate(cmd *cobra.Command, opts runOptions) error {
}

func RunServer(cmd *cobra.Command, _ []string) error {
// retrieve the OLLAMA_HOST environment variable
ollamaHost, err := api.GetOllamaHost()
if err != nil {
return err
}

if err := initializeKeypair(); err != nil {
return err
}

ln, err := net.Listen("tcp", net.JoinHostPort(envconfig.Host.Host, envconfig.Host.Port))
ln, err := net.Listen("tcp", net.JoinHostPort(ollamaHost.Host, ollamaHost.Port))
if err != nil {
return err
}

@@ -1023,6 +1031,24 @@ func initializeKeypair() error {
return nil
}

//nolint:unused
func waitForServer(ctx context.Context, client *api.Client) error {
// wait for the server to start
timeout := time.After(5 * time.Second)
tick := time.Tick(500 * time.Millisecond)
for {
select {
case <-timeout:
return errors.New("timed out waiting for server to start")
case <-tick:
if err := client.Heartbeat(ctx); err == nil {
return nil // server has started
}
}
}

}
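waitForServer is marked //nolint:unused here; a hypothetical caller (not in the diff) would pair it with a client along these lines:

```go
// Hypothetical caller: block until the freshly started server answers
// heartbeats, or give up after waitForServer's 5-second timeout.
client, err := api.ClientFromEnvironment()
if err != nil {
	return err
}
if err := waitForServer(cmd.Context(), client); err != nil {
	return fmt.Errorf("server did not become ready: %w", err)
}
```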
func checkServerHeartbeat(cmd *cobra.Command, _ []string) error {
client, err := api.ClientFromEnvironment()
if err != nil {

@@ -8,11 +8,11 @@ import (
"os"
"path/filepath"
"regexp"
"slices"
"sort"
"strings"

"github.com/spf13/cobra"
"golang.org/x/exp/slices"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"

@@ -6,7 +6,6 @@ import (
"text/template"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/ollama/ollama/api"
)

@@ -86,11 +85,11 @@ MESSAGE assistant """Yes it is true, I am half horse, half shark."""
`

tmpl, err := template.New("").Parse(expectedModelfile)
require.NoError(t, err)
assert.Nil(t, err)

var buf bytes.Buffer
err = tmpl.Execute(&buf, opts)
require.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, buf.String(), mf)

opts.ParentModel = "horseshark"

@@ -108,10 +107,10 @@ MESSAGE assistant """Yes it is true, I am half horse, half shark."""
`

tmpl, err = template.New("").Parse(expectedModelfile)
require.NoError(t, err)
assert.Nil(t, err)

var parentBuf bytes.Buffer
err = tmpl.Execute(&parentBuf, opts)
require.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, parentBuf.String(), mf)
}
cmd/start.go (27 lines changed)
@@ -1,27 +0,0 @@
//go:build darwin || windows

package cmd

import (
"context"
"errors"
"time"

"github.com/ollama/ollama/api"
)

func waitForServer(ctx context.Context, client *api.Client) error {
// wait for the server to start
timeout := time.After(5 * time.Second)
tick := time.Tick(500 * time.Millisecond)
for {
select {
case <-timeout:
return errors.New("timed out waiting for server to start")
case <-tick:
if err := client.Heartbeat(ctx); err == nil {
return nil // server has started
}
}
}
}

@@ -189,7 +189,7 @@ func LoadSentencePieceTokens(dirpath string, params *Params) (*Vocab, error) {
if params.VocabSize > len(v.Tokens) {
missingTokens := params.VocabSize - len(v.Tokens)
slog.Warn(fmt.Sprintf("vocab is missing %d tokens", missingTokens))
for cnt := range missingTokens {
for cnt := 0; cnt < missingTokens; cnt++ {
v.Tokens = append(v.Tokens, fmt.Sprintf("<dummy%05d>", cnt+1))
v.Scores = append(v.Scores, -1)
v.Types = append(v.Types, tokenTypeUserDefined)
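The two loop forms in this hunk are equivalent: `for cnt := range missingTokens` is Go 1.22's range-over-int, which counts from 0 to n-1 just like the classic three-clause loop. A standalone sketch:

```go
// Go 1.22+: both loops print <dummy00001> through <dummy00003>.
n := 3
for cnt := range n {
	fmt.Printf("<dummy%05d>\n", cnt+1)
}
for cnt := 0; cnt < n; cnt++ {
	fmt.Printf("<dummy%05d>\n", cnt+1)
}
```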
@@ -35,6 +35,7 @@ func addOnes(data []float32, vectorSize int) ([]float32, error) {
f32s = append(f32s, t...)
}

return f32s, nil
}

@@ -119,12 +119,11 @@ func llamaRepack(name string, params *Params, data []float32, shape []uint64) ([
}

var heads int
switch {
case strings.HasSuffix(name, "attn_q.weight"):
if strings.HasSuffix(name, "attn_q.weight") {
heads = params.AttentionHeads
case strings.HasSuffix(name, "attn_k.weight"):
} else if strings.HasSuffix(name, "attn_k.weight") {
heads = cmp.Or(params.KeyValHeads, params.AttentionHeads)
default:
} else {
return nil, fmt.Errorf("unknown tensor name: %s", name)
}

@@ -120,7 +120,7 @@ func (m *SafetensorFormat) readTensors(fn string, offset uint64, params *Params)
Name: name,
Kind: kind,
Offset: offset,
Shape: shape,
Shape: shape[:],
}

t.WriterTo = safetensorWriterTo{
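The `Shape: shape[:]` form reads as an array-to-slice conversion. A minimal sketch of why the `[:]` matters, assuming for illustration that the tensor's Shape field is a `[]uint64` while the local `shape` is a fixed-size array:

```go
func shapeExample() {
	// Assumed declarations, for illustration only.
	var shape [4]uint64
	type tensor struct{ Shape []uint64 }

	t := tensor{Shape: shape[:]} // [:] yields a slice backed by the array
	// tensor{Shape: shape} would not compile: [4]uint64 is not []uint64
	_ = t
}
```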
@@ -2,13 +2,14 @@ package convert

import (
"cmp"
"crypto/sha256"
"encoding/json"
"fmt"
"log/slog"
"os"
"slices"

"github.com/minio/sha256-simd"

"golang.org/x/exp/maps"
)

@@ -85,8 +86,11 @@ func parseTokens(dirpath string) (pre string, tokens []Token, merges []string, e

sha256sum := sha256.New()
for _, pt := range t.PreTokenizer.PreTokenizers {
if pt.Type == "Split" && pt.Pattern.Regex != "" {
sha256sum.Write([]byte(pt.Pattern.Regex))
switch pt.Type {
case "Split":
if pt.Pattern.Regex != "" {
sha256sum.Write([]byte(pt.Pattern.Regex))
}
}
}

@@ -88,7 +88,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
Name: ggufName,
Kind: kind,
Offset: offset, // calculate the offset
Shape: shape,
Shape: shape[:],
}

tensor.WriterTo = torchWriterTo{

@@ -104,6 +104,7 @@ func (tf *TorchFormat) GetTensors(dirpath string, params *Params) ([]llm.Tensor,
}

return tensors, nil

}

func getAltParams(dirpath string) (*Params, error) {
docs/api.md (50 lines changed)
@@ -12,7 +12,6 @@
- [Pull a Model](#pull-a-model)
- [Push a Model](#push-a-model)
- [Generate Embeddings](#generate-embeddings)
- [List Running Models](#list-running-models)

## Conventions

@@ -250,7 +249,7 @@ curl http://localhost:11434/api/generate -d '{

#### Request (Reproducible outputs)

For reproducible outputs, set `seed` to a number:
For reproducible outputs, set `temperature` to 0 and `seed` to a number:

##### Request

@@ -259,7 +258,8 @@ curl http://localhost:11434/api/generate -d '{
"model": "mistral",
"prompt": "Why is the sky blue?",
"options": {
"seed": 123
"seed": 123,
"temperature": 0
}
}'
```

@@ -1035,47 +1035,3 @@ curl http://localhost:11434/api/embeddings -d '{
]
}
```

## List Running Models
```shell
GET /api/ps
```

List models that are currently loaded into memory.

#### Examples

### Request

```shell
curl http://localhost:11434/api/ps
```

#### Response

A single JSON object will be returned.

```json
{
"models": [
{
"name": "mistral:latest",
"model": "mistral:latest",
"size": 5137025024,
"digest": "2ae6f6dd7a3dd734790bbbf58b8909a606e0e7e97e94b7604e0aa7ae4490e6d8",
"details": {
"parent_model": "",
"format": "gguf",
"family": "llama",
"families": [
"llama"
],
"parameter_size": "7.2B",
"quantization_level": "Q4_0"
},
"expires_at": "2024-06-04T14:38:31.83753-07:00",
"size_vram": 5137025024
}
]
}
```
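The same endpoint is reachable from the repo's Go client; a short sketch follows (the return type is *ProcessResponse or *ListResponse depending on which side of the api/types.go change above applies):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ollama/ollama/api"
)

func main() {
	client, err := api.ClientFromEnvironment()
	if err != nil {
		log.Fatal(err)
	}
	// ListRunning wraps GET /api/ps.
	running, err := client.ListRunning(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range running.Models {
		fmt.Println(m.Name, m.SizeVRAM)
	}
}
```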
docs/import.md (227 lines changed)
@@ -1,99 +1,170 @@
# Import
# Import a model

GGUF models and select Safetensors models can be imported directly into Ollama.
This guide walks through importing a GGUF, PyTorch or Safetensors model.

## Import GGUF
## Importing (GGUF)

A binary GGUF file can be imported directly into Ollama through a Modelfile.
### Step 1: Write a `Modelfile`

```dockerfile
FROM /path/to/file.gguf
Start by creating a `Modelfile`. This file is the blueprint for your model, specifying weights, parameters, prompt templates and more.

```
FROM ./mistral-7b-v0.1.Q4_0.gguf
```

## Import Safetensors
(Optional) Many chat models require a prompt template in order to answer correctly. A default prompt template can be specified with the `TEMPLATE` instruction in the `Modelfile`:

If the model being imported is one of these architectures, it can be imported directly into Ollama through a Modelfile:

- LlamaForCausalLM
- MistralForCausalLM
- GemmaForCausalLM

```dockerfile
FROM /path/to/safetensors/directory
```
FROM ./mistral-7b-v0.1.Q4_0.gguf
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```

For architectures not directly convertible by Ollama, see llama.cpp's [guide](https://github.com/ggerganov/llama.cpp/blob/master/README.md#prepare-and-quantize) on conversion. After conversion, see [Import GGUF](#import-gguf).
### Step 2: Create the Ollama model

## Automatic Quantization
Finally, create a model from your `Modelfile`:

> [!NOTE]
> Automatic quantization requires v0.1.35 or higher.

Ollama is capable of quantizing FP16 or FP32 models to any of the supported quantizations with the `-q/--quantize` flag in `ollama create`.

```dockerfile
FROM /path/to/my/gemma/f16/model
```
ollama create example -f Modelfile
```

### Step 3: Run your model

Next, test the model with `ollama run`:

```
ollama run example "What is your favourite condiment?"
```

## Importing (PyTorch & Safetensors)

> Importing from PyTorch and Safetensors is a longer process than importing from GGUF. Improvements that make it easier are a work in progress.

### Setup

First, clone the `ollama/ollama` repo:

```
git clone git@github.com:ollama/ollama.git ollama
cd ollama
```

and then fetch its `llama.cpp` submodule:

```shell
$ ollama create -q Q4_K_M mymodel
transferring model data
quantizing F16 model to Q4_K_M
creating new layer sha256:735e246cc1abfd06e9cdcf95504d6789a6cd1ad7577108a70d9902fef503c1bd
creating new layer sha256:0853f0ad24e5865173bbf9ffcc7b0f5d56b66fd690ab1009867e45e7d2c4db0f
writing manifest
success
git submodule init
git submodule update llm/llama.cpp
```

### Supported Quantizations
Next, install the Python dependencies:

<details>
<summary>Legacy Quantization</summary>

- `Q4_0`
- `Q4_1`
- `Q5_0`
- `Q5_1`
- `Q8_0`

</details>

<details>
<summary>K-means Quantization</summary>

- `Q3_K_S`
- `Q3_K_M`
- `Q3_K_L`
- `Q4_K_S`
- `Q4_K_M`
- `Q5_K_S`
- `Q5_K_M`
- `Q6_K`

</details>

> [!NOTE]
> Activation-aware Weight Quantization (i.e. IQ) is not currently supported for automatic quantization; however, you can still import the quantized model into Ollama, see [Import GGUF](#import-gguf).

## Template Detection

> [!NOTE]
> Template detection requires v0.1.42 or higher.

Ollama uses model metadata, specifically `tokenizer.chat_template`, to automatically create a template appropriate for the model you're importing.

```dockerfile
FROM /path/to/my/gemma/model
```
python3 -m venv llm/llama.cpp/.venv
source llm/llama.cpp/.venv/bin/activate
pip install -r llm/llama.cpp/requirements.txt
```

```shell
$ ollama create mymodel
transferring model data
using autodetected template gemma-instruct
creating new layer sha256:baa2a0edc27d19cc6b7537578a9a7ba1a4e3214dc185ed5ae43692b319af7b84
creating new layer sha256:ba66c3309914dbef07e5149a648fd1877f030d337a4f240d444ea335008943cb
writing manifest
success
Then build the `quantize` tool:

```
make -C llm/llama.cpp quantize
```

Defining a template in the Modelfile will disable this feature, which may be useful if you want to use a different template than the autodetected one.
### Clone the HuggingFace repository (optional)

If the model is currently hosted in a HuggingFace repository, first clone that repository to download the raw model.

Install [Git LFS](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage), verify it's installed, and then clone the model's repository:

```
git lfs install
git clone https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1 model
```

### Convert the model

> Note: some model architectures require using specific convert scripts. For example, Qwen models require running `convert-hf-to-gguf.py` instead of `convert.py`

```
python llm/llama.cpp/convert.py ./model --outtype f16 --outfile converted.bin
```

### Quantize the model

```
llm/llama.cpp/quantize converted.bin quantized.bin q4_0
```

### Step 3: Write a `Modelfile`

Next, create a `Modelfile` for your model:

```
FROM quantized.bin
TEMPLATE "[INST] {{ .Prompt }} [/INST]"
```

### Step 4: Create the Ollama model

Finally, create a model from your `Modelfile`:

```
ollama create example -f Modelfile
```

### Step 5: Run your model

Next, test the model with `ollama run`:

```
ollama run example "What is your favourite condiment?"
```

## Publishing your model (optional – early alpha)

Publishing models is in early alpha. If you'd like to publish your model to share with others, follow these steps:

1. Create [an account](https://ollama.com/signup)
2. Copy your Ollama public key:
- macOS: `cat ~/.ollama/id_ed25519.pub | pbcopy`
- Windows: `type %USERPROFILE%\.ollama\id_ed25519.pub`
- Linux: `cat /usr/share/ollama/.ollama/id_ed25519.pub`
3. Add your public key to your [Ollama account](https://ollama.com/settings/keys)

Next, copy your model to your username's namespace:

```
ollama cp example <your username>/example
```

> Note: model names may only contain lowercase letters, digits, and the characters `.`, `-`, and `_`.

Then push the model:

```
ollama push <your username>/example
```

After publishing, your model will be available at `https://ollama.com/<your username>/example`.

## Quantization reference

The quantization options are as follows (from highest to lowest levels of quantization). Note: some architectures such as Falcon do not support K quants.

- `q2_K`
- `q3_K`
- `q3_K_S`
- `q3_K_M`
- `q3_K_L`
- `q4_0` (recommended)
- `q4_1`
- `q4_K`
- `q4_K_S`
- `q4_K_M`
- `q5_0`
- `q5_1`
- `q5_K`
- `q5_K_S`
- `q5_K_M`
- `q6_K`
- `q8_0`
- `f16`
@@ -100,16 +100,6 @@ sudo curl -L https://ollama.com/download/ollama-linux-amd64 -o /usr/bin/ollama
sudo chmod +x /usr/bin/ollama
```

## Installing specific versions

Use the `OLLAMA_VERSION` environment variable with the install script to install a specific version of Ollama, including pre-releases. You can find the version numbers in the [releases page](https://github.com/ollama/ollama/releases).

For example:

```
curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=0.1.32 sh
```

## Viewing logs

To view logs of Ollama running as a startup service, run:

@@ -45,7 +45,7 @@ all_splits = text_splitter.split_documents(data)
```

It's split up, but we have to find the relevant splits and then submit those to the model. We can do this by creating embeddings and storing them in a vector database. We can use Ollama directly to instantiate an embedding model. We will use ChromaDB in this example for a vector database. `pip install chromadb`
We also need to pull an embedding model: `ollama pull nomic-embed-text`

```python
from langchain.embeddings import OllamaEmbeddings
from langchain.vectorstores import Chroma

@@ -68,8 +68,7 @@ The next thing is to send the question and the relevant parts of the docs to the

```python
from langchain.chains import RetrievalQA
qachain=RetrievalQA.from_chain_type(ollama, retriever=vectorstore.as_retriever())
res = qachain.invoke({"query": question})
print(res['result'])
qachain.invoke({"query": question})
```

The answer received from this chain was:
@@ -1,10 +1,8 @@
package envconfig

import (
"errors"
"fmt"
"log/slog"
"net"
"os"
"path/filepath"
"runtime"

@@ -12,18 +10,6 @@ import (
"strings"
)

type OllamaHost struct {
Scheme string
Host string
Port string
}

func (o OllamaHost) String() string {
return fmt.Sprintf("%s://%s:%s", o.Scheme, o.Host, o.Port)
}

var ErrInvalidHostPort = errors.New("invalid port specified in OLLAMA_HOST")

var (
// Set via OLLAMA_ORIGINS in the environment
AllowOrigins []string

@@ -31,8 +17,6 @@ var (
Debug bool
// Experimental flash attention
FlashAttention bool
// Set via OLLAMA_HOST in the environment
Host *OllamaHost
// Set via OLLAMA_KEEP_ALIVE in the environment
KeepAlive string
// Set via OLLAMA_LLM_LIBRARY in the environment

@@ -41,8 +25,6 @@ var (
MaxRunners int
// Set via OLLAMA_MAX_QUEUE in the environment
MaxQueuedRequests int
// Set via OLLAMA_MODELS in the environment
ModelsDir string
// Set via OLLAMA_MAX_VRAM in the environment
MaxVRAM uint64
// Set via OLLAMA_NOHISTORY in the environment

@@ -67,13 +49,13 @@ func AsMap() map[string]EnvVar {
return map[string]EnvVar{
"OLLAMA_DEBUG": {"OLLAMA_DEBUG", Debug, "Show additional debug information (e.g. OLLAMA_DEBUG=1)"},
"OLLAMA_FLASH_ATTENTION": {"OLLAMA_FLASH_ATTENTION", FlashAttention, "Enabled flash attention"},
"OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
"OLLAMA_HOST": {"OLLAMA_HOST", "", "IP Address for the ollama server (default 127.0.0.1:11434)"},
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
"OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"},
"OLLAMA_MODELS": {"OLLAMA_MODELS", "", "The path to the models directory"},
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},

@@ -144,7 +126,7 @@ func LoadConfig() {
var paths []string
for _, root := range []string{filepath.Dir(appExe), cwd} {
paths = append(paths,
root,
filepath.Join(root),
filepath.Join(root, "windows-"+runtime.GOARCH),
filepath.Join(root, "dist", "windows-"+runtime.GOARCH),
)

@@ -202,17 +184,11 @@ func LoadConfig() {
AllowOrigins = append(AllowOrigins,
fmt.Sprintf("http://%s", allowOrigin),
fmt.Sprintf("https://%s", allowOrigin),
fmt.Sprintf("http://%s", net.JoinHostPort(allowOrigin, "*")),
fmt.Sprintf("https://%s", net.JoinHostPort(allowOrigin, "*")),
fmt.Sprintf("http://%s:*", allowOrigin),
fmt.Sprintf("https://%s:*", allowOrigin),
)
}
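The two origin-formatting styles in this hunk agree for plain hostnames but diverge for IPv6 literals, since JoinHostPort brackets any host containing a colon. A quick sketch:

```go
func originForms() {
	// Identical for "example.com"; different for IPv6 literals,
	// because JoinHostPort brackets any host containing a colon.
	fmt.Println(fmt.Sprintf("http://%s", net.JoinHostPort("::1", "*"))) // http://[::1]:*
	fmt.Println(fmt.Sprintf("http://%s:*", "::1"))                      // http://::1:*
}
```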
AllowOrigins = append(AllowOrigins,
"app://*",
"file://*",
"tauri://*",
)

maxRunners := clean("OLLAMA_MAX_LOADED_MODELS")
if maxRunners != "" {
m, err := strconv.Atoi(maxRunners)

@@ -233,70 +209,4 @@ func LoadConfig() {
}

KeepAlive = clean("OLLAMA_KEEP_ALIVE")

var err error
ModelsDir, err = getModelsDir()
if err != nil {
slog.Error("invalid setting", "OLLAMA_MODELS", ModelsDir, "error", err)
}

Host, err = getOllamaHost()
if err != nil {
slog.Error("invalid setting", "OLLAMA_HOST", Host, "error", err, "using default port", Host.Port)
}
}

func getModelsDir() (string, error) {
if models, exists := os.LookupEnv("OLLAMA_MODELS"); exists {
return models, nil
}
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, ".ollama", "models"), nil
}

func getOllamaHost() (*OllamaHost, error) {
defaultPort := "11434"

hostVar := os.Getenv("OLLAMA_HOST")
hostVar = strings.TrimSpace(strings.Trim(strings.TrimSpace(hostVar), "\"'"))

scheme, hostport, ok := strings.Cut(hostVar, "://")
switch {
case !ok:
scheme, hostport = "http", hostVar
case scheme == "http":
defaultPort = "80"
case scheme == "https":
defaultPort = "443"
}

// trim trailing slashes
hostport = strings.TrimRight(hostport, "/")

host, port, err := net.SplitHostPort(hostport)
if err != nil {
host, port = "127.0.0.1", defaultPort
if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
host = ip.String()
} else if hostport != "" {
host = hostport
}
}

if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 0 {
return &OllamaHost{
Scheme: scheme,
Host: host,
Port: defaultPort,
}, ErrInvalidHostPort
}

return &OllamaHost{
Scheme: scheme,
Host: host,
Port: port,
}, nil
}

@@ -1,11 +1,8 @@
package envconfig

import (
"fmt"
"net"
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

@@ -24,48 +21,3 @@ func TestConfig(t *testing.T) {
LoadConfig()
require.True(t, FlashAttention)
}

func TestClientFromEnvironment(t *testing.T) {
type testCase struct {
value string
expect string
err error
}

hostTestCases := map[string]*testCase{
"empty": {value: "", expect: "127.0.0.1:11434"},
"only address": {value: "1.2.3.4", expect: "1.2.3.4:11434"},
"only port": {value: ":1234", expect: ":1234"},
"address and port": {value: "1.2.3.4:1234", expect: "1.2.3.4:1234"},
"hostname": {value: "example.com", expect: "example.com:11434"},
"hostname and port": {value: "example.com:1234", expect: "example.com:1234"},
"zero port": {value: ":0", expect: ":0"},
"too large port": {value: ":66000", err: ErrInvalidHostPort},
"too small port": {value: ":-1", err: ErrInvalidHostPort},
"ipv6 localhost": {value: "[::1]", expect: "[::1]:11434"},
"ipv6 world open": {value: "[::]", expect: "[::]:11434"},
"ipv6 no brackets": {value: "::1", expect: "[::1]:11434"},
"ipv6 + port": {value: "[::1]:1337", expect: "[::1]:1337"},
"extra space": {value: " 1.2.3.4 ", expect: "1.2.3.4:11434"},
"extra quotes": {value: "\"1.2.3.4\"", expect: "1.2.3.4:11434"},
"extra space+quotes": {value: " \" 1.2.3.4 \" ", expect: "1.2.3.4:11434"},
"extra single quotes": {value: "'1.2.3.4'", expect: "1.2.3.4:11434"},
}

for k, v := range hostTestCases {
t.Run(k, func(t *testing.T) {
t.Setenv("OLLAMA_HOST", v.value)
LoadConfig()

oh, err := getOllamaHost()
if err != v.err {
t.Fatalf("expected %s, got %s", v.err, err)
}

if err == nil {
host := net.JoinHostPort(oh.Host, oh.Port)
assert.Equal(t, v.expect, host, fmt.Sprintf("%s: expected %s, got %s", k, v.expect, host))
}
})
}
}
@@ -77,21 +77,13 @@ LOADER_MAPPING = {

def load_single_document(file_path: str) -> List[Document]:
if os.path.getsize(file_path) != 0:
filename, ext = os.path.splitext(file_path)
if ext in LOADER_MAPPING:
loader_class, loader_args = LOADER_MAPPING[ext]
try:
loader = loader_class(file_path, **loader_args)
if loader:
return loader.load()
except:
print(f"Corrupted file {file_path}. Ignoring it.")
else:
print(f"Unsupported file {file_path}. Ignoring it.")
else:
print(f"Empty file {file_path}. Ignoring it.")
ext = "." + file_path.rsplit(".", 1)[-1]
if ext in LOADER_MAPPING:
loader_class, loader_args = LOADER_MAPPING[ext]
loader = loader_class(file_path, **loader_args)
return loader.load()

raise ValueError(f"Unsupported file extension '{ext}'")

def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Document]:
"""

@@ -108,8 +100,7 @@ def load_documents(source_dir: str, ignored_files: List[str] = []) -> List[Docum
results = []
with tqdm(total=len(filtered_files), desc='Loading new documents', ncols=80) as pbar:
for i, docs in enumerate(pool.imap_unordered(load_single_document, filtered_files)):
if docs:
results.extend(docs)
results.extend(docs)
pbar.update()

return results

@@ -11,5 +11,4 @@ tabulate==0.9.0
pandoc==2.3
pypandoc==1.11
tqdm==4.66.1
sentence_transformers==2.2.2
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
sentence_transformers==2.2.2

@@ -5,6 +5,7 @@ import (
)

func TestHumanNumber(t *testing.T) {

type testCase struct {
input uint64
expected string
go.mod (2 lines changed)

@@ -16,7 +16,6 @@ require (
)

require (
github.com/agnivade/levenshtein v1.1.1
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1
github.com/mattn/go-runewidth v0.0.14
github.com/nlpodyssey/gopickle v0.3.0

@@ -34,6 +33,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/flatbuffers v24.3.25+incompatible // indirect
github.com/kr/text v0.2.0 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect

go.sum (8 lines changed)

@@ -4,14 +4,10 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40 h1:q4dksr6ICHXqG5hm0ZW5IHyeEJXoIJSOZeBLmWPNeIQ=
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=

@@ -40,8 +36,6 @@ github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1/go.mod h1:uw2gLc
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=

@@ -141,6 +135,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=

@@ -65,7 +65,7 @@ func AMDGetGPUInfo() []GpuInfo {

slog.Debug("detected hip devices", "count", count)
// TODO how to determine the underlying device ID when visible devices is causing this to subset?
for i := range count {
for i := 0; i < count; i++ {
err = hl.HipSetDevice(i)
if err != nil {
slog.Warn("set device", "id", i, "error", err)

@@ -80,7 +80,7 @@ func cleanupTmpDirs() {
if err == nil {
pid, err := strconv.Atoi(string(raw))
if err == nil {
if proc, err := os.FindProcess(pid); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
if proc, err := os.FindProcess(int(pid)); err == nil && !errors.Is(proc.Signal(syscall.Signal(0)), os.ErrProcessDone) {
// Another running ollama, ignore this tmpdir
continue
}

@@ -18,4 +18,5 @@ func cudaGetVisibleDevicesEnv(gpuInfo []GpuInfo) (string, string) {
ids = append(ids, info.ID)
}
return "CUDA_VISIBLE_DEVICES", strings.Join(ids, ",")

}
gpu/gpu.go (39 lines changed)
@@ -16,12 +16,13 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/format"
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
)
|
||||
|
||||
type handles struct {
|
||||
@@ -104,6 +105,8 @@ func initGPUHandles() *handles {
|
||||
var cudartMgmtPatterns []string
|
||||
var nvcudaMgmtName string
|
||||
var nvcudaMgmtPatterns []string
|
||||
var oneapiMgmtName string
|
||||
var oneapiMgmtPatterns []string
|
||||
|
||||
tmpDir, _ := PayloadsDir()
|
||||
switch runtime.GOOS {
|
||||
@@ -115,6 +118,8 @@ func initGPUHandles() *handles {
|
||||
// Aligned with driver, we can't carry as payloads
|
||||
nvcudaMgmtName = "nvcuda.dll"
|
||||
nvcudaMgmtPatterns = NvcudaWindowsGlobs
|
||||
oneapiMgmtName = "ze_intel_gpu64.dll"
|
||||
oneapiMgmtPatterns = OneapiWindowsGlobs
|
||||
case "linux":
|
||||
cudartMgmtName = "libcudart.so*"
|
||||
if tmpDir != "" {
|
||||
@@ -125,6 +130,8 @@ func initGPUHandles() *handles {
|
||||
// Aligned with driver, we can't carry as payloads
|
||||
nvcudaMgmtName = "libcuda.so*"
|
||||
nvcudaMgmtPatterns = NvcudaLinuxGlobs
|
||||
oneapiMgmtName = "libze_intel_gpu.so"
|
||||
oneapiMgmtPatterns = OneapiLinuxGlobs
|
||||
default:
|
||||
return gpuHandles
|
||||
}
|
||||
@@ -152,6 +159,17 @@ func initGPUHandles() *handles {
|
||||
}
|
||||
}
|
||||
|
||||
oneapiLibPaths := FindGPULibs(oneapiMgmtName, oneapiMgmtPatterns)
|
||||
if len(oneapiLibPaths) > 0 {
|
||||
deviceCount, oneapi, libPath := LoadOneapiMgmt(oneapiLibPaths)
|
||||
if oneapi != nil {
|
||||
slog.Debug("detected Intel GPUs", "library", libPath, "count", deviceCount)
|
||||
gpuHandles.oneapi = oneapi
|
||||
gpuHandles.deviceCount = deviceCount
|
||||
return gpuHandles
|
||||
}
|
||||
}
|
||||
|
||||
return gpuHandles
|
||||
}
|
||||
|
||||
@@ -187,7 +205,7 @@ func GetGPUInfo() GpuInfoList {
|
||||
resp := []GpuInfo{}
|
||||
|
||||
// NVIDIA first
|
||||
for i := range gpuHandles.deviceCount {
|
||||
for i := 0; i < gpuHandles.deviceCount; i++ {
|
||||
// TODO once we support CPU compilation variants of GPU libraries refine this...
|
||||
if cpuVariant == "" && runtime.GOARCH == "amd64" {
|
||||
continue
@@ -221,12 +239,24 @@ func GetGPUInfo() GpuInfoList {
gpuInfo.MinimumMemory = cudaMinimumMemory
gpuInfo.DependencyPath = depPath
gpuInfo.Name = C.GoString(&memInfo.gpu_name[0])
gpuInfo.DriverMajor = driverMajor
gpuInfo.DriverMinor = driverMinor
gpuInfo.DriverMajor = int(driverMajor)
gpuInfo.DriverMinor = int(driverMinor)

// TODO potentially sort on our own algorithm instead of what the underlying GPU library does...
resp = append(resp, gpuInfo)
}
if gpuHandles.oneapi != nil {
gpuInfo := GpuInfo{
Library: "oneapi",
}
C.oneapi_check_vram(*gpuHandles.oneapi, &memInfo)
var totalFreeMem float64 = float64(memInfo.free) * 0.95 // work-around: leave some reserve vram for mkl lib used in ggml-sycl backend.
memInfo.free = C.uint64_t(totalFreeMem)
gpuInfo.TotalMemory = uint64(memInfo.total)
gpuInfo.FreeMemory = uint64(memInfo.free)
gpuInfo.ID = strconv.Itoa(i)
resp = append(resp, gpuInfo)
}
}

// Then AMD
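The oneAPI branch above deliberately under-reports free VRAM by 5% to leave headroom for the MKL library used by the ggml-sycl backend. A standalone sketch of that reservation arithmetic (the 8 GiB figure is made up):

package main

import "fmt"

func main() {
	var free uint64 = 8 << 30 // example: 8 GiB reported free
	// Same work-around as the diff: keep ~5% in reserve for MKL.
	usable := uint64(float64(free) * 0.95)
	fmt.Printf("reported=%d usable=%d reserved=%d\n", free, usable, free-usable)
}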
@@ -296,7 +326,6 @@ func FindGPULibs(baseLibName string, defaultPatterns []string) []string {
// Nvidia PhysX known to return bogus results
if strings.Contains(pattern, "PhysX") {
slog.Debug("skipping PhysX cuda library path", "path", pattern)
continue
}
// Ignore glob discovery errors
matches, _ := filepath.Glob(pattern)

@@ -5,12 +5,11 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestBasicGetGPUInfo(t *testing.T) {
info := GetGPUInfo()
assert.NotEmpty(t, len(info))
assert.Greater(t, len(info), 0)
assert.Contains(t, "cuda rocm cpu metal", info[0].Library)
if info[0].Library != "cpu" {
assert.Greater(t, info[0].TotalMemory, uint64(0))

@@ -20,7 +19,7 @@ func TestBasicGetGPUInfo(t *testing.T) {

func TestCPUMemInfo(t *testing.T) {
info, err := GetCPUMem()
require.NoError(t, err)
assert.NoError(t, err)
switch runtime.GOOS {
case "darwin":
t.Skip("CPU memory not populated on darwin")

54 llm/ext_server/server.cpp vendored

@@ -359,6 +359,7 @@ struct llama_server_context

// slots / clients
std::vector<server_slot> slots;
json default_generation_settings_for_props;

llama_server_queue queue_tasks;
llama_server_response queue_results;

@@ -482,6 +483,9 @@ struct llama_server_context
slots.push_back(slot);
}

default_generation_settings_for_props = get_formated_generation(slots.front());
default_generation_settings_for_props["seed"] = -1;

batch = llama_batch_init(n_ctx, 0, params.n_parallel);
}

@@ -580,7 +584,7 @@ struct llama_server_context
slot->sparams.mirostat_eta = json_value(data, "mirostat_eta", default_sparams.mirostat_eta);
slot->sparams.penalize_nl = json_value(data, "penalize_nl", default_sparams.penalize_nl);
slot->params.n_keep = json_value(data, "n_keep", slot->params.n_keep);
slot->sparams.seed = json_value(data, "seed", default_params.seed);
slot->params.seed = json_value(data, "seed", default_params.seed);
slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);

@@ -807,6 +811,7 @@ struct llama_server_context
llama_sampling_free(slot->ctx_sampling);
}
slot->ctx_sampling = llama_sampling_init(slot->sparams);
llama_set_rng_seed(ctx, slot->params.seed);
slot->command = LOAD_PROMPT;

all_slots_are_idle = false;

@@ -830,7 +835,7 @@ struct llama_server_context
system_tokens.clear();

if (!system_prompt.empty()) {
system_tokens = ::llama_tokenize(ctx, system_prompt, true);
system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);

llama_batch_clear(batch);

@@ -1651,7 +1656,7 @@ struct llama_server_context
slot.t_start_process_prompt = ggml_time_us();
slot.t_start_genereration = 0;

prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt
prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS if there isn't system prompt

slot.n_prompt_tokens = prompt_tokens.size();

@@ -2620,21 +2625,6 @@ static json format_partial_response(
return res;
}

static json format_tokenizer_response(const std::vector<llama_token> &tokens)
{
return json {
{"tokens", tokens}
};
}

static json format_detokenized_response(std::string content)
{
return json {
{"content", content}
};
}

static void log_server_request(const httplib::Request &req, const httplib::Response &res)
{
// skip GH copilot requests when using default port

@@ -3124,34 +3114,6 @@ int main(int argc, char **argv) {
}
});

svr.Post("/tokenize", [&llama](const httplib::Request &req, httplib::Response &res)
{
res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
const json body = json::parse(req.body);
std::vector<llama_token> tokens;
if (body.count("content") != 0)
{
tokens = llama.tokenize(body["content"], false);
}
const json data = format_tokenizer_response(tokens);
return res.set_content(data.dump(), "application/json; charset=utf-8");
});

svr.Post("/detokenize", [&llama](const httplib::Request &req, httplib::Response &res)
{
res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
const json body = json::parse(req.body);
std::string content;
if (body.count("tokens") != 0)
{
const std::vector<llama_token> tokens = body["tokens"];
content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
}

const json data = format_detokenized_response(content);
return res.set_content(data.dump(), "application/json; charset=utf-8");
});

svr.Post("/embedding", [&llama](const httplib::Request &req, httplib::Response &res)
{
res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
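This hunk is the flip side of the llm/llm.go change further down: v0.1.44 serves /tokenize and /detokenize over HTTP from the embedded server (along with the format_* helpers above), while the branch drops them and tokenizes in-process instead. For reference, the endpoints exchange JSON of the shape {"content": ...} and {"tokens": [...]}; a hedged Go client sketch against a local instance (the port is illustrative, not a documented default):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request/response shapes mirror format_tokenizer_response above.
	body, _ := json.Marshal(map[string]string{"content": "hello world"})
	resp, err := http.Post("http://127.0.0.1:8080/tokenize", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Tokens []int `json:"tokens"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Tokens)
}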
@@ -211,7 +211,7 @@ if [ -z "${ONEAPI_ROOT}" ]; then
ONEAPI_ROOT=/opt/intel/oneapi
fi

if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then
if [ -d "${ONEAPI_ROOT}" ]; then
echo "OneAPI libraries detected - building dynamic OneAPI library"
init_vars
source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI

@@ -290,7 +290,7 @@ function build_cuda() {
}

function build_oneapi() {
if ((-not "${env:OLLAMA_SKIP_ONEAPI_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
if ((-not "${env:OLLAMA_SKIP_CUDA_GENERATE}") -and ("${env:ONEAPI_ROOT}")) {
# Get oneAPI version
$script:ONEAPI_VERSION = icpx --version
$script:ONEAPI_VERSION = [regex]::Match($script:ONEAPI_VERSION, '(?<=oneAPI DPC\+\+/C\+\+ Compiler )(?<version>\d+\.\d+\.\d+)').Value

@@ -81,11 +81,6 @@ func (kv KV) ContextLength() uint64 {
return kv.u64(fmt.Sprintf("%s.context_length", kv.Architecture()))
}

func (kv KV) ChatTemplate() string {
s, _ := kv["tokenizer.chat_template"].(string)
return s
}

type Tensors []*Tensor

func (ts Tensors) Layers() map[string]Layer {

26 llm/gguf.go

@@ -592,8 +592,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
return err
}

var dims int
for cnt := range len(tensor.Shape) {
dims := 0
for cnt := 0; cnt < len(tensor.Shape); cnt++ {
if tensor.Shape[cnt] > 0 {
dims++
}

@@ -603,8 +603,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
return err
}

for i := range dims {
if err := binary.Write(ws, llm.ByteOrder, tensor.Shape[dims-1-i]); err != nil {
for i := 0; i < dims; i++ {
if err := binary.Write(ws, llm.ByteOrder, uint64(tensor.Shape[dims-1-i])); err != nil {
return err
}
}

@@ -618,8 +618,22 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
}
}

offset, err := ws.Seek(0, io.SeekCurrent)
if err != nil {
return err
}

var alignment int64 = 32
padding := llm.padding(offset, alignment)
if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil {
return err
}

for _, tensor := range tensors {
if _, err := tensor.WriteTo(ws); err != nil {
return err
}

offset, err := ws.Seek(0, io.SeekCurrent)
if err != nil {
return err

@@ -629,10 +643,6 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
if err := binary.Write(ws, llm.ByteOrder, bytes.Repeat([]byte{0}, int(padding))); err != nil {
return err
}

if _, err := tensor.WriteTo(ws); err != nil {
return err
}
}

return nil
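The Encode change above pads the header and each tensor out to a 32-byte boundary via llm.padding. That helper isn't shown in this hunk, so the following is a sketch of the usual alignment arithmetic, assuming padding returns the byte count needed to reach the next multiple of the alignment:

package main

import "fmt"

// padding returns how many zero bytes are needed to advance offset
// to the next multiple of align (0 if offset is already aligned).
func padding(offset, align int64) int64 {
	return (align - offset%align) % align
}

func main() {
	fmt.Println(padding(100, 32)) // 28: next boundary is 128
	fmt.Println(padding(128, 32)) // 0: already aligned
}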
Submodule llm/llama.cpp updated: 5921b8f089...74f33adf5f

60 llm/llm.go

@@ -12,6 +12,7 @@ package llm
import "C"
import (
"fmt"
"strings"
"unsafe"
)

@@ -37,3 +38,62 @@ func Quantize(infile, outfile string, ftype fileType) error {

return nil
}

type llamaModel struct {
m *C.struct_llama_model
}

func newLlamaModel(p string) *llamaModel {
cs := C.CString(p)
defer C.free(unsafe.Pointer(cs))

params := C.llama_model_default_params()
params.vocab_only = true

return &llamaModel{
C.llama_load_model_from_file(cs, params),
}
}

func (llm *llamaModel) Close() {
C.llama_free_model(llm.m)
}

func (llm *llamaModel) Tokenize(s string) []int {
cs := C.CString(s)
defer C.free(unsafe.Pointer(cs))

ltokens := make([]C.llama_token, len(s)+2)
n := C.llama_tokenize(
llm.m,
cs,
C.int32_t(len(s)),
&ltokens[0],
C.int32_t(len(ltokens)),
false,
true,
)

if n < 0 {
return nil
}

tokens := make([]int, n)
for i := 0; i < int(n); i++ {
tokens[i] = int(ltokens[i])
}

return tokens
}

func (llm *llamaModel) Detokenize(i32s []int) string {
var sb strings.Builder
for _, i32 := range i32s {
c := make([]byte, 512)
if n := C.llama_token_to_piece(llm.m, C.llama_token(i32), (*C.char)(unsafe.Pointer(&c[0])), C.int(len(c)), false); n > 0 {
sb.WriteString(unsafe.String(&c[0], n))
}
}

return sb.String()
}
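Taken together, these additions give a cgo wrapper around a vocab-only llama model, enough to tokenize and detokenize without a running server process. A hedged usage sketch from inside the llm package (the model path is a placeholder):

// Sketch; "model.gguf" is an illustrative path, not a real artifact.
m := newLlamaModel("model.gguf")
defer m.Close()

tokens := m.Tokenize("hello world") // []int of token ids
fmt.Println(tokens)

// Round-trip the ids back to text.
fmt.Println(m.Detokenize(tokens))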
@@ -5,9 +5,9 @@ import (
"log/slog"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/envconfig"
)

// This algorithm looks for a complete fit to determine if we need to unload other models

@@ -103,7 +103,7 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
}

var layerCount int
for i := range int(ggml.KV().BlockCount()) {
for i := 0; i < int(ggml.KV().BlockCount()); i++ {
if blk, ok := layers[fmt.Sprintf("blk.%d", i)]; ok {
memoryLayer := blk.size()

@@ -1,13 +0,0 @@
diff --git a/llama.cpp b/llama.cpp
index 40d2ec2c..f34eb79a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv(
struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
cb(kq, "kq", il);

- if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
+ if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
// for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
// ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
ggml_mul_mat_set_prec(kq, GGML_PREC_F32);

@@ -10,9 +10,9 @@ import (
"os"
"path/filepath"
"runtime"
"slices"
"strings"

"golang.org/x/exp/slices"
"golang.org/x/sync/errgroup"

"github.com/ollama/ollama/gpu"

133 llm/server.go

@@ -57,6 +57,8 @@ type llmServer struct {
loadDuration time.Duration // Record how long it took the model to load
loadProgress float32

*llamaModel

sem *semaphore.Weighted
}

@@ -85,6 +87,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
var systemMemory uint64
gpuCount := len(gpus)
if (len(gpus) == 1 && gpus[0].Library == "cpu") || opts.NumGPU == 0 {

// TODO evaluate system memory to see if we should block the load, or force an unload of another CPU runner

cpuRunner = serverForCpu()

@@ -103,22 +106,21 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
var layers int
layers, estimatedVRAM, estimatedTotal = EstimateGPULayers(gpus, ggml, projectors, opts)

switch {
case gpus[0].Library == "metal" && estimatedVRAM > systemMemory:
if gpus[0].Library == "metal" && estimatedVRAM > systemMemory {
// disable partial offloading when model is greater than total system memory as this
// can lead to locking up the system
opts.NumGPU = 0
case gpus[0].Library != "metal" && layers == 0:
} else if gpus[0].Library != "metal" && layers == 0 {
// Don't bother loading into the GPU if no layers can fit
cpuRunner = serverForCpu()
gpuCount = 0
case opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu":
} else if opts.NumGPU < 0 && layers > 0 && gpus[0].Library != "cpu" {
opts.NumGPU = layers
}
}

// Loop through potential servers
finalErr := errors.New("no suitable llama servers found")
finalErr := fmt.Errorf("no suitable llama servers found")

if len(adapters) > 1 {
return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
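v0.1.44's tagless switch and the branch's if/else ladder above are behaviorally identical: conditions are evaluated top to bottom and the first true one wins. A minimal illustration of the equivalence:

package main

import "fmt"

func classify(n int) string {
	// Tagless switch: each case is a boolean expression,
	// tried in order, exactly like an if/else-if chain.
	switch {
	case n < 0:
		return "negative"
	case n == 0:
		return "zero"
	}
	return "positive"
}

func main() {
	fmt.Println(classify(-1), classify(0), classify(2))
}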
@@ -232,7 +234,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr

params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

for i := range len(servers) {
for i := 0; i < len(servers); i++ {
dir := availableServers[servers[i]]
if dir == "" {
// Shouldn't happen

@@ -284,7 +286,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr

server := filepath.Join(dir, "ollama_llama_server")
if runtime.GOOS == "windows" {
server += ".exe"
server = server + ".exe"
}

// Detect tmp cleaners wiping out the file

@@ -309,13 +311,14 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
totalLayers: ggml.KV().BlockCount() + 1,
gpuCount: gpuCount,
done: make(chan error, 1),
llamaModel: newLlamaModel(model),
}

s.cmd.Env = os.Environ()
s.cmd.Stdout = os.Stdout
s.cmd.Stderr = s.status

visibleDevicesEnv, visibleDevicesEnvVal := gpus.GetVisibleDevicesEnv()
visibleDevicesEnv, visibleDevicesEnvVal := gpu.GpuInfoList(gpus).GetVisibleDevicesEnv()
pathEnvVal := strings.Join(libraryPaths, string(filepath.ListSeparator))

// Update or add the path and visible devices variable with our adjusted version

@@ -459,7 +462,7 @@ func (s *llmServer) getServerStatus(ctx context.Context) (ServerStatus, error) {
resp, err := http.DefaultClient.Do(req)
if err != nil {
if errors.Is(err, context.DeadlineExceeded) {
return ServerStatusNotResponding, errors.New("server not responding")
return ServerStatusNotResponding, fmt.Errorf("server not responding")
}
return ServerStatusError, fmt.Errorf("health resp: %w", err)
}

@@ -606,7 +609,7 @@ array ::=

string ::=
"\"" (
[^"\\\x7F\x00-\x1F] |
[^"\\] |
"\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
)* "\"" ws

@@ -846,12 +849,12 @@ func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, er
return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
}

data, err := json.Marshal(TokenizeRequest{Content: prompt})
if err != nil {
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(EmbeddingRequest{Content: prompt}); err != nil {
return nil, fmt.Errorf("error marshaling embed data: %w", err)
}

req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), bytes.NewBuffer(data))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/embedding", s.port), &b)
if err != nil {
return nil, fmt.Errorf("error creating embed request: %w", err)
}
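v0.1.44 also swaps json.Marshal plus bytes.NewBuffer for json.NewEncoder writing straight into a bytes.Buffer; the request body is equivalent apart from the trailing newline Encode appends. A small sketch of the difference:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	payload := map[string]string{"content": "hi"}

	data, _ := json.Marshal(payload) // no trailing newline

	var b bytes.Buffer
	_ = json.NewEncoder(&b).Encode(payload) // appends '\n'

	fmt.Printf("%q\n%q\n", data, b.String())
}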
@@ -881,108 +884,12 @@ func (s *llmServer) Embedding(ctx context.Context, prompt string) ([]float64, er
return embedding.Embedding, nil
}

type TokenizeRequest struct {
Content string `json:"content"`
}

type TokenizeResponse struct {
Tokens []int `json:"tokens"`
}

func (s *llmServer) Tokenize(ctx context.Context, content string) ([]int, error) {
// Make sure the server is ready
status, err := s.getServerStatus(ctx)
if err != nil {
return nil, err
} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
return nil, fmt.Errorf("unexpected server status: %s", status.ToString())
}

data, err := json.Marshal(TokenizeRequest{Content: content})
if err != nil {
return nil, fmt.Errorf("marshaling encode data: %w", err)
}

req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/tokenize", s.port), bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("encode request: %w", err)
}
req.Header.Set("Content-Type", "application/json")

resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("do encode request: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("read encode request: %w", err)
}

if resp.StatusCode >= 400 {
log.Printf("llm encode error: %s", body)
return nil, fmt.Errorf("%s", body)
}

var encoded TokenizeResponse
if err := json.Unmarshal(body, &encoded); err != nil {
return nil, fmt.Errorf("unmarshal encode response: %w", err)
}

return encoded.Tokens, nil
}

type DetokenizeRequest struct {
Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
Content string `json:"content"`
return s.llamaModel.Tokenize(content), nil
}

func (s *llmServer) Detokenize(ctx context.Context, tokens []int) (string, error) {
// Make sure the server is ready
status, err := s.getServerStatus(ctx)
if err != nil {
return "", err
} else if status != ServerStatusReady && status != ServerStatusNoSlotsAvailable {
return "", fmt.Errorf("unexpected server status: %s", status.ToString())
}

data, err := json.Marshal(DetokenizeRequest{Tokens: tokens})
if err != nil {
return "", fmt.Errorf("marshaling decode data: %w", err)
}

req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/detokenize", s.port), bytes.NewBuffer(data))
if err != nil {
return "", fmt.Errorf("decode request: %w", err)
}
req.Header.Set("Content-Type", "application/json")

resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", fmt.Errorf("do decode request: %w", err)
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("read decode request: %w", err)
}

if resp.StatusCode >= 400 {
log.Printf("llm decode error: %s", body)
return "", fmt.Errorf("%s", body)
}

var decoded DetokenizeResponse
if err := json.Unmarshal(body, &decoded); err != nil {
return "", fmt.Errorf("unmarshal encode response: %w", err)
}

return decoded.Content, nil
return s.llamaModel.Detokenize(tokens), nil
}
func (s *llmServer) Close() error {

@@ -1000,6 +907,10 @@ func (s *llmServer) Close() error {
slog.Debug("llama server stopped")
}

if s.llamaModel != nil {
s.llamaModel.Close()
}

return nil
}

@@ -245,6 +245,7 @@ func (w *writer) writeResponse(data []byte) (int, error) {
d, err := json.Marshal(toChunk(w.id, chatResponse))
if err != nil {
return 0, err
}

w.ResponseWriter.Header().Set("Content-Type", "text/event-stream")

@@ -8,9 +8,7 @@ import (
"io"
"strconv"
"strings"

"golang.org/x/text/encoding/unicode"
"golang.org/x/text/transform"
"unicode"
)

type File struct {

@@ -71,11 +69,14 @@ func ParseFile(r io.Reader) (*File, error) {
var b bytes.Buffer
var role string

var lineCount int
var linePos int

var utf16 bool

var f File

tr := unicode.BOMOverride(unicode.UTF8.NewDecoder())
br := bufio.NewReader(transform.NewReader(r, tr))

br := bufio.NewReader(r)
for {
r, _, err := br.ReadRune()
if errors.Is(err, io.EOF) {

@@ -84,6 +85,17 @@ func ParseFile(r io.Reader) (*File, error) {
return nil, err
}

// the utf16 byte order mark will be read as "unreadable" by ReadRune()
if isUnreadable(r) && lineCount == 0 && linePos == 0 {
utf16 = true
continue
}

// skip the second byte if we're reading utf16
if utf16 && r == 0 {
continue
}

next, r, err := parseRuneForState(r, curr)
if errors.Is(err, io.ErrUnexpectedEOF) {
return nil, fmt.Errorf("%w: %s", err, b.String())

@@ -91,6 +103,13 @@ func ParseFile(r io.Reader) (*File, error) {
return nil, err
}

if isNewline(r) {
lineCount++
linePos = 0
} else {
linePos++
}

// process the state transition, some transitions need to be intercepted and redirected
if next != curr {
switch curr {

@@ -290,6 +309,10 @@ func isNewline(r rune) bool {
return r == '\r' || r == '\n'
}

func isUnreadable(r rune) bool {
return r == unicode.ReplacementChar
}

func isValidMessageRole(role string) bool {
return role == "system" || role == "user" || role == "assistant"
}
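The branch's hand-rolled UTF-16 handling hinges on ReadRune returning unicode.ReplacementChar for the BOM bytes and on skipping NUL high bytes in UTF-16LE ASCII text; v0.1.44 replaces all of it with x/text's BOM-aware decoder, as the two lines in the hunk above show. A runnable sketch of the decoder approach:

package main

import (
	"bufio"
	"fmt"
	"strings"

	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func main() {
	// BOMOverride sniffs a UTF-8/UTF-16 BOM and picks the matching decoder,
	// falling back to plain UTF-8 when no BOM is present.
	tr := unicode.BOMOverride(unicode.UTF8.NewDecoder())
	br := bufio.NewReader(transform.NewReader(strings.NewReader("FROM test\n"), tr))

	line, _ := br.ReadString('\n')
	fmt.Print(line)
}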
@@ -10,9 +10,6 @@ import (
"unicode/utf16"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/encoding"
"golang.org/x/text/encoding/unicode"
)

func TestParseFileFile(t *testing.T) {

@@ -28,7 +25,7 @@ TEMPLATE template1
reader := strings.NewReader(input)

modelfile, err := ParseFile(reader)
require.NoError(t, err)
assert.NoError(t, err)

expectedCommands := []Command{
{Name: "model", Args: "model1"},

@@ -91,7 +88,7 @@ func TestParseFileFrom(t *testing.T) {
for _, c := range cases {
t.Run("", func(t *testing.T) {
modelfile, err := ParseFile(strings.NewReader(c.input))
require.ErrorIs(t, err, c.err)
assert.ErrorIs(t, err, c.err)
if modelfile != nil {
assert.Equal(t, c.expected, modelfile.Commands)
}

@@ -108,7 +105,7 @@ PARAMETER param1
reader := strings.NewReader(input)

_, err := ParseFile(reader)
require.ErrorIs(t, err, io.ErrUnexpectedEOF)
assert.ErrorIs(t, err, io.ErrUnexpectedEOF)
}

func TestParseFileBadCommand(t *testing.T) {

@@ -117,7 +114,8 @@ FROM foo
BADCOMMAND param1 value1
`
_, err := ParseFile(strings.NewReader(input))
require.ErrorIs(t, err, errInvalidCommand)
assert.ErrorIs(t, err, errInvalidCommand)

}

func TestParseFileMessages(t *testing.T) {

@@ -203,7 +201,7 @@ MESSAGE system`,
for _, c := range cases {
t.Run("", func(t *testing.T) {
modelfile, err := ParseFile(strings.NewReader(c.input))
require.ErrorIs(t, err, c.err)
assert.ErrorIs(t, err, c.err)
if modelfile != nil {
assert.Equal(t, c.expected, modelfile.Commands)
}

@@ -357,7 +355,7 @@ TEMPLATE """
for _, c := range cases {
t.Run("", func(t *testing.T) {
modelfile, err := ParseFile(strings.NewReader(c.multiline))
require.ErrorIs(t, err, c.err)
assert.ErrorIs(t, err, c.err)
if modelfile != nil {
assert.Equal(t, c.expected, modelfile.Commands)
}

@@ -415,7 +413,7 @@ func TestParseFileParameters(t *testing.T) {
fmt.Fprintln(&b, "FROM foo")
fmt.Fprintln(&b, "PARAMETER", k)
modelfile, err := ParseFile(&b)
require.NoError(t, err)
assert.NoError(t, err)

assert.Equal(t, []Command{
{Name: "model", Args: "foo"},

@@ -444,7 +442,7 @@ FROM foo
for _, c := range cases {
t.Run("", func(t *testing.T) {
modelfile, err := ParseFile(strings.NewReader(c.input))
require.NoError(t, err)
assert.NoError(t, err)
assert.Equal(t, c.expected, modelfile.Commands)
})
}

@@ -503,14 +501,15 @@ SYSTEM ""
for _, c := range cases {
t.Run("", func(t *testing.T) {
modelfile, err := ParseFile(strings.NewReader(c))
require.NoError(t, err)
assert.NoError(t, err)

modelfile2, err := ParseFile(strings.NewReader(modelfile.String()))
require.NoError(t, err)
assert.NoError(t, err)

assert.Equal(t, modelfile, modelfile2)
})
}

}

func TestParseFileUTF16ParseFile(t *testing.T) {

@@ -519,6 +518,14 @@ PARAMETER param1 1
PARAMETER param2 4096
SYSTEM You are a utf16 file.
`
// simulate a utf16 le file
utf16File := utf16.Encode(append([]rune{'\ufffe'}, []rune(data)...))
buf := new(bytes.Buffer)
err := binary.Write(buf, binary.LittleEndian, utf16File)
assert.NoError(t, err)

actual, err := ParseFile(buf)
assert.NoError(t, err)

expected := []Command{
{Name: "model", Args: "bob"},

@@ -527,52 +534,14 @@ SYSTEM You are a utf16 file.
{Name: "system", Args: "You are a utf16 file."},
}

t.Run("le", func(t *testing.T) {
var b bytes.Buffer
require.NoError(t, binary.Write(&b, binary.LittleEndian, []byte{0xff, 0xfe}))
require.NoError(t, binary.Write(&b, binary.LittleEndian, utf16.Encode([]rune(data))))
assert.Equal(t, expected, actual.Commands)

actual, err := ParseFile(&b)
require.NoError(t, err)
// simulate a utf16 be file
buf = new(bytes.Buffer)
err = binary.Write(buf, binary.BigEndian, utf16File)
assert.NoError(t, err)

assert.Equal(t, expected, actual.Commands)
})

t.Run("be", func(t *testing.T) {
var b bytes.Buffer
require.NoError(t, binary.Write(&b, binary.BigEndian, []byte{0xfe, 0xff}))
require.NoError(t, binary.Write(&b, binary.BigEndian, utf16.Encode([]rune(data))))

actual, err := ParseFile(&b)
require.NoError(t, err)
assert.Equal(t, expected, actual.Commands)
})
}

func TestParseMultiByte(t *testing.T) {
input := `FROM test
SYSTEM 你好👋`

expect := []Command{
{Name: "model", Args: "test"},
{Name: "system", Args: "你好👋"},
}

encodings := []encoding.Encoding{
unicode.UTF8,
unicode.UTF16(unicode.LittleEndian, unicode.UseBOM),
unicode.UTF16(unicode.BigEndian, unicode.UseBOM),
}

for _, encoding := range encodings {
t.Run(fmt.Sprintf("%s", encoding), func(t *testing.T) {
s, err := encoding.NewEncoder().String(input)
require.NoError(t, err)

actual, err := ParseFile(strings.NewReader(s))
require.NoError(t, err)

assert.Equal(t, expect, actual.Commands)
})
}
actual, err = ParseFile(buf)
assert.NoError(t, err)
assert.Equal(t, expected, actual.Commands)
}
@@ -59,7 +59,7 @@ func (p *Progress) StopAndClear() bool {
stopped := p.stop()
if stopped {
// clear all progress lines
for i := range p.pos {
for i := 0; i < p.pos; i++ {
if i > 0 {
fmt.Fprint(p.w, "\033[A")
}

@@ -85,7 +85,7 @@ func (p *Progress) render() {
defer fmt.Fprint(p.w, "\033[?25h")

// clear already rendered progress lines
for i := range p.pos {
for i := 0; i < p.pos; i++ {
if i > 0 {
fmt.Fprint(p.w, "\033[A")
}

@@ -52,6 +52,7 @@ func (b *Buffer) GetLineSpacing(line int) bool {
}

return hasSpace.(bool)

}

func (b *Buffer) MoveLeft() {

@@ -116,12 +117,15 @@ func (b *Buffer) MoveRight() {

if b.DisplayPos%b.LineWidth == 0 {
fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())))

} else if (b.DisplayPos-rLength)%b.LineWidth == b.LineWidth-1 && hasSpace {
fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())+rLength))
b.DisplayPos += 1

} else if b.LineHasSpace.Size() > 0 && b.DisplayPos%b.LineWidth == b.LineWidth-1 && hasSpace {
fmt.Printf(CursorDown + CursorBOL + cursorRightN(len(b.Prompt.prompt())))
b.DisplayPos += 1

} else {
fmt.Print(cursorRightN(rLength))
}

@@ -150,7 +154,7 @@ func (b *Buffer) MoveToStart() {
if b.Pos > 0 {
currLine := b.DisplayPos / b.LineWidth
if currLine > 0 {
for range currLine {
for cnt := 0; cnt < currLine; cnt++ {
fmt.Print(CursorUp)
}
}

@@ -165,7 +169,7 @@ func (b *Buffer) MoveToEnd() {
currLine := b.DisplayPos / b.LineWidth
totalLines := b.DisplaySize() / b.LineWidth
if currLine < totalLines {
for range totalLines - currLine {
for cnt := 0; cnt < totalLines-currLine; cnt++ {
fmt.Print(CursorDown)
}
remainder := b.DisplaySize() % b.LineWidth

@@ -181,7 +185,7 @@ func (b *Buffer) MoveToEnd() {

func (b *Buffer) DisplaySize() int {
sum := 0
for i := range b.Buf.Size() {
for i := 0; i < b.Buf.Size(); i++ {
if e, ok := b.Buf.Get(i); ok {
if r, ok := e.(rune); ok {
sum += runewidth.RuneWidth(r)

@@ -193,6 +197,7 @@ func (b *Buffer) DisplaySize() int {
}

func (b *Buffer) Add(r rune) {

if b.Pos == b.Buf.Size() {
b.AddChar(r, false)
} else {

@@ -205,6 +210,7 @@ func (b *Buffer) AddChar(r rune, insert bool) {
b.DisplayPos += rLength

if b.Pos > 0 {

if b.DisplayPos%b.LineWidth == 0 {
fmt.Printf("%c", r)
fmt.Printf("\n%s", b.Prompt.AltPrompt)

@@ -229,6 +235,7 @@ func (b *Buffer) AddChar(r rune, insert bool) {
} else {
b.LineHasSpace.Add(true)
}

} else {
fmt.Printf("%c", r)
}

@@ -349,6 +356,7 @@ func (b *Buffer) drawRemaining() {

func (b *Buffer) Remove() {
if b.Buf.Size() > 0 && b.Pos > 0 {

if e, ok := b.Buf.Get(b.Pos - 1); ok {
if r, ok := e.(rune); ok {
rLength := runewidth.RuneWidth(r)

@@ -374,6 +382,7 @@ func (b *Buffer) Remove() {
} else {
fmt.Print(" " + CursorLeft)
}

} else if (b.DisplayPos-rLength)%b.LineWidth == 0 && hasSpace {
fmt.Printf(CursorBOL + ClearToEOL)
fmt.Printf(CursorUp + CursorBOL + cursorRightN(b.Width))

@@ -382,9 +391,10 @@ func (b *Buffer) Remove() {
b.LineHasSpace.Remove(b.DisplayPos/b.LineWidth - 1)
}
b.DisplayPos -= 1

} else {
fmt.Print(cursorLeftN(rLength))
for range rLength {
for i := 0; i < rLength; i++ {
fmt.Print(" ")
}
fmt.Print(cursorLeftN(rLength))

@@ -441,7 +451,7 @@ func (b *Buffer) DeleteBefore() {
func (b *Buffer) DeleteRemaining() {
if b.DisplaySize() > 0 && b.Pos < b.DisplaySize() {
charsToDel := b.Buf.Size() - b.Pos
for range charsToDel {
for cnt := 0; cnt < charsToDel; cnt++ {
b.Delete()
}
}

@@ -485,7 +495,7 @@ func (b *Buffer) ClearScreen() {
if currPos > 0 {
targetLine := currPos / b.LineWidth
if targetLine > 0 {
for range targetLine {
for cnt := 0; cnt < targetLine; cnt++ {
fmt.Print(CursorDown)
}
}

@@ -515,7 +525,7 @@ func (b *Buffer) Replace(r []rune) {

fmt.Printf(CursorBOL + ClearToEOL)

for range lineNums {
for i := 0; i < lineNums; i++ {
fmt.Print(CursorUp + CursorBOL + ClearToEOL)
}
@@ -91,7 +91,7 @@ func (h *History) Add(l []rune) {
func (h *History) Compact() {
s := h.Buf.Size()
if s > h.Limit {
for range s - h.Limit {
for cnt := 0; cnt < s-h.Limit; cnt++ {
h.Buf.Remove(0)
}
}

@@ -139,7 +139,7 @@ func (h *History) Save() error {
defer f.Close()

buf := bufio.NewWriter(f)
for cnt := range h.Size() {
for cnt := 0; cnt < h.Size(); cnt++ {
v, _ := h.Buf.Get(cnt)
line, _ := v.([]rune)
if _, err := buf.WriteString(string(line) + "\n"); err != nil {

@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"os"
"syscall"
)

type Prompt struct {

@@ -62,7 +63,7 @@ func New(prompt Prompt) (*Instance, error) {

func (i *Instance) Readline() (string, error) {
if !i.Terminal.rawmode {
fd := os.Stdin.Fd()
fd := int(syscall.Stdin)
termios, err := SetRawMode(fd)
if err != nil {
return "", err

@@ -79,8 +80,8 @@ func (i *Instance) Readline() (string, error) {
fmt.Print(prompt)

defer func() {
fd := os.Stdin.Fd()
//nolint:errcheck
fd := int(syscall.Stdin)
// nolint: errcheck
UnsetRawMode(fd, i.Terminal.termios)
i.Terminal.rawmode = false
}()

@@ -135,7 +136,7 @@ func (i *Instance) Readline() (string, error) {
buf.MoveRight()
case CharBracketedPaste:
var code string
for range 3 {
for cnt := 0; cnt < 3; cnt++ {
r, err = i.Terminal.Read()
if err != nil {
return "", io.EOF

@@ -197,7 +198,7 @@ func (i *Instance) Readline() (string, error) {
buf.Remove()
case CharTab:
// todo: convert back to real tabs
for range 8 {
for cnt := 0; cnt < 8; cnt++ {
buf.Add(' ')
}
case CharDelete:

@@ -215,7 +216,7 @@ func (i *Instance) Readline() (string, error) {
case CharCtrlW:
buf.DeleteWord()
case CharCtrlZ:
fd := os.Stdin.Fd()
fd := int(syscall.Stdin)
return handleCharCtrlZ(fd, i.Terminal.termios)
case CharEnter, CharCtrlJ:
output := buf.String()

@@ -247,7 +248,7 @@ func (i *Instance) HistoryDisable() {
}

func NewTerminal() (*Terminal, error) {
fd := os.Stdin.Fd()
fd := int(syscall.Stdin)
termios, err := SetRawMode(fd)
if err != nil {
return nil, err

@@ -6,7 +6,7 @@ import (
"syscall"
)

func handleCharCtrlZ(fd uintptr, termios any) (string, error) {
func handleCharCtrlZ(fd int, termios any) (string, error) {
t := termios.(*Termios)
if err := UnsetRawMode(fd, t); err != nil {
return "", err

@@ -1,6 +1,6 @@
package readline

func handleCharCtrlZ(fd uintptr, state any) (string, error) {
func handleCharCtrlZ(fd int, state any) (string, error) {
// not supported
return "", nil
}

@@ -8,7 +8,7 @@ import (

type Termios syscall.Termios

func SetRawMode(fd uintptr) (*Termios, error) {
func SetRawMode(fd int) (*Termios, error) {
termios, err := getTermios(fd)
if err != nil {
return nil, err

@@ -25,13 +25,13 @@ func SetRawMode(fd uintptr) (*Termios, error) {
return termios, setTermios(fd, &newTermios)
}

func UnsetRawMode(fd uintptr, termios any) error {
func UnsetRawMode(fd int, termios any) error {
t := termios.(*Termios)
return setTermios(fd, t)
}

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal(fd uintptr) bool {
func IsTerminal(fd int) bool {
_, err := getTermios(fd)
return err == nil
}
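The recurring uintptr-to-int signature change across these readline files tracks how the descriptor is obtained: os.Stdin.Fd() yields a uintptr, while syscall.Stdin is a plain int on Unix (on Windows it is a Handle, hence the explicit int conversion in the diff). A small sketch of the two spellings:

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// v0.1.44 side: uintptr from *os.File.
	var fdA uintptr = os.Stdin.Fd()

	// branch side: int derived from the syscall package's stdin value.
	var fdB int = int(syscall.Stdin)

	fmt.Println(fdA, fdB) // both refer to file descriptor 0 on Unix
}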
@@ -7,17 +7,17 @@ import (
"unsafe"
)

func getTermios(fd uintptr) (*Termios, error) {
func getTermios(fd int) (*Termios, error) {
termios := new(Termios)
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
if err != 0 {
return nil, err
}
return termios, nil
}

func setTermios(fd uintptr, termios *Termios) error {
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
func setTermios(fd int, termios *Termios) error {
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
if err != 0 {
return err
}

@@ -10,17 +10,17 @@ import (
const tcgets = 0x5401
const tcsets = 0x5402

func getTermios(fd uintptr) (*Termios, error) {
func getTermios(fd int) (*Termios, error) {
termios := new(Termios)
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, tcgets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), tcgets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
if err != 0 {
return nil, err
}
return termios, nil
}

func setTermios(fd uintptr, termios *Termios) error {
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, tcsets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
func setTermios(fd int, termios *Termios) error {
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), tcsets, uintptr(unsafe.Pointer(termios)), 0, 0, 0)
if err != 0 {
return err
}

@@ -9,13 +9,13 @@ type State struct {
}

// IsTerminal checks if the given file descriptor is associated with a terminal
func IsTerminal(fd uintptr) bool {
func IsTerminal(fd int) bool {
var st uint32
err := windows.GetConsoleMode(windows.Handle(fd), &st)
return err == nil
}

func SetRawMode(fd uintptr) (*State, error) {
func SetRawMode(fd int) (*State, error) {
var st uint32
if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
return nil, err

@@ -32,7 +32,7 @@ func SetRawMode(fd uintptr) (*State, error) {
return &State{st}, nil
}

func UnsetRawMode(fd uintptr, state any) error {
func UnsetRawMode(fd int, state any) error {
s := state.(*State)
return windows.SetConsoleMode(windows.Handle(fd), s.mode)
}

@@ -3,7 +3,6 @@ package server
import (
"context"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"

@@ -15,6 +14,8 @@ import (
"strings"
"time"

"github.com/minio/sha256-simd"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/auth"
)

@@ -340,17 +340,17 @@ type downloadOpts struct {
}

// downloadBlob downloads a blob from the registry and stores it in the blobs directory
func downloadBlob(ctx context.Context, opts downloadOpts) (cacheHit bool, _ error) {
func downloadBlob(ctx context.Context, opts downloadOpts) error {
fp, err := GetBlobsPath(opts.digest)
if err != nil {
return false, err
return err
}

fi, err := os.Stat(fp)
switch {
case errors.Is(err, os.ErrNotExist):
case err != nil:
return false, err
return err
default:
opts.fn(api.ProgressResponse{
Status: fmt.Sprintf("pulling %s", opts.digest[7:19]),

@@ -359,7 +359,7 @@ func downloadBlob(ctx context.Context, opts downloadOpts) (cacheHit bool, _ erro
Completed: fi.Size(),
})

return true, nil
return nil
}

data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest})

@@ -369,12 +369,12 @@ func downloadBlob(ctx context.Context, opts downloadOpts) (cacheHit bool, _ erro
requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil {
blobDownloadManager.Delete(opts.digest)
return false, err
return err
}

//nolint:contextcheck
// nolint: contextcheck
go download.Run(context.Background(), requestURL, opts.regOpts)
}

return false, download.Wait(ctx, opts.fn)
return download.Wait(ctx, opts.fn)
}
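v0.1.44's downloadBlob gains a cacheHit result so that PullModel (in the images.go hunks below) can skip re-verifying blobs already present on disk. The cache check itself is the os.Stat switch above; a self-contained sketch of that pattern (the path is illustrative):

package main

import (
	"errors"
	"fmt"
	"os"
)

// cachedSize reports whether path already exists and its size,
// mirroring the os.Stat switch in downloadBlob above.
func cachedSize(path string) (bool, int64, error) {
	fi, err := os.Stat(path)
	switch {
	case errors.Is(err, os.ErrNotExist):
		return false, 0, nil
	case err != nil:
		return false, 0, err
	default:
		return true, fi.Size(), nil
	}
}

func main() {
	hit, n, err := cachedSize("/tmp/blob-sha256-example")
	fmt.Println(hit, n, err)
}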
@@ -4,7 +4,6 @@ import (
"bytes"
"cmp"
"context"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"

@@ -18,10 +17,13 @@ import (
"os"
"path/filepath"
"runtime"
"slices"
"strconv"
"strings"

"github.com/minio/sha256-simd"

"golang.org/x/exp/slices"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/auth"
"github.com/ollama/ollama/envconfig"

@@ -314,7 +316,7 @@ func realpath(rel, from string) string {
return abspath
}

func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) {
func CreateModel(ctx context.Context, name, modelFileDir, quantization string, modelfile *parser.File, fn func(resp api.ProgressResponse)) (err error) {
config := ConfigV2{
OS: "linux",
Architecture: "amd64",

@@ -332,7 +334,7 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio

switch c.Name {
case "model", "adapter":
var baseLayers []*layerGGML
var baseLayers []*layerWithGGML
if name := model.ParseName(c.Args); name.IsValid() {
baseLayers, err = parseFromModel(ctx, name, fn)
if err != nil {

@@ -439,27 +441,19 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
layers = append(layers, baseLayer.Layer)
}
case "license", "template", "system":
if c.Name != "license" {
// replace
layers = slices.DeleteFunc(layers, func(layer *Layer) bool {
if layer.MediaType != mediatype {
return false
}

if err := layer.Remove(); err != nil {
return false
}

return true
})
}

blob := strings.NewReader(c.Args)
layer, err := NewLayer(blob, mediatype)
if err != nil {
return err
}

if c.Name != "license" {
// replace
layers = slices.DeleteFunc(layers, func(layer *Layer) bool {
return layer.MediaType == mediatype
})
}

layers = append(layers, layer)
case "message":
role, content, ok := strings.Cut(c.Args, ": ")

@@ -578,15 +572,26 @@ func CreateModel(ctx context.Context, name model.Name, modelFileDir, quantizatio
}
}

old, _ := ParseNamedManifest(name)
unref := make(map[string]struct{})
if manifest, _, err := GetManifest(ParseModelPath(name)); err == nil {
for _, layer := range manifest.Layers {
if !slices.Contains(digests, layer.Digest) {
unref[layer.Digest] = struct{}{}
}
}

if manifest.Config.Digest != layer.Digest {
unref[manifest.Config.Digest] = struct{}{}
}
}

fn(api.ProgressResponse{Status: "writing manifest"})
if err := WriteManifest(name, layer, layers); err != nil {
return err
}

if !envconfig.NoPrune && old != nil {
if err := old.RemoveLayers(); err != nil {
if !envconfig.NoPrune {
if err := deleteUnusedLayers(nil, unref); err != nil {
return err
}
}

@@ -658,7 +663,7 @@ func deleteUnusedLayers(skipModelPath *ModelPath, deleteMap map[string]struct{})
// save (i.e. delete from the deleteMap) any files used in other manifests
manifest, _, err := GetManifest(fmp)
if err != nil {
//nolint:nilerr
// nolint: nilerr
return nil
}

@@ -853,27 +858,23 @@ func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
layers = append(layers, manifest.Layers...)
layers = append(layers, manifest.Config)

skipVerify := make(map[string]bool)
for _, layer := range layers {
cacheHit, err := downloadBlob(ctx, downloadOpts{
mp: mp,
digest: layer.Digest,
regOpts: regOpts,
fn: fn,
})
if err != nil {
if err := downloadBlob(
ctx,
downloadOpts{
mp: mp,
digest: layer.Digest,
regOpts: regOpts,
fn: fn,
}); err != nil {
return err
}
skipVerify[layer.Digest] = cacheHit
delete(deleteMap, layer.Digest)
}
delete(deleteMap, manifest.Config.Digest)

fn(api.ProgressResponse{Status: "verifying sha256 digest"})
for _, layer := range layers {
if skipVerify[layer.Digest] {
continue
}
if err := verifyBlob(layer.Digest); err != nil {
if errors.Is(err, errDigestMismatch) {
// something went wrong, delete the blob

@@ -960,6 +961,7 @@ var errUnauthorized = fmt.Errorf("unauthorized: access denied")
func getTokenSubject(token string) string {
parts := strings.Split(token, ".")
if len(parts) != 3 {
slog.Error("jwt token does not contain 3 parts")
return ""
}

@@ -987,7 +989,7 @@ func getTokenSubject(token string) string {

func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *registryOptions) (*http.Response, error) {
anonymous := true // access will default to anonymous if no user is found associated with the public key
for range 2 {
for i := 0; i < 2; i++ {
resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts)
if err != nil {
if !errors.Is(err, context.Canceled) {
@@ -1,10 +1,11 @@
package server

import (
"crypto/sha256"
"fmt"
"io"
"os"

"github.com/minio/sha256-simd"
)

type Layer struct {

@@ -1,15 +1,16 @@
package server

import (
"crypto/sha256"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"os"
"path/filepath"

"github.com/minio/sha256-simd"

"github.com/ollama/ollama/types/model"
)

@@ -34,6 +35,12 @@ func (m *Manifest) Remove() error {
return err
}

for _, layer := range append(m.Layers, m.Config) {
if err := layer.Remove(); err != nil {
return err
}
}

manifests, err := GetManifestPath()
if err != nil {
return err

@@ -42,18 +49,6 @@ func (m *Manifest) Remove() error {
return PruneDirectory(manifests)
}

func (m *Manifest) RemoveLayers() error {
for _, layer := range append(m.Layers, m.Config) {
if err := layer.Remove(); errors.Is(err, os.ErrNotExist) {
slog.Debug("layer does not exist", "digest", layer.Digest)
} else if err != nil {
return err
}
}

return nil
}

func ParseNamedManifest(n model.Name) (*Manifest, error) {
if !n.IsFullyQualified() {
return nil, model.Unqualified(n)

@@ -91,31 +86,30 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) {
}, nil
}

func WriteManifest(name model.Name, config *Layer, layers []*Layer) error {
manifests, err := GetManifestPath()
if err != nil {
return err
}

p := filepath.Join(manifests, name.Filepath())
if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil {
return err
}

f, err := os.Create(p)
if err != nil {
return err
}
defer f.Close()

m := ManifestV2{
func WriteManifest(name string, config *Layer, layers []*Layer) error {
manifest := ManifestV2{
SchemaVersion: 2,
MediaType: "application/vnd.docker.distribution.manifest.v2+json",
Config: config,
Layers: layers,
}

return json.NewEncoder(f).Encode(m)
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(manifest); err != nil {
return err
}

modelpath := ParseModelPath(name)
manifestPath, err := modelpath.GetManifestPath()
if err != nil {
return err
}

if err := os.MkdirAll(filepath.Dir(manifestPath), 0o755); err != nil {
return err
}

return os.WriteFile(manifestPath, b.Bytes(), 0o644)
}

func Manifests() (map[model.Name]*Manifest, error) {
@@ -7,7 +7,6 @@ import (
|
||||
"slices"
|
||||
"testing"
|
||||
|
||||
"github.com/ollama/ollama/envconfig"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
)
|
||||
|
||||
@@ -108,7 +107,6 @@ func TestManifests(t *testing.T) {
|
||||
t.Run(n, func(t *testing.T) {
|
||||
d := t.TempDir()
|
||||
t.Setenv("OLLAMA_MODELS", d)
|
||||
envconfig.LoadConfig()
|
||||
|
||||
for _, p := range wants.ps {
|
||||
createManifest(t, d, p)
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -15,26 +14,27 @@ import (
|
||||
"github.com/ollama/ollama/api"
|
||||
"github.com/ollama/ollama/convert"
|
||||
"github.com/ollama/ollama/llm"
|
||||
"github.com/ollama/ollama/templates"
|
||||
"github.com/ollama/ollama/types/model"
|
||||
)
|
||||
|
||||
var intermediateBlobs map[string]string = make(map[string]string)

type layerGGML struct {
type layerWithGGML struct {
*Layer
*llm.GGML
}

func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
m, err := ParseNamedManifest(name)
func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
modelpath := ParseModelPath(name.String())
manifest, _, err := GetManifest(modelpath)
switch {
case errors.Is(err, os.ErrNotExist):
if err := PullModel(ctx, name.String(), &registryOptions{}, fn); err != nil {
return nil, err
}

m, err = ParseNamedManifest(name)
modelpath = ParseModelPath(name.String())
manifest, _, err = GetManifest(modelpath)
if err != nil {
return nil, err
}
@@ -42,8 +42,8 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
return nil, err
}

for _, layer := range m.Layers {
layer, err := NewLayerFromLayer(layer.Digest, layer.MediaType, name.DisplayShortest())
for _, layer := range manifest.Layers {
layer, err := NewLayerFromLayer(layer.Digest, layer.MediaType, modelpath.GetShortTagname())
if err != nil {
return nil, err
}
@@ -68,16 +68,17 @@ func parseFromModel(ctx context.Context, name model.Name, fn func(api.ProgressRe
return nil, err
}

layers = append(layers, &layerGGML{layer, ggml})
layers = append(layers, &layerWithGGML{layer, ggml})
default:
layers = append(layers, &layerGGML{layer, nil})
layers = append(layers, &layerWithGGML{layer, nil})
}

}

return layers, nil
}

func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
stat, err := file.Stat()
if err != nil {
return nil, err
@@ -181,13 +182,13 @@ func parseFromZipFile(_ context.Context, file *os.File, digest string, fn func(a
return nil, err
}

layers = append(layers, &layerGGML{layer, ggml})
layers = append(layers, &layerWithGGML{layer, ggml})

intermediateBlobs[digest] = layer.Digest
return detectChatTemplate(layers)
return layers, nil
}

func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerGGML, err error) {
func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(api.ProgressResponse)) (layers []*layerWithGGML, err error) {
sr := io.NewSectionReader(file, 0, 512)
contentType, err := detectContentType(sr)
if err != nil {
@@ -229,30 +230,10 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap
return nil, err
}

layers = append(layers, &layerGGML{layer, ggml})
layers = append(layers, &layerWithGGML{layer, ggml})
offset = n
}

return detectChatTemplate(layers)
}

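// detectChatTemplate checks each GGUF layer for an embedded chat template and, when it matches a known named template, appends that template as an additional layer.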
func detectChatTemplate(layers []*layerGGML) ([]*layerGGML, error) {
for _, layer := range layers {
if s := layer.GGML.KV().ChatTemplate(); s != "" {
if t, err := templates.NamedTemplate(s); err != nil {
slog.Debug("template detection", "error", err)
} else {
tmpl, err := NewLayer(t.Reader(), "application/vnd.ollama.image.template")
if err != nil {
return nil, err
}

tmpl.status = fmt.Sprintf("using autodetected template %s", t.Name)
layers = append(layers, &layerGGML{tmpl, nil})
}
}
}

return layers, nil
}


@@ -8,8 +8,6 @@ import (
"path/filepath"
"regexp"
"strings"

"github.com/ollama/ollama/envconfig"
)

type ModelPath struct {
@@ -106,7 +104,14 @@ func (mp ModelPath) GetShortTagname() string {
// modelsDir returns the value of the OLLAMA_MODELS environment variable or the user's home directory if OLLAMA_MODELS is not set.
// The models directory is where Ollama stores its model files and manifests.
func modelsDir() (string, error) {
return envconfig.ModelsDir, nil
if models, exists := os.LookupEnv("OLLAMA_MODELS"); exists {
return models, nil
}
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
return filepath.Join(home, ".ollama", "models"), nil
}

// GetManifestPath returns the path to the manifest file for the given model path; it is up to the caller to create the directory if it does not exist.

@@ -6,15 +6,12 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/ollama/ollama/envconfig"
)

func TestGetBlobsPath(t *testing.T) {
// GetBlobsPath expects an actual directory to exist
dir, err := os.MkdirTemp("", "ollama-test")
require.NoError(t, err)
assert.Nil(t, err)
defer os.RemoveAll(dir)

tests := []struct {
@@ -63,11 +60,10 @@ func TestGetBlobsPath(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
t.Setenv("OLLAMA_MODELS", dir)
envconfig.LoadConfig()

got, err := GetBlobsPath(tc.digest)

require.ErrorIs(t, tc.err, err, tc.name)
assert.ErrorIs(t, tc.err, err, tc.name)
assert.Equal(t, tc.expected, got, tc.name)
})
}

@@ -16,7 +16,6 @@ import (
"os"
"os/signal"
"path/filepath"
"slices"
"strconv"
"strings"
"syscall"
@@ -24,6 +23,7 @@ import (

"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
"golang.org/x/exp/slices"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
@@ -77,6 +77,7 @@ func isSupportedImageType(image []byte) bool {
}

func (s *Server) GenerateHandler(c *gin.Context) {

checkpointStart := time.Now()
var req api.GenerateRequest
err := c.ShouldBindJSON(&req)
@@ -523,8 +524,8 @@ func checkNameExists(name model.Name) error {
}

func (s *Server) CreateModelHandler(c *gin.Context) {
var r api.CreateRequest
if err := c.ShouldBindJSON(&r); errors.Is(err, io.EOF) {
var req api.CreateRequest
if err := c.ShouldBindJSON(&req); errors.Is(err, io.EOF) {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "missing request body"})
return
} else if err != nil {
@@ -532,7 +533,7 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
return
}

name := model.ParseName(cmp.Or(r.Model, r.Name))
name := model.ParseName(cmp.Or(req.Model, req.Name))
if !name.IsValid() {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": errtypes.InvalidModelNameErrMsg})
return
@@ -543,24 +544,24 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
return
}

if r.Path == "" && r.Modelfile == "" {
if req.Path == "" && req.Modelfile == "" {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "path or modelfile are required"})
return
}

var sr io.Reader = strings.NewReader(r.Modelfile)
if r.Path != "" && r.Modelfile == "" {
f, err := os.Open(r.Path)
var r io.Reader = strings.NewReader(req.Modelfile)
if req.Path != "" && req.Modelfile == "" {
f, err := os.Open(req.Path)
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("error reading modelfile: %s", err)})
return
}
defer f.Close()

sr = f
r = f
}

f, err := parser.ParseFile(sr)
modelfile, err := parser.ParseFile(r)
if err != nil {
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
@@ -576,13 +577,17 @@ func (s *Server) CreateModelHandler(c *gin.Context) {
ctx, cancel := context.WithCancel(c.Request.Context())
defer cancel()

quantization := cmp.Or(r.Quantize, r.Quantization)
if err := CreateModel(ctx, name, filepath.Dir(r.Path), strings.ToUpper(quantization), f, fn); err != nil {
quantization := req.Quantization
if req.Quantize != "" {
quantization = req.Quantize
}

if err := CreateModel(ctx, name.String(), filepath.Dir(req.Path), strings.ToUpper(quantization), modelfile, fn); err != nil {
ch <- gin.H{"error": err.Error()}
}
}()

if r.Stream != nil && !*r.Stream {
if req.Stream != nil && !*req.Stream {
waitForStream(c, ch)
return
}
@@ -616,11 +621,6 @@ func (s *Server) DeleteModelHandler(c *gin.Context) {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}

if err := m.RemoveLayers(); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
}

func (s *Server) ShowModelHandler(c *gin.Context) {
@@ -730,7 +730,7 @@ func (s *Server) ListModelsHandler(c *gin.Context) {
return
}

models := []api.ListModelResponse{}
models := []api.ModelResponse{}
for n, m := range ms {
f, err := m.Config.Open()
if err != nil {
@@ -746,7 +746,7 @@ func (s *Server) ListModelsHandler(c *gin.Context) {
}

// tag should never be masked
models = append(models, api.ListModelResponse{
models = append(models, api.ModelResponse{
Model: n.DisplayShortest(),
Name: n.DisplayShortest(),
Size: m.Size(),
@@ -762,7 +762,7 @@ func (s *Server) ListModelsHandler(c *gin.Context) {
})
}

slices.SortStableFunc(models, func(i, j api.ListModelResponse) int {
slices.SortStableFunc(models, func(i, j api.ModelResponse) int {
// most recently modified first
return cmp.Compare(j.ModifiedAt.Unix(), i.ModifiedAt.Unix())
})
@@ -942,7 +942,7 @@ func allowedHostsMiddleware(addr net.Addr) gin.HandlerFunc {
}

if allowedHost(host) {
if c.Request.Method == http.MethodOptions {
if c.Request.Method == "OPTIONS" {
c.AbortWithStatus(http.StatusNoContent)
return
}
@@ -960,10 +960,6 @@ func (s *Server) GenerateRoutes() http.Handler {
config.AllowWildcard = true
config.AllowBrowserExtensions = true
config.AllowHeaders = []string{"Authorization", "Content-Type", "User-Agent", "Accept", "X-Requested-With"}
openAIProperties := []string{"lang", "package-version", "os", "arch", "runtime", "runtime-version", "async"}
for _, prop := range openAIProperties {
config.AllowHeaders = append(config.AllowHeaders, "x-stainless-"+prop)
}
config.AllowOrigins = envconfig.AllowOrigins

r := gin.Default()
@@ -1143,7 +1139,7 @@ func streamResponse(c *gin.Context, ch chan any) {
}

func (s *Server) ProcessHandler(c *gin.Context) {
models := []api.ProcessModelResponse{}
models := []api.ModelResponse{}

for _, v := range s.sched.loaded {
model := v.model
@@ -1155,7 +1151,7 @@ func (s *Server) ProcessHandler(c *gin.Context) {
QuantizationLevel: model.Config.FileType,
}

mr := api.ProcessModelResponse{
mr := api.ModelResponse{
Model: model.ShortName,
Name: model.ShortName,
Size: int64(v.estimatedTotal),
@@ -1175,7 +1171,7 @@ func (s *Server) ProcessHandler(c *gin.Context) {
models = append(models, mr)
}

c.JSON(http.StatusOK, api.ProcessResponse{Models: models})
c.JSON(http.StatusOK, api.ListResponse{Models: models})
}

// ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model
@@ -1310,6 +1306,7 @@ func (s *Server) ChatHandler(c *gin.Context) {
defer close(ch)

fn := func(r llm.CompletionResponse) {

resp := api.ChatResponse{
Model: req.Model,
CreatedAt: time.Now().UTC(),

@@ -15,13 +15,11 @@ import (

"github.com/gin-gonic/gin"
"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/llm"
)

var stream bool = false

func createBinFile(t *testing.T, kv map[string]any, ti []llm.Tensor) string {
func createBinFile(t *testing.T) string {
t.Helper()

f, err := os.CreateTemp(t.TempDir(), "")
@@ -30,7 +28,19 @@ func createBinFile(t *testing.T, kv map[string]any, ti []llm.Tensor) string {
}
defer f.Close()

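// A GGUF v3 header is the 4-byte "GGUF" magic, a uint32 version, then uint64 counts of tensors and metadata key/value pairs (both zero for this empty stub).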
if err := llm.NewGGUFV3(binary.LittleEndian).Encode(f, kv, ti); err != nil {
if err := binary.Write(f, binary.LittleEndian, []byte("GGUF")); err != nil {
t.Fatal(err)
}

if err := binary.Write(f, binary.LittleEndian, uint32(3)); err != nil {
t.Fatal(err)
}

if err := binary.Write(f, binary.LittleEndian, uint64(0)); err != nil {
t.Fatal(err)
}

if err := binary.Write(f, binary.LittleEndian, uint64(0)); err != nil {
t.Fatal(err)
}

@@ -87,12 +97,11 @@ func checkFileExists(t *testing.T, p string, expect []string) {
func TestCreateFromBin(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()

var s Server
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
Stream: &stream,
})

@@ -113,12 +122,11 @@ func TestCreateFromBin(t *testing.T) {
func TestCreateFromModel(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
Stream: &stream,
})

@@ -150,421 +158,3 @@ func TestCreateFromModel(t *testing.T) {
filepath.Join(p, "blobs", "sha256-ca239d7bd8ea90e4a5d2e6bf88f8d74a47b14336e73eb4e18bed4dd325018116"),
})
}

func TestCreateRemovesLayers(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-b507b9c2f6ca642bffcd06665ea7c91f235fd32daeefdf875a0f938db05fb315"),
filepath.Join(p, "blobs", "sha256-bc80b03733773e0728011b2f4adf34c458b400e1aad48cb28d61170f3a2ad2d6"),
})

w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-8f2c2167d789c6b2302dff965160fa5029f6a24096d262c1cbb469f21a045382"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"),
})
}

func TestCreateUnsetsSystem(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nSYSTEM Say hi!", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-8585df945d1069bc78b79bd10bb73ba07fbc29b0f5479a31a601c0d12731416e"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-f29e82a8284dbdf5910b1555580ff60b04238b8da9d5e51159ada67a4d0d5851"),
})

w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nSYSTEM \"\"", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-67d4b8d106af2a5b100a46e9bdc038c71eef2a35c9abac784092654212f97cf5"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"),
})

bts, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"))
if err != nil {
t.Fatal(err)
}

if string(bts) != "" {
t.Fatalf("expected empty string, actual %s", string(bts))
}
}

func TestCreateMergeParameters(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nPARAMETER temperature 1\nPARAMETER top_k 10\nPARAMETER stop USER:\nPARAMETER stop ASSISTANT:", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
filepath.Join(p, "blobs", "sha256-4a384beaf47a9cbe452dfa5ab70eea691790f3b35a832d12933a1996685bf2b6"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
})

// in order to merge parameters, the second model must be created FROM the first
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nPARAMETER temperature 0.6\nPARAMETER top_p 0.7",
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
filepath.Join(p, "blobs", "sha256-4a384beaf47a9cbe452dfa5ab70eea691790f3b35a832d12933a1996685bf2b6"),
filepath.Join(p, "blobs", "sha256-4cd9d4ba6b734d9b4cbd1e5caa60374c00722e993fce5e1e2d15a33698f71187"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e29a7b3c47287a2489c895d21fe413c20f859a85d20e749492f52a838e36e1ba"),
})

actual, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e29a7b3c47287a2489c895d21fe413c20f859a85d20e749492f52a838e36e1ba"))
if err != nil {
t.Fatal(err)
}

expect, err := json.Marshal(map[string]any{"temperature": 0.6, "top_k": 10, "top_p": 0.7, "stop": []string{"USER:", "ASSISTANT:"}})
if err != nil {
t.Fatal(err)
}

if !bytes.Equal(bytes.TrimSpace(expect), bytes.TrimSpace(actual)) {
t.Errorf("expected %s, actual %s", string(expect), string(actual))
}

// slices are replaced
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nPARAMETER temperature 0.6\nPARAMETER top_p 0.7\nPARAMETER stop <|endoftext|>",
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-12f58bb75cb3042d69a7e013ab87fb3c3c7088f50ddc62f0c77bd332f0d44d35"),
filepath.Join(p, "blobs", "sha256-1d0ad71299d48c2fb7ae2b98e683643e771f8a5b72be34942af90d97a91c1e37"),
filepath.Join(p, "blobs", "sha256-257aa726584f24970a4f240765e75a7169bfbe7f4966c1f04513d6b6c860583a"),
filepath.Join(p, "blobs", "sha256-4a384beaf47a9cbe452dfa5ab70eea691790f3b35a832d12933a1996685bf2b6"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
})

actual, err = os.ReadFile(filepath.Join(p, "blobs", "sha256-12f58bb75cb3042d69a7e013ab87fb3c3c7088f50ddc62f0c77bd332f0d44d35"))
if err != nil {
t.Fatal(err)
}

expect, err = json.Marshal(map[string]any{"temperature": 0.6, "top_k": 10, "top_p": 0.7, "stop": []string{"<|endoftext|>"}})
if err != nil {
t.Fatal(err)
}

if !bytes.Equal(bytes.TrimSpace(expect), bytes.TrimSpace(actual)) {
t.Errorf("expected %s, actual %s", string(expect), string(actual))
}
}

func TestCreateReplacesMessages(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nMESSAGE assistant \"What is my purpose?\"\nMESSAGE user \"You run tests.\"\nMESSAGE assistant \"Oh, my god.\"", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-298baeaf6928a60cf666d88d64a1ba606feb43a2865687c39e40652e407bffc4"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e0e27d47045063ccb167ae852c51d49a98eab33fabaee4633fdddf97213e40b5"),
})

w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test2",
Modelfile: "FROM test\nMESSAGE assistant \"You're a test, Harry.\"\nMESSAGE user \"I-I'm a what?\"\nMESSAGE assistant \"A test. And a thumping good one at that, I'd wager.\"",
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test2", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-298baeaf6928a60cf666d88d64a1ba606feb43a2865687c39e40652e407bffc4"),
filepath.Join(p, "blobs", "sha256-4f48b25fe9969564c82f58eb1cedbdff6484cc0baf474bc6c2a9b37c8da3362a"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-a60ecc9da299ec7ede453f99236e5577fd125e143689b646d9f0ddc9971bf4db"),
filepath.Join(p, "blobs", "sha256-e0e27d47045063ccb167ae852c51d49a98eab33fabaee4633fdddf97213e40b5"),
})

type message struct {
Role string `json:"role"`
Content string `json:"content"`
}

f, err := os.Open(filepath.Join(p, "blobs", "sha256-a60ecc9da299ec7ede453f99236e5577fd125e143689b646d9f0ddc9971bf4db"))
if err != nil {
t.Fatal(err)
}
defer f.Close()

var actual []message
if err := json.NewDecoder(f).Decode(&actual); err != nil {
t.Fatal(err)
}

expect := []message{
{Role: "assistant", Content: "You're a test, Harry."},
{Role: "user", Content: "I-I'm a what?"},
{Role: "assistant", Content: "A test. And a thumping good one at that, I'd wager."},
}

if !slices.Equal(actual, expect) {
t.Errorf("expected %s, actual %s", expect, actual)
}
}

func TestCreateTemplateSystem(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .Prompt }}\nSYSTEM Say hello!\nTEMPLATE {{ .System }} {{ .Prompt }}\nSYSTEM Say bye!", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-2b5e330885117c82f3fd75169ea323e141070a2947c11ddb9f79ee0b01c589c1"),
filepath.Join(p, "blobs", "sha256-4c5f51faac758fecaff8db42f0b7382891a4d0c0bb885f7b86be88c814a7cc86"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"),
})

template, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-fe7ac77b725cda2ccad03f88a880ecdfd7a33192d6cae08fce2c0ee1455991ed"))
if err != nil {
t.Fatal(err)
}

if string(template) != "{{ .System }} {{ .Prompt }}" {
t.Errorf("expected \"{{ .System }} {{ .Prompt }}\", actual %s", template)
}

system, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-4c5f51faac758fecaff8db42f0b7382891a4d0c0bb885f7b86be88c814a7cc86"))
if err != nil {
t.Fatal(err)
}

if string(system) != "Say bye!" {
t.Errorf("expected \"Say bye!\", actual %s", system)
}
}

func TestCreateLicenses(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s\nLICENSE MIT\nLICENSE Apache-2.0", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{
filepath.Join(p, "manifests", "registry.ollama.ai", "library", "test", "latest"),
})

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-2af71558e438db0b73a20beab92dc278a94e1bbe974c00c1a33e3ab62d53a608"),
filepath.Join(p, "blobs", "sha256-79a39c37536ddee29cbadd5d5e2dcba8ed7f03e431f626ff38432c1c866bb7e2"),
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-e5dcffe836b6ec8a58e492419b550e65fb8cbdc308503979e5dacb33ac7ea3b7"),
})

mit, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-e5dcffe836b6ec8a58e492419b550e65fb8cbdc308503979e5dacb33ac7ea3b7"))
if err != nil {
t.Fatal(err)
}

if string(mit) != "MIT" {
t.Errorf("expected MIT, actual %s", mit)
}

apache, err := os.ReadFile(filepath.Join(p, "blobs", "sha256-2af71558e438db0b73a20beab92dc278a94e1bbe974c00c1a33e3ab62d53a608"))
if err != nil {
t.Fatal(err)
}

if string(apache) != "Apache-2.0" {
t.Errorf("expected Apache-2.0, actual %s", apache)
}
}

func TestCreateDetectTemplate(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()
var s Server

t.Run("matched", func(t *testing.T) {
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, llm.KV{
"tokenizer.chat_template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
}, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-2f8e594e6f34b1b4d36a246628eeb3365ce442303d656f1fcc69e821722acea0"),
filepath.Join(p, "blobs", "sha256-542b217f179c7825eeb5bca3c77d2b75ed05bafbd3451d9188891a60a85337c6"),
filepath.Join(p, "blobs", "sha256-553c4a3f747b3d22a4946875f1cc8ed011c2930d83f864a0c7265f9ec0a20413"),
})
})

t.Run("unmatched", func(t *testing.T) {
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Stream: &stream,
})

if w.Code != http.StatusOK {
t.Fatalf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{
filepath.Join(p, "blobs", "sha256-a4e5e156ddec27e286f75328784d7106b60a4eb1d246e950a001a3f944fbda99"),
filepath.Join(p, "blobs", "sha256-ca239d7bd8ea90e4a5d2e6bf88f8d74a47b14336e73eb4e18bed4dd325018116"),
})
})
}

@@ -1,28 +1,22 @@
package server

import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"path/filepath"
"testing"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/types/model"
)

func TestDelete(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
envconfig.LoadConfig()

var s Server

w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test",
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
})

if w.Code != http.StatusOK {
@@ -31,7 +25,7 @@ func TestDelete(t *testing.T) {

w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: "test2",
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t, nil, nil)),
Modelfile: fmt.Sprintf("FROM %s\nTEMPLATE {{ .System }} {{ .Prompt }}", createBinFile(t)),
})

if w.Code != http.StatusOK {
@@ -75,33 +69,3 @@ func TestDelete(t *testing.T) {
checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{})
checkFileExists(t, filepath.Join(p, "blobs", "*"), []string{})
}

func TestDeleteDuplicateLayers(t *testing.T) {
p := t.TempDir()
t.Setenv("OLLAMA_MODELS", p)
var s Server

n := model.ParseName("test")

var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(&ConfigV2{}); err != nil {
t.Fatal(err)
}

config, err := NewLayer(&b, "application/vnd.docker.container.image.v1+json")
if err != nil {
t.Fatal(err)
}

// create a manifest with duplicate layers
if err := WriteManifest(n, config, []*Layer{config}); err != nil {
t.Fatal(err)
}

w := createRequest(t, s.DeleteModelHandler, api.DeleteRequest{Name: "test"})
if w.Code != http.StatusOK {
t.Errorf("expected status code 200, actual %d", w.Code)
}

checkFileExists(t, filepath.Join(p, "manifests", "*", "*", "*", "*"), []string{})
}

@@ -8,12 +8,10 @@ import (
"testing"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
)

func TestList(t *testing.T) {
t.Setenv("OLLAMA_MODELS", t.TempDir())
envconfig.LoadConfig()

expectNames := []string{
"mistral:7b-instruct-q4_0",
@@ -31,7 +29,7 @@ func TestList(t *testing.T) {
for _, n := range expectNames {
createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: n,
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
})
}


@@ -15,12 +15,9 @@ import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/parser"
"github.com/ollama/ollama/types/model"
"github.com/ollama/ollama/version"
)

@@ -28,20 +25,20 @@ func createTestFile(t *testing.T, name string) string {
t.Helper()

f, err := os.CreateTemp(t.TempDir(), name)
require.NoError(t, err)
assert.Nil(t, err)
defer f.Close()

err = binary.Write(f, binary.LittleEndian, []byte("GGUF"))
require.NoError(t, err)
assert.Nil(t, err)

err = binary.Write(f, binary.LittleEndian, uint32(3))
require.NoError(t, err)
assert.Nil(t, err)

err = binary.Write(f, binary.LittleEndian, uint64(0))
require.NoError(t, err)
assert.Nil(t, err)

err = binary.Write(f, binary.LittleEndian, uint64(0))
require.NoError(t, err)
assert.Nil(t, err)

return f.Name()
}
@@ -56,18 +53,16 @@ func Test_Routes(t *testing.T) {
}

createTestModel := func(t *testing.T, name string) {
t.Helper()

fname := createTestFile(t, "ollama-model")

r := strings.NewReader(fmt.Sprintf("FROM %s\nPARAMETER seed 42\nPARAMETER top_p 0.9\nPARAMETER stop foo\nPARAMETER stop bar", fname))
modelfile, err := parser.ParseFile(r)
require.NoError(t, err)
assert.Nil(t, err)
fn := func(resp api.ProgressResponse) {
t.Logf("Status: %s", resp.Status)
}
err = CreateModel(context.TODO(), model.ParseName(name), "", "", modelfile, fn)
require.NoError(t, err)
err = CreateModel(context.TODO(), name, "", "", modelfile, fn)
assert.Nil(t, err)
}

testCases := []testCase{
@@ -79,9 +74,9 @@ func Test_Routes(t *testing.T) {
},
Expected: func(t *testing.T, resp *http.Response) {
contentType := resp.Header.Get("Content-Type")
assert.Equal(t, "application/json; charset=utf-8", contentType)
assert.Equal(t, contentType, "application/json; charset=utf-8")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, fmt.Sprintf(`{"version":"%s"}`, version.Version), string(body))
},
},
@@ -91,17 +86,17 @@ func Test_Routes(t *testing.T) {
Path: "/api/tags",
Expected: func(t *testing.T, resp *http.Response) {
contentType := resp.Header.Get("Content-Type")
assert.Equal(t, "application/json; charset=utf-8", contentType)
assert.Equal(t, contentType, "application/json; charset=utf-8")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Nil(t, err)

var modelList api.ListResponse

err = json.Unmarshal(body, &modelList)
require.NoError(t, err)
assert.Nil(t, err)

assert.NotNil(t, modelList.Models)
assert.Empty(t, len(modelList.Models))
assert.Equal(t, 0, len(modelList.Models))
},
},
{
@@ -113,18 +108,16 @@ func Test_Routes(t *testing.T) {
},
Expected: func(t *testing.T, resp *http.Response) {
contentType := resp.Header.Get("Content-Type")
assert.Equal(t, "application/json; charset=utf-8", contentType)
assert.Equal(t, contentType, "application/json; charset=utf-8")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)

assert.NotContains(t, string(body), "expires_at")
assert.Nil(t, err)

var modelList api.ListResponse
err = json.Unmarshal(body, &modelList)
require.NoError(t, err)
assert.Nil(t, err)

assert.Len(t, modelList.Models, 1)
assert.Equal(t, "test-model:latest", modelList.Models[0].Name)
assert.Equal(t, 1, len(modelList.Models))
assert.Equal(t, modelList.Models[0].Name, "test-model:latest")
},
},
{
@@ -141,7 +134,7 @@ func Test_Routes(t *testing.T) {
Stream: &stream,
}
jsonData, err := json.Marshal(createReq)
require.NoError(t, err)
assert.Nil(t, err)

req.Body = io.NopCloser(bytes.NewReader(jsonData))
},
@@ -149,11 +142,11 @@ func Test_Routes(t *testing.T) {
contentType := resp.Header.Get("Content-Type")
assert.Equal(t, "application/json", contentType)
_, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, 200, resp.StatusCode)
assert.Nil(t, err)
assert.Equal(t, resp.StatusCode, 200)

model, err := GetModel("t-bone")
require.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, "t-bone:latest", model.ShortName)
},
},
@@ -168,13 +161,13 @@ func Test_Routes(t *testing.T) {
Destination: "beefsteak",
}
jsonData, err := json.Marshal(copyReq)
require.NoError(t, err)
assert.Nil(t, err)

req.Body = io.NopCloser(bytes.NewReader(jsonData))
},
Expected: func(t *testing.T, resp *http.Response) {
model, err := GetModel("beefsteak")
require.NoError(t, err)
assert.Nil(t, err)
assert.Equal(t, "beefsteak:latest", model.ShortName)
},
},
@@ -186,18 +179,18 @@ func Test_Routes(t *testing.T) {
createTestModel(t, "show-model")
showReq := api.ShowRequest{Model: "show-model"}
jsonData, err := json.Marshal(showReq)
require.NoError(t, err)
assert.Nil(t, err)
req.Body = io.NopCloser(bytes.NewReader(jsonData))
},
Expected: func(t *testing.T, resp *http.Response) {
contentType := resp.Header.Get("Content-Type")
assert.Equal(t, "application/json; charset=utf-8", contentType)
assert.Equal(t, contentType, "application/json; charset=utf-8")
body, err := io.ReadAll(resp.Body)
require.NoError(t, err)
assert.Nil(t, err)

var showResp api.ShowResponse
err = json.Unmarshal(body, &showResp)
require.NoError(t, err)
assert.Nil(t, err)

var params []string
paramsSplit := strings.Split(showResp.Parameters, "\n")
@@ -217,7 +210,6 @@ func Test_Routes(t *testing.T) {
}

t.Setenv("OLLAMA_MODELS", t.TempDir())
envconfig.LoadConfig()

s := &Server{}
router := s.GenerateRoutes()
@@ -229,14 +221,14 @@ func Test_Routes(t *testing.T) {
t.Run(tc.Name, func(t *testing.T) {
u := httpSrv.URL + tc.Path
req, err := http.NewRequestWithContext(context.TODO(), tc.Method, u, nil)
require.NoError(t, err)
assert.Nil(t, err)

if tc.Setup != nil {
tc.Setup(t, req)
}

resp, err := httpSrv.Client().Do(req)
require.NoError(t, err)
assert.Nil(t, err)
defer resp.Body.Close()

if tc.Expected != nil {
@@ -248,7 +240,6 @@ func Test_Routes(t *testing.T) {

func TestCase(t *testing.T) {
t.Setenv("OLLAMA_MODELS", t.TempDir())
envconfig.LoadConfig()

cases := []string{
"mistral",
@@ -264,7 +255,7 @@ func TestCase(t *testing.T) {
t.Run(tt, func(t *testing.T) {
w := createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: tt,
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
Stream: &stream,
})

@@ -280,7 +271,7 @@ func TestCase(t *testing.T) {
t.Run("create", func(t *testing.T) {
w = createRequest(t, s.CreateModelHandler, api.CreateRequest{
Name: strings.ToUpper(tt),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t, nil, nil)),
Modelfile: fmt.Sprintf("FROM %s", createBinFile(t)),
Stream: &stream,
})


@@ -7,17 +7,17 @@ import (
"log/slog"
"reflect"
"runtime"
"slices"
"sort"
"strings"
"sync"
"time"

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/envconfig"
"golang.org/x/exp/slices"
)

type LlmRequest struct {
@@ -66,7 +66,7 @@ func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options,
opts.NumCtx = 4
}

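// scale the context size by the parallelism setting: the loaded model's context is divided among parallel request slots.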
opts.NumCtx *= envconfig.NumParallel
opts.NumCtx = opts.NumCtx * envconfig.NumParallel

req := &LlmRequest{
ctx: c,
@@ -370,6 +370,7 @@ func (s *Scheduler) updateFreeSpace(allGpus gpu.GpuInfoList) {
r.refMu.Lock()
gpuIDs := make([]string, 0, len(r.gpus))
if r.llama != nil {

// TODO this should be broken down by GPU instead of assuming uniform spread
estimatedVRAMPerGPU := r.llama.EstimatedVRAM() / uint64(len(r.gpus))
for _, gpu := range r.gpus {
@@ -528,6 +529,7 @@ func (runner *runnerRef) waitForVRAMRecovery() chan interface{} {
}
}()
return finished

}

type ByDuration []*runnerRef

@@ -12,10 +12,11 @@ import (

"github.com/ollama/ollama/api"
"github.com/ollama/ollama/app/lifecycle"
"github.com/ollama/ollama/envconfig"
"github.com/ollama/ollama/format"
"github.com/ollama/ollama/gpu"
"github.com/ollama/ollama/llm"
"github.com/ollama/ollama/envconfig"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

@@ -52,10 +53,10 @@ func TestLoad(t *testing.T) {
}
gpus := gpu.GpuInfoList{}
s.load(req, ggml, gpus)
require.Empty(t, req.successCh)
require.Len(t, req.successCh, 0)
require.Len(t, req.errCh, 1)
s.loadedMu.Lock()
require.Empty(t, s.loaded)
require.Len(t, s.loaded, 0)
s.loadedMu.Unlock()
err := <-req.errCh
require.Contains(t, err.Error(), "this model may be incompatible")
@@ -112,7 +113,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
t.Helper()

f, err := os.CreateTemp(t.TempDir(), modelName)
require.NoError(t, err)
assert.Nil(t, err)
defer f.Close()

gguf := llm.NewGGUFV3(binary.LittleEndian)
@@ -130,7 +131,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
}, []llm.Tensor{
{Name: "blk.0.attn.weight", Kind: uint32(0), Offset: uint64(0), Shape: []uint64{1, 1, 1, 1}, WriterTo: &bytes.Reader{}},
})
require.NoError(t, err)
assert.Nil(t, err)

fname := f.Name()
model := &Model{Name: modelName, ModelPath: fname}
@@ -189,8 +190,8 @@ func TestRequests(t *testing.T) {
select {
case resp := <-scenario1a.req.successCh:
require.Equal(t, resp.llama, scenario1a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario1a.req.errCh)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, scenario1a.req.errCh, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -202,8 +203,8 @@ func TestRequests(t *testing.T) {
select {
case resp := <-scenario1b.req.successCh:
require.Equal(t, resp.llama, scenario1a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario1b.req.errCh)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, scenario1b.req.errCh, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -220,8 +221,8 @@ func TestRequests(t *testing.T) {
select {
case resp := <-scenario2a.req.successCh:
require.Equal(t, resp.llama, scenario2a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario2a.req.errCh)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, scenario2a.req.errCh, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -236,8 +237,8 @@ func TestRequests(t *testing.T) {
select {
case resp := <-scenario3a.req.successCh:
require.Equal(t, resp.llama, scenario3a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3a.req.errCh)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, scenario3a.req.errCh, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -252,8 +253,8 @@ func TestRequests(t *testing.T) {
select {
case resp := <-scenario3b.req.successCh:
require.Equal(t, resp.llama, scenario3b.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3b.req.errCh)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, scenario3b.req.errCh, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -268,8 +269,8 @@ func TestRequests(t *testing.T) {
select {
case resp := <-scenario3c.req.successCh:
require.Equal(t, resp.llama, scenario3c.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3c.req.errCh)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, scenario3c.req.errCh, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -295,8 +296,8 @@ func TestRequests(t *testing.T) {
select {
case resp := <-scenario3d.req.successCh:
require.Equal(t, resp.llama, scenario3d.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, scenario3d.req.errCh)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, scenario3d.req.errCh, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -331,7 +332,7 @@ func TestGetRunner(t *testing.T) {
slog.Info("scenario1b")
successCh1b, errCh1b := s.GetRunner(scenario1b.ctx, scenario1b.req.model, scenario1b.req.opts, scenario1b.req.sessionDuration)
require.Len(t, s.pendingReqCh, 1)
require.Empty(t, successCh1b)
require.Len(t, successCh1b, 0)
require.Len(t, errCh1b, 1)
err := <-errCh1b
require.Contains(t, err.Error(), "server busy")
@@ -339,8 +340,8 @@ func TestGetRunner(t *testing.T) {
select {
case resp := <-successCh1a:
require.Equal(t, resp.llama, scenario1a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, errCh1a)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, errCh1a, 0)
case <-ctx.Done():
t.Errorf("timeout")
}
@@ -354,9 +355,9 @@ func TestGetRunner(t *testing.T) {
successCh1c, errCh1c := s.GetRunner(scenario1c.ctx, scenario1c.req.model, scenario1c.req.opts, scenario1c.req.sessionDuration)
// Starts in the pending channel, then should be quickly processed to return an error
time.Sleep(5 * time.Millisecond)
require.Empty(t, successCh1c)
require.Len(t, successCh1c, 0)
s.loadedMu.Lock()
require.Empty(t, s.loaded)
require.Len(t, s.loaded, 0)
s.loadedMu.Unlock()
require.Len(t, errCh1c, 1)
err = <-errCh1c
@@ -385,8 +386,8 @@ func TestPrematureExpired(t *testing.T) {
select {
case resp := <-successCh1a:
require.Equal(t, resp.llama, scenario1a.srv)
require.Empty(t, s.pendingReqCh)
require.Empty(t, errCh1a)
require.Len(t, s.pendingReqCh, 0)
require.Len(t, errCh1a, 0)
s.loadedMu.Lock()
require.Len(t, s.loaded, 1)
s.loadedMu.Unlock()
@@ -400,9 +401,9 @@ func TestPrematureExpired(t *testing.T) {
time.Sleep(20 * time.Millisecond)
require.LessOrEqual(t, len(s.finishedReqCh), 1)
time.Sleep(10 * time.Millisecond)
require.Empty(t, s.finishedReqCh)
require.Len(t, s.finishedReqCh, 0)
s.loadedMu.Lock()
require.Empty(t, s.loaded)
require.Len(t, s.loaded, 0)
s.loadedMu.Unlock()

// also shouldn't happen in real life
@@ -486,6 +487,7 @@ func TestFindRunnerToUnload(t *testing.T) {
r2.refCount = 1
resp = s.findRunnerToUnload()
require.Equal(t, r1, resp)

}

func TestNeedsReload(t *testing.T) {

@@ -146,7 +146,7 @@ func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
case requestURL := <-b.nextURL:
g.Go(func() error {
var err error
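// "for try := range maxRetries" counts 0 through maxRetries-1; this range-over-int form needs Go 1.22 or newer.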
for try := range maxRetries {
for try := 0; try < maxRetries; try++ {
err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
switch {
case errors.Is(err, context.Canceled):
@@ -190,7 +190,7 @@ func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
headers.Set("Content-Type", "application/octet-stream")
headers.Set("Content-Length", "0")

for try := range maxRetries {
for try := 0; try < maxRetries; try++ {
var resp *http.Response
resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
if errors.Is(err, context.Canceled) {
@@ -253,7 +253,7 @@ func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *
}

// retry uploading to the redirect URL
for try := range maxRetries {
for try := 0; try < maxRetries; try++ {
err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, nil)
switch {
case errors.Is(err, context.Canceled):
@@ -391,7 +391,7 @@ func uploadBlob(ctx context.Context, mp ModelPath, layer *Layer, opts *registryO
return err
}

//nolint:contextcheck
// nolint: contextcheck
go upload.Run(context.Background(), opts)
}


@@ -1 +0,0 @@
|
||||
{{ if .System }}<start_system>{{ .System }}<end_message>{{ end }}{{ if .Prompt }}<start_user>{{ .Prompt }}<end_message>{{ end }}<start_assistant>{{ .Response }}<end_message>
|
||||
@@ -1,7 +0,0 @@
|
||||
{{ if .System }}{{ .System }}
|
||||
|
||||
{{ end }}{{ if .Prompt }}### Instruction:
|
||||
{{ .Prompt }}
|
||||
|
||||
{{ end }}### Response:
|
||||
{{ .Response }}
|
||||
@@ -1,6 +0,0 @@
|
||||
{{ if .System }}<|im_start|>system
|
||||
{{ .System }}<|im_end|>
|
||||
{{ end }}{{ if .Prompt }}<|im_start|>user
|
||||
{{ .Prompt }}<|im_end|>
|
||||
{{ end }}<|im_start|>assistant
|
||||
{{ .Response }}<|im_end|>
|
||||
@@ -1,5 +0,0 @@
|
||||
{{ if .System }}System: {{ .System }}
|
||||
|
||||
{{ end }}{{ if .Prompt }}User: {{ .Prompt }}
|
||||
|
||||
{{ end }}Assistant: <|begin_of_text|>{{ .Response }}
|
||||
@@ -1,8 +0,0 @@
|
||||
{{ if .System }} Source: system
|
||||
|
||||
{{ .System }} <step>{{ end }} Source: user
|
||||
|
||||
{{ .Prompt }} <step> Source: assistant
|
||||
Destination: user
|
||||
|
||||
{{ .Response }}<step>
|
||||
@@ -1,3 +0,0 @@
|
||||
{{ if .System }}{{ .System }}
|
||||
{{ end }}{{ if .Prompt }}User: {{ .Prompt }}
|
||||
{{ end }}Assistant: {{ .Response }}
|
||||
@@ -1,4 +0,0 @@
|
||||
<start_of_turn>user
|
||||
{{ if .System }}{{ .System }} {{ end }}{{ .Prompt }}<end_of_turn>
|
||||
<start_of_turn>model
|
||||
{{ .Response }}<end_of_turn>
|
||||
@@ -1,9 +0,0 @@
|
||||
{{ if .System }}
|
||||
System:
|
||||
{{ .System }}
|
||||
|
||||
{{ end }}{{ if .Prompt }}Question:
|
||||
{{ .Prompt }}
|
||||
|
||||
{{ end }}Answer:
|
||||
{{ .Response }}
|
||||
@@ -1,138 +0,0 @@
|
||||
[
|
||||
{
|
||||
"template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\\n' + content + '<|im_end|>\\n<|im_start|>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\\n' }}{% endif %}{% endfor %}",
|
||||
"name": "chatml"
|
||||
},
|
||||
{
|
||||
"template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
|
||||
"name": "chatml"
|
||||
},
|
||||
{
|
||||
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
|
||||
"name": "zephyr"
|
||||
},
|
||||
{
|
||||
"template": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
|
||||
"name": "chatml"
|
||||
},
|
||||
{
|
||||
"template": "{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}",
|
||||
"name": "openchat"
|
||||
},
|
||||
{
|
||||
"template": "{{bos_token}}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
|
||||
"name": "chatml"
|
||||
},
|
||||
{
|
||||
"template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
|
||||
"name": "chatml"
|
||||
},
|
||||
{
|
||||
"template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
|
||||
"name": "chatml"
|
||||
},
|
||||
{
|
||||
"template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
|
||||
"name": "chatml"
|
||||
},
|
||||
{
|
||||
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
"name": "zephyr"
},
{
"template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
"name": "mistral-instruct"
},
{
"template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'### Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response\n'}}",
"name": "starcoder2-instruct"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}",
"name": "llama2-chat"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set user_index = 1 %}{% else %}{% set user_index = 0 %}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != ((loop.index0 + user_index) % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ '<s>' }}{% endif %}{% set content = 'Source: ' + message['role'] + '\n\n ' + message['content'] | trim %}{{ content + ' <step> ' }}{% endfor %}{{'Source: assistant\nDestination: user\n\n '}}",
"name": "codellama-70b-instruct"
},
{
"template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
"name": "mistral-instruct"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'system' %}\n{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|im_start|>assistant\n' + message['content'] + '<|im_end|>' }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|im_start|>assistant' }}\n{% endif %}\n{% endfor %}",
"name": "chatml"
},
{
"template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'You are a helpful assistant.' %}{% endif %}{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{{'<|im_start|>system\n' + system_message + '<|im_end|>\n'}}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
"name": "chatml"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif 'system' not in messages[0]['role'] %}{% set loop_messages = messages %}{% set system_message = 'You are DBRX, created by Databricks. You were last updated in December 2023. You answer questions based on information available up to that point.\nYOU PROVIDE SHORT RESPONSES TO SHORT QUESTIONS OR STATEMENTS, but provide thorough responses to more complex and open-ended questions.\nYou assist with various tasks, from writing to coding (using markdown for code blocks \u2014 remember to use ``` with code, JSON, and tables).\n(You do not have real-time data access or code execution capabilities. You avoid stereotyping and provide balanced perspectives on controversial topics. You do not provide song lyrics, poems, or news articles and do not divulge details of your training data.)\nThis is your system prompt, guiding your responses. Do not reference it, just respond to the user. If you find yourself talking about this message, stop. You should be responding appropriately and usually that means not mentioning this.\nYOU DO NOT MENTION ANY OF THIS INFORMATION ABOUT YOURSELF UNLESS THE INFORMATION IS DIRECTLY PERTINENT TO THE USER\\'S QUERY.' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if loop.index0 == 0 %}{% if system_message != false %}{{ '<|im_start|>system\n' + system_message | trim + '<|im_end|>\n'}}{% endif %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% else %}{{ '\n' + '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' }}{% endif %}{% if (add_generation_prompt == true and loop.last) %}{{ '\n' + '<|im_start|>' + 'assistant' + '\n' }}{% endif %}{% endfor %}",
"name": "chatml"
},
{
"template": "{% if not add_generation_prompt is defined %}\n{% set add_generation_prompt = false %}\n{% endif %}\n{%- set ns = namespace(found=false) -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set ns.found = true -%}\n {%- endif -%}\n{%- endfor -%}\n{{bos_token}}{%- if not ns.found -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{% if add_generation_prompt %}\n{{'### Response:'}}\n{% endif %}",
"name": "alpaca"
},
{
"template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
"name": "chatqa"
},
{
"template": "{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}",
"name": "gemma-instruct"
},
{
"template": "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}",
"name": "llama3-instruct"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'Question:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'system' %}\n{{ 'System:\n' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Answer:\n' + message['content'] + '\n\n' }}{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Answer:\n' }}{% endif %}{% endfor %}",
"name": "granite-instruct"
},
{
"template": "{{bos_token}}{{'You are an exceptionally intelligent coding assistant that consistently delivers accurate and reliable responses to user instructions.\n\n'}}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n {{ raise_exception('System messages are not allowed in this template.') }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'@@ Instruction\n' + message['content'] + '\n\n'}}\n {%- else %}\n{{'@@ Response\n' + message['content'] + eos_token + '\n\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'@@ Response\n'}}",
"name": "magicoder"
},
{
"template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '<start_user>' + message['content'].strip() + '<end_message>' }}{% elif message['role'] == 'system' %}{{ '<start_system>' + message['content'].strip() + '<end_message>' }}{% elif message['role'] == 'assistant' %}{{ '<start_assistant>' + message['content'] + '<end_message>' }}{% else %}{{ raise_exception('Only system, user and assistant roles are supported.') }}{% endif %}{% if loop.last and add_generation_prompt %}{{ '<start_assistant>' }}{% endif %}{% endfor %}",
"name": "alfred"
},
{
"template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}",
"name": "llama2-chat"
},
{
"template": "{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
"name": "phi-3"
},
{
"template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') %}{{'<|user|>' + '\n' + message['content'] + '<|end|>' + '\n' + '<|assistant|>' + '\n'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|end|>' + '\n'}}{% endif %}{% endfor %}",
"name": "phi-3"
},
{
"template": "{{ bos_token }}{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>\n' }}{% else %}{{ eos_token }}{% endif %}",
"name": "phi-3"
},
{
"template": "{{ bos_token }}{%- if messages[0]['role'] == 'system' -%}{% set loop_messages = messages[1:] %}{%- else -%}{% set loop_messages = messages %}{% endif %}System: This is a chat between a user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions based on the context. The assistant should also indicate when the answer cannot be found in the context.\n\n{% for message in loop_messages %}{%- if message['role'] == 'user' -%}User: {{ message['content'].strip() + '\n\n' }}{%- else -%}Assistant: {{ message['content'].strip() + '\n\n' }}{%- endif %}{% if loop.last and message['role'] == 'user' %}Assistant:{% endif %}{% endfor %}",
"name": "chatqa"
},
{
"template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ 'User: \n' + message['content'] }}\n{% elif message['role'] == 'system' %}\n{{ 'System: ' + message['content'] }}\n{% elif message['role'] == 'assistant' %}\n{{ 'Falcon:\n' + message['content']}}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ 'Falcon:' }}\n{% endif %}\n{% endfor %}",
"name": "falcon-instruct"
},
{
"template": "{% for message in messages %}{% if not loop.first %}{{ '\n' }}{% endif %}{% if message['role'] == 'system' %}{{ 'System: ' }}{% elif message['role'] == 'user' %}{{ 'User: ' }}{% elif message['role'] == 'assistant' %}{{ 'Falcon: ' }}{% endif %}{{ message['content'] }}{% endfor %}{% if add_generation_prompt %}{{ '\n' + 'Falcon:' }}{% endif %}",
"name": "falcon-instruct"
},
{
"template": "{% for message in messages %}{% if message['role'] == 'system' %}{% if message['content']%}{{'### System:\n' + message['content']+'\n\n'}}{% endif %}{% elif message['role'] == 'user' %}{{'### User:\n' + message['content']+'\n\n'}}{% elif message['role'] == 'assistant' %}{{'### Assistant:\n' + message['content']}}{% endif %}{% if loop.last and add_generation_prompt %}{{ '### Assistant:\n' }}{% endif %}{% endfor %}",
"name": "solar-instruct"
}
]
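The index above pairs each known Jinja-style chat template (as shipped in a Hugging Face tokenizer_config.json) with the name of an equivalent Go template in this package; many distinct Jinja variants deliberately map to the same name, such as chatml. Below is a minimal sketch of decoding one entry with only the standard library; the Entry type and the shortened template string are illustrative, not part of the source.

package main

import (
    "encoding/json"
    "fmt"
)

// Entry mirrors one element of the index: a known Jinja chat template
// paired with the name of the equivalent Go template.
type Entry struct {
    Name     string `json:"name"`
    Template string `json:"template"`
}

func main() {
    // A shortened sample entry; the real template strings are much longer.
    raw := `[{"template": "{% for message in messages %}...{% endfor %}", "name": "chatml"}]`

    var entries []Entry
    if err := json.Unmarshal([]byte(raw), &entries); err != nil {
        panic(err)
    }
    fmt.Println(entries[0].Name) // chatml
}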
@@ -1,3 +0,0 @@
[INST] <<SYS>>{{ .System }}<</SYS>>

{{ .Prompt }} [/INST] {{ .Response }}
@@ -1,7 +0,0 @@
{{ if .System }}<|start_header_id|>system<|end_header_id|>

{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>

{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>

{{ .Response }}<|eot_id|>
@@ -1,7 +0,0 @@
{{ if .System }}{{ .System }}

{{ end }}{{ if .Prompt }}@@ Instruction
{{ .Prompt }}

{{ end }}@@ Response
{{ .Response }}
@@ -1,6 +0,0 @@
{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>
@@ -1 +0,0 @@
{{ .System }}<|end_of_turn|>GPT4 Correct User: {{ .Prompt }}<|end_of_turn|>GPT4 Correct Assistant: {{ .Response }}<|end_of_turn|>
@@ -1,6 +0,0 @@
{{ if .System }}<|system|>
{{ .System }}<|end|>
{{ end }}{{ if .Prompt }}<|user|>
{{ .Prompt }}<|end|>
{{ end }}<|assistant|>
{{ .Response }}<|end|>
@@ -1,8 +0,0 @@
{{ if .System }}### System:
{{ .System }}

{{ end }}{{ if .Prompt }}### User:
{{ .Prompt }}

{{ end }}### Assistant:
{{ .Response }}
@@ -1,9 +0,0 @@
{{ if .System }}{{ .System }}

{{ end }}{{ if .Prompt }}### Instruction
{{ .Prompt }}


{{ end }}### Response
{{ .Response }}<|endoftext|>

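Each .gotmpl body above is a plain Go text/template rendered over a value with System, Prompt, and Response fields. A minimal rendering sketch using only the standard library follows; the vars type and the sample values are illustrative assumptions.

package main

import (
    "os"
    "text/template"
)

// vars mirrors the fields the .gotmpl files reference.
type vars struct {
    System   string
    Prompt   string
    Response string
}

func main() {
    // The ChatML variant shown above, embedded verbatim.
    const chatml = `{{ if .System }}<|im_start|>system
{{ .System }}<|im_end|>
{{ end }}{{ if .Prompt }}<|im_start|>user
{{ .Prompt }}<|im_end|>
{{ end }}<|im_start|>assistant
{{ .Response }}<|im_end|>`

    t := template.Must(template.New("chatml").Parse(chatml))
    // Renders the system and user turns, leaving the assistant turn open.
    if err := t.Execute(os.Stdout, vars{System: "You are helpful.", Prompt: "Hi!"}); err != nil {
        panic(err)
    }
}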
@@ -1,70 +0,0 @@
package templates

import (
    "bytes"
    "embed"
    "encoding/json"
    "errors"
    "io"
    "math"
    "sync"

    "github.com/agnivade/levenshtein"
)

//go:embed index.json
var indexBytes []byte

//go:embed *.gotmpl
var templatesFS embed.FS

// templatesOnce lazily decodes index.json and loads the Go template
// source for each entry exactly once.
var templatesOnce = sync.OnceValues(func() ([]*Template, error) {
    var templates []*Template
    if err := json.Unmarshal(indexBytes, &templates); err != nil {
        return nil, err
    }

    for _, t := range templates {
        bts, err := templatesFS.ReadFile(t.Name + ".gotmpl")
        if err != nil {
            return nil, err
        }

        // normalize line endings
        t.Bytes = bytes.ReplaceAll(bts, []byte("\r\n"), []byte("\n"))
    }

    return templates, nil
})

type Template struct {
    Name     string `json:"name"`
    Template string `json:"template"`
    Bytes    []byte
}

func (t Template) Reader() io.Reader {
    return bytes.NewReader(t.Bytes)
}

// NamedTemplate returns the known template whose Jinja source is the
// closest Levenshtein match to s, or an error if nothing falls within
// the distance cutoff.
func NamedTemplate(s string) (*Template, error) {
    templates, err := templatesOnce()
    if err != nil {
        return nil, err
    }

    var template *Template
    score := math.MaxInt
    for _, t := range templates {
        if s := levenshtein.ComputeDistance(s, t.Template); s < score {
            score = s
            template = t
        }
    }

    if score < 100 {
        return template, nil
    }

    return nil, errors.New("no matching template found")
}
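NamedTemplate is a fuzzy lookup: it computes the Levenshtein edit distance between the supplied Jinja template string and every entry in the index, keeps the closest match, and accepts it only when the distance is under 100 edits. A minimal usage sketch follows; the import path is an assumption based on the repository layout, and the jinja value would normally come from a model's tokenizer_config.json.

package main

import (
    "fmt"
    "io"
    "os"

    "github.com/ollama/ollama/templates" // assumed import path for this package
)

func main() {
    // One of the ChatML variants from the index; distance 0, so it matches.
    jinja := "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"

    t, err := templates.NamedTemplate(jinja)
    if err != nil {
        fmt.Fprintln(os.Stderr, err) // nothing within the distance cutoff
        return
    }

    fmt.Println(t.Name) // chatml
    body, _ := io.ReadAll(t.Reader())
    fmt.Println(string(body)) // the equivalent Go template source
}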