mirror of
https://github.com/mudler/LocalAI.git
synced 2026-02-03 03:02:38 -05:00
Compare commits
116 Commits
v3.9.0
...
copilot/co
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5ecda78be4 | ||
|
|
8da5ef7231 | ||
|
|
4758996936 | ||
|
|
9a50215867 | ||
|
|
4435c8af57 | ||
|
|
65a57daba6 | ||
|
|
b5465cbc3a | ||
|
|
2de30440fe | ||
|
|
673a80a578 | ||
|
|
2554e9fabe | ||
|
|
5bfc3eebf8 | ||
|
|
ab893fe302 | ||
|
|
c88074a19e | ||
|
|
5ca8f0aea0 | ||
|
|
84234e531f | ||
|
|
4cbf9abfef | ||
|
|
fdc2c0737c | ||
|
|
f4b0a304d7 | ||
|
|
d16ec7aa9e | ||
|
|
d699b7ccdc | ||
|
|
a4d224dd1b | ||
|
|
917c7aa9f3 | ||
|
|
5aa66842dd | ||
|
|
f5dee90962 | ||
|
|
06323df457 | ||
|
|
98f28bf583 | ||
|
|
383312b50e | ||
|
|
b736db4bbe | ||
|
|
09bc2e4a00 | ||
|
|
c03e532a18 | ||
|
|
fcb58ee243 | ||
|
|
b2ff1cea2a | ||
|
|
b964b3d53e | ||
|
|
0b26669d0b | ||
|
|
5a9698bc69 | ||
|
|
1fe0e9f74f | ||
|
|
ffb2dc4666 | ||
|
|
cfc2225fc7 | ||
|
|
fd53978a7b | ||
|
|
7abc0242bb | ||
|
|
23df29fbd3 | ||
|
|
fb9879949c | ||
|
|
1642b39cb8 | ||
|
|
e6ba26c3e7 | ||
|
|
26c4f80d1b | ||
|
|
5add7b47f5 | ||
|
|
3244ccc224 | ||
|
|
4f7b6b0bff | ||
|
|
3a629cea2f | ||
|
|
f917feda29 | ||
|
|
e2018cdc8f | ||
|
|
a3b8a94187 | ||
|
|
41de7d32ad | ||
|
|
93364df0a8 | ||
|
|
21c84f432f | ||
|
|
9d3da0bed5 | ||
|
|
1b063b5595 | ||
|
|
560bf50299 | ||
|
|
a7e155240b | ||
|
|
793e4907a2 | ||
|
|
d38811560c | ||
|
|
33cc0b8e13 | ||
|
|
4cd95b8a9d | ||
|
|
8c504113a2 | ||
|
|
666d110714 | ||
|
|
641606ae93 | ||
|
|
5f6c941399 | ||
|
|
1639fc6309 | ||
|
|
841e8f6d47 | ||
|
|
fd152c97c0 | ||
|
|
949de04052 | ||
|
|
76cfe1f367 | ||
|
|
5ee6c1810b | ||
|
|
7db79aadfa | ||
|
|
dee48679b4 | ||
|
|
94b47a9310 | ||
|
|
bc3e8793ed | ||
|
|
91978bb3a5 | ||
|
|
797f27f09f | ||
|
|
3f1631aa87 | ||
|
|
dad509637e | ||
|
|
218f3a126a | ||
|
|
be77a845fa | ||
|
|
ca32286022 | ||
|
|
1f592505dd | ||
|
|
b3bc623eb3 | ||
|
|
e56391cf14 | ||
|
|
ef3ffe4a4e | ||
|
|
3cffde2cd5 | ||
|
|
234bf7e2ad | ||
|
|
ba73d2e759 | ||
|
|
592697216b | ||
|
|
8bd7143a44 | ||
|
|
0d0ef0121c | ||
|
|
d7b2eee08f | ||
|
|
bc8ec5cb39 | ||
|
|
3f38fecdfc | ||
|
|
20a4199229 | ||
|
|
ded9955881 | ||
|
|
cf78f9a2a8 | ||
|
|
0c16f55b45 | ||
|
|
0b80167912 | ||
|
|
99b5c5f156 | ||
|
|
9ab812a8e8 | ||
|
|
185a685211 | ||
|
|
1a6fd0f7fc | ||
|
|
c95c482f36 | ||
|
|
21c464c34f | ||
|
|
ddf0281785 | ||
|
|
86c68c9623 | ||
|
|
c844b7ac58 | ||
|
|
bb459e671f | ||
|
|
2fe6e278c8 | ||
|
|
ae69921d77 | ||
|
|
bf2f95c684 | ||
|
|
94069f2751 |
28
.github/gallery-agent/agent.go
vendored
28
.github/gallery-agent/agent.go
vendored
@@ -11,6 +11,7 @@ import (
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
|
||||
cogito "github.com/mudler/cogito"
|
||||
|
||||
@@ -52,6 +53,11 @@ func cleanTextContent(text string) string {
|
||||
return stripThinkingTags(strings.TrimRight(result, "\n"))
|
||||
}
|
||||
|
||||
type galleryModel struct {
|
||||
Name string `yaml:"name"`
|
||||
Urls []string `yaml:"urls"`
|
||||
}
|
||||
|
||||
// isModelExisting checks if a specific model ID exists in the gallery using text search
|
||||
func isModelExisting(modelID string) (bool, error) {
|
||||
indexPath := getGalleryIndexPath()
|
||||
@@ -60,9 +66,20 @@ func isModelExisting(modelID string) (bool, error) {
|
||||
return false, fmt.Errorf("failed to read %s: %w", indexPath, err)
|
||||
}
|
||||
|
||||
contentStr := string(content)
|
||||
// Simple text search - if the model ID appears anywhere in the file, it exists
|
||||
return strings.Contains(contentStr, modelID), nil
|
||||
var galleryModels []galleryModel
|
||||
|
||||
err = yaml.Unmarshal(content, &galleryModels)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to unmarshal %s: %w", indexPath, err)
|
||||
}
|
||||
|
||||
for _, galleryModel := range galleryModels {
|
||||
if slices.Contains(galleryModel.Urls, modelID) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// filterExistingModels removes models that already exist in the gallery
|
||||
@@ -134,6 +151,11 @@ func getRealReadme(ctx context.Context, repository string) (string, error) {
|
||||
}
|
||||
|
||||
func selectMostInterestingModels(ctx context.Context, searchResult *SearchResult) ([]ProcessedModel, error) {
|
||||
|
||||
if len(searchResult.Models) == 1 {
|
||||
return searchResult.Models, nil
|
||||
}
|
||||
|
||||
// Create a conversation fragment
|
||||
fragment := cogito.NewEmptyFragment().
|
||||
AddMessage("user",
|
||||
|
||||
24
.github/gallery-agent/main.go
vendored
24
.github/gallery-agent/main.go
vendored
@@ -119,14 +119,24 @@ func main() {
|
||||
}
|
||||
|
||||
fmt.Println(result.FormattedOutput)
|
||||
var models []ProcessedModel
|
||||
|
||||
// Use AI agent to select the most interesting models
|
||||
fmt.Println("Using AI agent to select the most interesting models...")
|
||||
models, err := selectMostInterestingModels(context.Background(), result)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
|
||||
// Continue with original result if selection fails
|
||||
if len(result.Models) > 1 {
|
||||
fmt.Println("More than one model found (", len(result.Models), "), using AI agent to select the most interesting models")
|
||||
for _, model := range result.Models {
|
||||
fmt.Println("Model: ", model.ModelID)
|
||||
}
|
||||
// Use AI agent to select the most interesting models
|
||||
fmt.Println("Using AI agent to select the most interesting models...")
|
||||
models, err = selectMostInterestingModels(context.Background(), result)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
|
||||
// Continue with original result if selection fails
|
||||
models = result.Models
|
||||
}
|
||||
} else if len(result.Models) == 1 {
|
||||
models = result.Models
|
||||
fmt.Println("Only one model found, using it directly")
|
||||
}
|
||||
|
||||
fmt.Print(models)
|
||||
@@ -315,7 +325,7 @@ func searchAndProcessModels(searchTerm string, limit int, quantization string) (
|
||||
outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
|
||||
processedModel.ReadmeContentPreview))
|
||||
} else {
|
||||
continue
|
||||
fmt.Printf(" Warning: Failed to get real readme: %v\n", err)
|
||||
}
|
||||
fmt.Println("Real readme got", readmeContent)
|
||||
|
||||
|
||||
670
.github/workflows/backend.yml
vendored
670
.github/workflows/backend.yml
vendored
File diff suppressed because it is too large
Load Diff
2
.github/workflows/dependabot_auto.yml
vendored
2
.github/workflows/dependabot_auto.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
steps:
|
||||
- name: Dependabot metadata
|
||||
id: metadata
|
||||
uses: dependabot/fetch-metadata@v2.4.0
|
||||
uses: dependabot/fetch-metadata@v2.5.0
|
||||
with:
|
||||
github-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
skip-commit-verification: true
|
||||
|
||||
4
.github/workflows/gallery-agent.yaml
vendored
4
.github/workflows/gallery-agent.yaml
vendored
@@ -49,12 +49,12 @@ jobs:
|
||||
PATH="$PATH:$HOME/go/bin" make protogen-go
|
||||
- uses: mudler/localai-github-action@v1.1
|
||||
with:
|
||||
model: 'qwen3-4b'
|
||||
model: 'https://huggingface.co/bartowski/Qwen_Qwen3-1.7B-GGUF'
|
||||
|
||||
- name: Run gallery agent
|
||||
env:
|
||||
#OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
|
||||
OPENAI_MODE: qwen3-4b
|
||||
OPENAI_MODE: Qwen_Qwen3-1.7B-GGUF
|
||||
OPENAI_BASE_URL: "http://localhost:8080"
|
||||
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
|
||||
#OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
|
||||
|
||||
2
.github/workflows/generate_grpc_cache.yaml
vendored
2
.github/workflows/generate_grpc_cache.yaml
vendored
@@ -16,7 +16,7 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- grpc-base-image: ubuntu:22.04
|
||||
- grpc-base-image: ubuntu:24.04
|
||||
runs-on: 'ubuntu-latest'
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
runs-on: ${{matrix.runs-on}}
|
||||
|
||||
4
.github/workflows/generate_intel_image.yaml
vendored
4
.github/workflows/generate_intel_image.yaml
vendored
@@ -15,7 +15,7 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- base-image: intel/oneapi-basekit:2025.2.0-0-devel-ubuntu22.04
|
||||
- base-image: intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04
|
||||
runs-on: 'arc-runner-set'
|
||||
platforms: 'linux/amd64'
|
||||
runs-on: ${{matrix.runs-on}}
|
||||
@@ -53,7 +53,7 @@ jobs:
|
||||
BASE_IMAGE=${{ matrix.base-image }}
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
tags: quay.io/go-skynet/intel-oneapi-base:latest
|
||||
tags: quay.io/go-skynet/intel-oneapi-base:24.04
|
||||
push: true
|
||||
target: intel
|
||||
platforms: ${{ matrix.platforms }}
|
||||
|
||||
187
.github/workflows/image-pr.yml
vendored
187
.github/workflows/image-pr.yml
vendored
@@ -1,94 +1,95 @@
|
||||
---
|
||||
name: 'build container images tests'
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
image-build:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
# Pushing with all jobs in parallel
|
||||
# eats the bandwidth of all the nodes
|
||||
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'hipblas'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-hipblas'
|
||||
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
|
||||
grpc-base-image: "ubuntu:22.04"
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'sycl'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
|
||||
grpc-base-image: "ubuntu:22.04"
|
||||
tag-suffix: 'sycl'
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'vulkan'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-vulkan-core'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
|
||||
base-image: "ubuntu:24.04"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
name: 'build container images tests'
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
|
||||
concurrency:
|
||||
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
image-build:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
# Pushing with all jobs in parallel
|
||||
# eats the bandwidth of all the nodes
|
||||
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "9"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'hipblas'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-hipblas'
|
||||
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
|
||||
grpc-base-image: "ubuntu:24.04"
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'sycl'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
|
||||
grpc-base-image: "ubuntu:24.04"
|
||||
tag-suffix: 'sycl'
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'vulkan'
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-vulkan-core'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
|
||||
base-image: "ubuntu:24.04"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
|
||||
372
.github/workflows/image.yml
vendored
372
.github/workflows/image.yml
vendored
@@ -1,187 +1,187 @@
|
||||
---
|
||||
name: 'build container images'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
concurrency:
|
||||
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
hipblas-jobs:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
aio: ${{ matrix.aio }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- build-type: 'hipblas'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-hipblas'
|
||||
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
|
||||
grpc-base-image: "ubuntu:22.04"
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
aio: "-aio-gpu-hipblas"
|
||||
ubuntu-version: '2204'
|
||||
|
||||
core-image-build:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
aio: ${{ matrix.aio }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
skip-drivers: ${{ matrix.skip-drivers }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
#max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
|
||||
matrix:
|
||||
include:
|
||||
- build-type: ''
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: ''
|
||||
base-image: "ubuntu:22.04"
|
||||
runs-on: 'ubuntu-latest'
|
||||
aio: "-aio-cpu"
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "11"
|
||||
cuda-minor-version: "7"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-11'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'false'
|
||||
aio: "-aio-gpu-nvidia-cuda-11"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-nvidia-cuda-12"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-nvidia-cuda-13"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'vulkan'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-vulkan'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-vulkan"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'intel'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
|
||||
grpc-base-image: "ubuntu:22.04"
|
||||
tag-suffix: '-gpu-intel'
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
aio: "-aio-gpu-intel"
|
||||
ubuntu-version: '2204'
|
||||
|
||||
gh-runner:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
aio: ${{ matrix.aio }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
skip-drivers: ${{ matrix.skip-drivers }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-arm64'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'true'
|
||||
ubuntu-version: "2204"
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
|
||||
base-image: "ubuntu:24.04"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
name: 'build container images'
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
concurrency:
|
||||
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
hipblas-jobs:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
aio: ${{ matrix.aio }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
ubuntu-codename: ${{ matrix.ubuntu-codename }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- build-type: 'hipblas'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-hipblas'
|
||||
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
|
||||
grpc-base-image: "ubuntu:24.04"
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
aio: "-aio-gpu-hipblas"
|
||||
ubuntu-version: '2404'
|
||||
ubuntu-codename: 'noble'
|
||||
|
||||
core-image-build:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
aio: ${{ matrix.aio }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
skip-drivers: ${{ matrix.skip-drivers }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
ubuntu-codename: ${{ matrix.ubuntu-codename }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
#max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
|
||||
matrix:
|
||||
include:
|
||||
- build-type: ''
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: ''
|
||||
base-image: "ubuntu:24.04"
|
||||
runs-on: 'ubuntu-latest'
|
||||
aio: "-aio-cpu"
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
ubuntu-codename: 'noble'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "9"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-nvidia-cuda-12"
|
||||
ubuntu-version: '2404'
|
||||
ubuntu-codename: 'noble'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-nvidia-cuda-13"
|
||||
ubuntu-version: '2404'
|
||||
ubuntu-codename: 'noble'
|
||||
- build-type: 'vulkan'
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-vulkan'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-vulkan"
|
||||
ubuntu-version: '2404'
|
||||
ubuntu-codename: 'noble'
|
||||
- build-type: 'intel'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
|
||||
grpc-base-image: "ubuntu:24.04"
|
||||
tag-suffix: '-gpu-intel'
|
||||
runs-on: 'ubuntu-latest'
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
aio: "-aio-gpu-intel"
|
||||
ubuntu-version: '2404'
|
||||
ubuntu-codename: 'noble'
|
||||
|
||||
gh-runner:
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
tag-suffix: ${{ matrix.tag-suffix }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||
platforms: ${{ matrix.platforms }}
|
||||
runs-on: ${{ matrix.runs-on }}
|
||||
aio: ${{ matrix.aio }}
|
||||
base-image: ${{ matrix.base-image }}
|
||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||
makeflags: ${{ matrix.makeflags }}
|
||||
skip-drivers: ${{ matrix.skip-drivers }}
|
||||
ubuntu-version: ${{ matrix.ubuntu-version }}
|
||||
ubuntu-codename: ${{ matrix.ubuntu-codename }}
|
||||
secrets:
|
||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-arm64'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'true'
|
||||
ubuntu-version: "2204"
|
||||
ubuntu-codename: 'jammy'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
|
||||
base-image: "ubuntu:24.04"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
ubuntu-codename: 'noble'
|
||||
|
||||
9
.github/workflows/image_build.yml
vendored
9
.github/workflows/image_build.yml
vendored
@@ -23,7 +23,7 @@ on:
|
||||
type: string
|
||||
cuda-minor-version:
|
||||
description: 'CUDA minor version'
|
||||
default: "4"
|
||||
default: "9"
|
||||
type: string
|
||||
platforms:
|
||||
description: 'Platforms'
|
||||
@@ -61,6 +61,11 @@ on:
|
||||
required: false
|
||||
default: '2204'
|
||||
type: string
|
||||
ubuntu-codename:
|
||||
description: 'Ubuntu codename'
|
||||
required: false
|
||||
default: 'noble'
|
||||
type: string
|
||||
secrets:
|
||||
dockerUsername:
|
||||
required: true
|
||||
@@ -244,6 +249,7 @@ jobs:
|
||||
MAKEFLAGS=${{ inputs.makeflags }}
|
||||
SKIP_DRIVERS=${{ inputs.skip-drivers }}
|
||||
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
|
||||
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
cache-from: type=gha
|
||||
@@ -272,6 +278,7 @@ jobs:
|
||||
MAKEFLAGS=${{ inputs.makeflags }}
|
||||
SKIP_DRIVERS=${{ inputs.skip-drivers }}
|
||||
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
|
||||
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
|
||||
context: .
|
||||
file: ./Dockerfile
|
||||
cache-from: type=gha
|
||||
|
||||
19
.github/workflows/test-extra.yml
vendored
19
.github/workflows/test-extra.yml
vendored
@@ -247,3 +247,22 @@ jobs:
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/python/coqui
|
||||
make --jobs=5 --output-sync=target -C backend/python/coqui test
|
||||
tests-moonshine:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Clone
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: true
|
||||
- name: Dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install build-essential ffmpeg
|
||||
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
|
||||
# Install UV
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
pip install --user --no-cache-dir grpcio-tools==1.64.1
|
||||
- name: Test moonshine
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/python/moonshine
|
||||
make --jobs=5 --output-sync=target -C backend/python/moonshine test
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -25,6 +25,7 @@ go-bert
|
||||
# LocalAI build binary
|
||||
LocalAI
|
||||
/local-ai
|
||||
/local-ai-launcher
|
||||
# prevent above rules from omitting the helm chart
|
||||
!charts/*
|
||||
# prevent above rules from omitting the api/localai folder
|
||||
|
||||
203
AGENTS.md
203
AGENTS.md
@@ -2,6 +2,163 @@
|
||||
|
||||
Building and testing the project depends on the components involved and the platform where development is taking place. Due to the amount of context required, it's usually best not to try building or testing the project unless the user requests it. If you must build the project, inspect the Makefile in the project root and the Makefiles of any backends that are affected by the changes you are making. In addition, the workflows in .github/workflows can be used as a reference when it is unclear how to build or test a component. The primary Makefile contains targets for building inside or outside Docker; if the user has not previously specified a preference, ask which they would like to use.
|
||||
|
||||
## Building a specified backend
|
||||
|
||||
Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build bark for ROCM/hipblas
|
||||
|
||||
- The Makefile has targets like `docker-build-bark` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
|
||||
- At a minimum we need to set the BUILD_TYPE, BASE_IMAGE build-args
|
||||
- Use .github/workflows/backend.yml as a reference it lists the needed args in the `include` job strategy matrix
|
||||
- l4t and cublas also requires the CUDA major and minor version
|
||||
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-bark`
|
||||
- Unless the user specifies that they want you to run the command, then just print it because not all agent frontends handle long running jobs well and the output may overflow your context
|
||||
- The user may say they want to build AMD or ROCM instead of hipblas, Intel instead of SYCL, or NVIDIA instead of l4t or cublas. Ask for confirmation if there is ambiguity.
|
||||
- Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly.
|
||||
|
||||
## Adding a New Backend
|
||||
|
||||
When adding a new backend to LocalAI, you need to update several files to ensure the backend is properly built, tested, and registered. Here's a step-by-step guide based on the pattern used for adding backends like `moonshine`:
|
||||
|
||||
### 1. Create Backend Directory Structure
|
||||
|
||||
Create the backend directory under the appropriate location:
|
||||
- **Python backends**: `backend/python/<backend-name>/`
|
||||
- **Go backends**: `backend/go/<backend-name>/`
|
||||
- **C++ backends**: `backend/cpp/<backend-name>/`
|
||||
|
||||
For Python backends, you'll typically need:
|
||||
- `backend.py` - Main gRPC server implementation
|
||||
- `Makefile` - Build configuration
|
||||
- `install.sh` - Installation script for dependencies
|
||||
- `protogen.sh` - Protocol buffer generation script
|
||||
- `requirements.txt` - Python dependencies
|
||||
- `run.sh` - Runtime script
|
||||
- `test.py` / `test.sh` - Test files
|
||||
|
||||
### 2. Add Build Configurations to `.github/workflows/backend.yml`
|
||||
|
||||
Add build matrix entries for each platform/GPU type you want to support. Look at similar backends (e.g., `chatterbox`, `faster-whisper`) for reference.
|
||||
|
||||
**Placement in file:**
|
||||
- CPU builds: Add after other CPU builds (e.g., after `cpu-chatterbox`)
|
||||
- CUDA 12 builds: Add after other CUDA 12 builds (e.g., after `gpu-nvidia-cuda-12-chatterbox`)
|
||||
- CUDA 13 builds: Add after other CUDA 13 builds (e.g., after `gpu-nvidia-cuda-13-chatterbox`)
|
||||
|
||||
**Additional build types you may need:**
|
||||
- ROCm/HIP: Use `build-type: 'hipblas'` with `base-image: "rocm/dev-ubuntu-24.04:6.4.4"`
|
||||
- Intel/SYCL: Use `build-type: 'intel'` or `build-type: 'sycl_f16'`/`sycl_f32` with `base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"`
|
||||
- L4T (ARM): Use `build-type: 'l4t'` with `platforms: 'linux/arm64'` and `runs-on: 'ubuntu-24.04-arm'`
|
||||
|
||||
### 3. Add Backend Metadata to `backend/index.yaml`
|
||||
|
||||
**Step 3a: Add Meta Definition**
|
||||
|
||||
Add a YAML anchor definition in the `## metas` section (around line 2-300). Look for similar backends to use as a template such as `diffusers` or `chatterbox`
|
||||
|
||||
**Step 3b: Add Image Entries**
|
||||
|
||||
Add image entries at the end of the file, following the pattern of similar backends such as `diffusers` or `chatterbox`. Include both `latest` (production) and `master` (development) tags.
|
||||
|
||||
### 4. Update the Makefile
|
||||
|
||||
The Makefile needs to be updated in several places to support building and testing the new backend:
|
||||
|
||||
**Step 4a: Add to `.NOTPARALLEL`**
|
||||
|
||||
Add `backends/<backend-name>` to the `.NOTPARALLEL` line (around line 2) to prevent parallel execution conflicts:
|
||||
|
||||
```makefile
|
||||
.NOTPARALLEL: ... backends/<backend-name>
|
||||
```
|
||||
|
||||
**Step 4b: Add to `prepare-test-extra`**
|
||||
|
||||
Add the backend to the `prepare-test-extra` target (around line 312) to prepare it for testing:
|
||||
|
||||
```makefile
|
||||
prepare-test-extra: protogen-python
|
||||
...
|
||||
$(MAKE) -C backend/python/<backend-name>
|
||||
```
|
||||
|
||||
**Step 4c: Add to `test-extra`**
|
||||
|
||||
Add the backend to the `test-extra` target (around line 319) to run its tests:
|
||||
|
||||
```makefile
|
||||
test-extra: prepare-test-extra
|
||||
...
|
||||
$(MAKE) -C backend/python/<backend-name> test
|
||||
```
|
||||
|
||||
**Step 4d: Add Backend Definition**
|
||||
|
||||
Add a backend definition variable in the backend definitions section (around line 428-457). The format depends on the backend type:
|
||||
|
||||
**For Python backends with root context** (like `faster-whisper`, `bark`):
|
||||
```makefile
|
||||
BACKEND_<BACKEND_NAME> = <backend-name>|python|.|false|true
|
||||
```
|
||||
|
||||
**For Python backends with `./backend` context** (like `chatterbox`, `moonshine`):
|
||||
```makefile
|
||||
BACKEND_<BACKEND_NAME> = <backend-name>|python|./backend|false|true
|
||||
```
|
||||
|
||||
**For Go backends**:
|
||||
```makefile
|
||||
BACKEND_<BACKEND_NAME> = <backend-name>|golang|.|false|true
|
||||
```
|
||||
|
||||
**Step 4e: Generate Docker Build Target**
|
||||
|
||||
Add an eval call to generate the docker-build target (around line 480-501):
|
||||
|
||||
```makefile
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_<BACKEND_NAME>)))
|
||||
```
|
||||
|
||||
**Step 4f: Add to `docker-build-backends`**
|
||||
|
||||
Add `docker-build-<backend-name>` to the `docker-build-backends` target (around line 507):
|
||||
|
||||
```makefile
|
||||
docker-build-backends: ... docker-build-<backend-name>
|
||||
```
|
||||
|
||||
**Determining the Context:**
|
||||
|
||||
- If the backend is in `backend/python/<backend-name>/` and uses `./backend` as context in the workflow file, use `./backend` context
|
||||
- If the backend is in `backend/python/<backend-name>/` but uses `.` as context in the workflow file, use `.` context
|
||||
- Check similar backends to determine the correct context
|
||||
|
||||
### 5. Verification Checklist
|
||||
|
||||
After adding a new backend, verify:
|
||||
|
||||
- [ ] Backend directory structure is complete with all necessary files
|
||||
- [ ] Build configurations added to `.github/workflows/backend.yml` for all desired platforms
|
||||
- [ ] Meta definition added to `backend/index.yaml` in the `## metas` section
|
||||
- [ ] Image entries added to `backend/index.yaml` for all build variants (latest + development)
|
||||
- [ ] Tag suffixes match between workflow file and index.yaml
|
||||
- [ ] Makefile updated with all 6 required changes (`.NOTPARALLEL`, `prepare-test-extra`, `test-extra`, backend definition, docker-build target eval, `docker-build-backends`)
|
||||
- [ ] No YAML syntax errors (check with linter)
|
||||
- [ ] No Makefile syntax errors (check with linter)
|
||||
- [ ] Follows the same pattern as similar backends (e.g., if it's a transcription backend, follow `faster-whisper` pattern)
|
||||
|
||||
### 6. Example: Adding a Python Backend
|
||||
|
||||
For reference, when `moonshine` was added:
|
||||
- **Files created**: `backend/python/moonshine/{backend.py, Makefile, install.sh, protogen.sh, requirements.txt, run.sh, test.py, test.sh}`
|
||||
- **Workflow entries**: 3 build configurations (CPU, CUDA 12, CUDA 13)
|
||||
- **Index entries**: 1 meta definition + 6 image entries (cpu, cuda12, cuda13 × latest/development)
|
||||
- **Makefile updates**:
|
||||
- Added to `.NOTPARALLEL` line
|
||||
- Added to `prepare-test-extra` and `test-extra` targets
|
||||
- Added `BACKEND_MOONSHINE = moonshine|python|./backend|false|true`
|
||||
- Added eval for docker-build target generation
|
||||
- Added `docker-build-moonshine` to `docker-build-backends`
|
||||
|
||||
# Coding style
|
||||
|
||||
- The project has the following .editorconfig
|
||||
@@ -77,3 +234,49 @@ When fixing compilation errors after upstream changes:
|
||||
- HTTP server uses `server_routes` with HTTP handlers
|
||||
- Both use the same `server_context` and task queue infrastructure
|
||||
- gRPC methods: `LoadModel`, `Predict`, `PredictStream`, `Embedding`, `Rerank`, `TokenizeString`, `GetMetrics`, `Health`
|
||||
|
||||
## Tool Call Parsing Maintenance
|
||||
|
||||
When working on JSON/XML tool call parsing functionality, always check llama.cpp for reference implementation and updates:
|
||||
|
||||
### Checking for XML Parsing Changes
|
||||
|
||||
1. **Review XML Format Definitions**: Check `llama.cpp/common/chat-parser-xml-toolcall.h` for `xml_tool_call_format` struct changes
|
||||
2. **Review Parsing Logic**: Check `llama.cpp/common/chat-parser-xml-toolcall.cpp` for parsing algorithm updates
|
||||
3. **Review Format Presets**: Check `llama.cpp/common/chat-parser.cpp` for new XML format presets (search for `xml_tool_call_format form`)
|
||||
4. **Review Model Lists**: Check `llama.cpp/common/chat.h` for `COMMON_CHAT_FORMAT_*` enum values that use XML parsing:
|
||||
- `COMMON_CHAT_FORMAT_GLM_4_5`
|
||||
- `COMMON_CHAT_FORMAT_MINIMAX_M2`
|
||||
- `COMMON_CHAT_FORMAT_KIMI_K2`
|
||||
- `COMMON_CHAT_FORMAT_QWEN3_CODER_XML`
|
||||
- `COMMON_CHAT_FORMAT_APRIEL_1_5`
|
||||
- `COMMON_CHAT_FORMAT_XIAOMI_MIMO`
|
||||
- Any new formats added
|
||||
|
||||
### Model Configuration Options
|
||||
|
||||
Always check `llama.cpp` for new model configuration options that should be supported in LocalAI:
|
||||
|
||||
1. **Check Server Context**: Review `llama.cpp/tools/server/server-context.cpp` for new parameters
|
||||
2. **Check Chat Params**: Review `llama.cpp/common/chat.h` for `common_chat_params` struct changes
|
||||
3. **Check Server Options**: Review `llama.cpp/tools/server/server.cpp` for command-line argument changes
|
||||
4. **Examples of options to check**:
|
||||
- `ctx_shift` - Context shifting support
|
||||
- `parallel_tool_calls` - Parallel tool calling
|
||||
- `reasoning_format` - Reasoning format options
|
||||
- Any new flags or parameters
|
||||
|
||||
### Implementation Guidelines
|
||||
|
||||
1. **Feature Parity**: Always aim for feature parity with llama.cpp's implementation
|
||||
2. **Test Coverage**: Add tests for new features matching llama.cpp's behavior
|
||||
3. **Documentation**: Update relevant documentation when adding new formats or options
|
||||
4. **Backward Compatibility**: Ensure changes don't break existing functionality
|
||||
|
||||
### Files to Monitor
|
||||
|
||||
- `llama.cpp/common/chat-parser-xml-toolcall.h` - Format definitions
|
||||
- `llama.cpp/common/chat-parser-xml-toolcall.cpp` - Parsing logic
|
||||
- `llama.cpp/common/chat-parser.cpp` - Format presets and model-specific handlers
|
||||
- `llama.cpp/common/chat.h` - Format enums and parameter structures
|
||||
- `llama.cpp/tools/server/server-context.cpp` - Server configuration options
|
||||
|
||||
@@ -78,6 +78,20 @@ LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio make run-e2e-aio
|
||||
|
||||
We welcome contributions to the documentation; please open a new PR or create a new issue. The documentation is available under `docs/` https://github.com/mudler/LocalAI/tree/master/docs
|
||||
|
||||
### Gallery YAML Schema
|
||||
|
||||
LocalAI provides a JSON Schema for gallery model YAML files at:
|
||||
|
||||
`core/schema/gallery-model.schema.json`
|
||||
|
||||
This schema mirrors the internal gallery model configuration and can be used by editors (such as VS Code) to enable autocomplete, validation, and inline documentation when creating or modifying gallery files.
|
||||
|
||||
To use it with the YAML language server, add the following comment at the top of a gallery YAML file:
|
||||
|
||||
```yaml
|
||||
# yaml-language-server: $schema=../core/schema/gallery-model.schema.json
|
||||
```
|
||||
|
||||
## Community and Communication
|
||||
|
||||
- You can reach out via the Github issue tracker.
|
||||
|
||||
59
Dockerfile
59
Dockerfile
@@ -1,6 +1,7 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
|
||||
ARG INTEL_BASE_IMAGE=${BASE_IMAGE}
|
||||
ARG UBUNTU_CODENAME=noble
|
||||
|
||||
FROM ${BASE_IMAGE} AS requirements
|
||||
|
||||
@@ -9,7 +10,7 @@ ENV DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates curl wget espeak-ng libgomp1 \
|
||||
ffmpeg && \
|
||||
ffmpeg libopenblas0 libopenblas-dev && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
@@ -23,7 +24,7 @@ ARG SKIP_DRIVERS=false
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ENV BUILD_TYPE=${BUILD_TYPE}
|
||||
ARG UBUNTU_VERSION=2204
|
||||
ARG UBUNTU_VERSION=2404
|
||||
|
||||
RUN mkdir -p /run/localai
|
||||
RUN echo "default" > /run/localai/capability
|
||||
@@ -34,11 +35,45 @@ RUN <<EOT bash
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
software-properties-common pciutils wget gpg-agent && \
|
||||
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
vulkan-sdk && \
|
||||
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
|
||||
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
|
||||
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
|
||||
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
|
||||
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
|
||||
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils mesa-vulkan-drivers
|
||||
if [ "amd64" = "$TARGETARCH" ]; then
|
||||
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
|
||||
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
mkdir -p /opt/vulkan-sdk && \
|
||||
mv 1.4.328.1 /opt/vulkan-sdk/ && \
|
||||
cd /opt/vulkan-sdk/1.4.328.1 && \
|
||||
./vulkansdk --no-deps --maxjobs \
|
||||
vulkan-loader \
|
||||
vulkan-validationlayers \
|
||||
vulkan-extensionlayer \
|
||||
vulkan-tools \
|
||||
shaderc && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
|
||||
rm -rf /opt/vulkan-sdk
|
||||
fi
|
||||
if [ "arm64" = "$TARGETARCH" ]; then
|
||||
mkdir vulkan && cd vulkan && \
|
||||
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
|
||||
tar -xvf vulkan-sdk.tar.xz && \
|
||||
rm vulkan-sdk.tar.xz && \
|
||||
cd 1.4.335.0 && \
|
||||
cp -rfv aarch64/bin/* /usr/bin/ && \
|
||||
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
|
||||
cp -rfv aarch64/include/* /usr/include/ && \
|
||||
cp -rfv aarch64/share/* /usr/share/ && \
|
||||
cd ../.. && \
|
||||
rm -rf vulkan
|
||||
fi
|
||||
ldconfig && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
echo "vulkan" > /run/localai/capability
|
||||
@@ -141,13 +176,12 @@ ENV PATH=/opt/rocm/bin:${PATH}
|
||||
# The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
|
||||
FROM requirements-drivers AS build-requirements
|
||||
|
||||
ARG GO_VERSION=1.22.6
|
||||
ARG CMAKE_VERSION=3.26.4
|
||||
ARG GO_VERSION=1.25.4
|
||||
ARG CMAKE_VERSION=3.31.10
|
||||
ARG CMAKE_FROM_SOURCE=false
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
@@ -204,9 +238,10 @@ WORKDIR /build
|
||||
# https://community.intel.com/t5/Intel-oneAPI-Math-Kernel-Library/APT-Repository-not-working-signatures-invalid/m-p/1599436/highlight/true#M36143
|
||||
# This is a temporary workaround until Intel fixes their repository
|
||||
FROM ${INTEL_BASE_IMAGE} AS intel
|
||||
ARG UBUNTU_CODENAME=noble
|
||||
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
|
||||
gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
|
||||
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
|
||||
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu ${UBUNTU_CODENAME}/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
intel-oneapi-runtime-libs && \
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
|
||||
FROM ${BASE_IMAGE}
|
||||
|
||||
|
||||
293
Makefile
293
Makefile
@@ -1,3 +1,6 @@
|
||||
# Disable parallel execution for backend builds
|
||||
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine
|
||||
|
||||
GOCMD=go
|
||||
GOTEST=$(GOCMD) test
|
||||
GOVET=$(GOCMD) vet
|
||||
@@ -6,10 +9,14 @@ LAUNCHER_BINARY_NAME=local-ai-launcher
|
||||
|
||||
CUDA_MAJOR_VERSION?=13
|
||||
CUDA_MINOR_VERSION?=0
|
||||
UBUNTU_VERSION?=2204
|
||||
UBUNTU_CODENAME?=noble
|
||||
|
||||
GORELEASER?=
|
||||
|
||||
export BUILD_TYPE?=
|
||||
export CUDA_MAJOR_VERSION?=12
|
||||
export CUDA_MINOR_VERSION?=9
|
||||
|
||||
GO_TAGS?=
|
||||
BUILD_ID?=
|
||||
@@ -155,7 +162,17 @@ test: test-models/testmodel.ggml protogen-go
|
||||
########################################################
|
||||
|
||||
docker-build-aio:
|
||||
docker build --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile .
|
||||
docker build \
|
||||
--build-arg MAKEFLAGS="--jobs=5 --output-sync=target" \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
-t local-ai:tests -f Dockerfile .
|
||||
BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test $(MAKE) docker-aio
|
||||
|
||||
e2e-aio:
|
||||
@@ -177,7 +194,17 @@ prepare-e2e:
|
||||
mkdir -p $(TEST_DIR)
|
||||
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
|
||||
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
|
||||
docker build --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=0 -t localai-tests .
|
||||
docker build \
|
||||
--build-arg IMAGE_TYPE=core \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
-t localai-tests .
|
||||
|
||||
run-e2e-image:
|
||||
ls -liah $(abspath ./tests/e2e-fixtures)
|
||||
@@ -288,6 +315,7 @@ prepare-test-extra: protogen-python
|
||||
$(MAKE) -C backend/python/chatterbox
|
||||
$(MAKE) -C backend/python/vllm
|
||||
$(MAKE) -C backend/python/vibevoice
|
||||
$(MAKE) -C backend/python/moonshine
|
||||
|
||||
test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/transformers test
|
||||
@@ -295,11 +323,12 @@ test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/chatterbox test
|
||||
$(MAKE) -C backend/python/vllm test
|
||||
$(MAKE) -C backend/python/vibevoice test
|
||||
$(MAKE) -C backend/python/moonshine test
|
||||
|
||||
DOCKER_IMAGE?=local-ai
|
||||
DOCKER_AIO_IMAGE?=local-ai-aio
|
||||
IMAGE_TYPE?=core
|
||||
BASE_IMAGE?=ubuntu:22.04
|
||||
BASE_IMAGE?=ubuntu:24.04
|
||||
|
||||
docker:
|
||||
docker build \
|
||||
@@ -308,24 +337,34 @@ docker:
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_IMAGE) .
|
||||
|
||||
docker-cuda11:
|
||||
docker-cuda12:
|
||||
docker build \
|
||||
--build-arg CUDA_MAJOR_VERSION=11 \
|
||||
--build-arg CUDA_MINOR_VERSION=8 \
|
||||
--build-arg CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION} \
|
||||
--build-arg CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION} \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
-t $(DOCKER_IMAGE)-cuda-11 .
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_IMAGE)-cuda-12 .
|
||||
|
||||
docker-aio:
|
||||
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_AIO_IMAGE) -f Dockerfile.aio .
|
||||
|
||||
docker-aio-all:
|
||||
@@ -334,66 +373,31 @@ docker-aio-all:
|
||||
|
||||
docker-image-intel:
|
||||
docker build \
|
||||
--build-arg BASE_IMAGE=quay.io/go-skynet/intel-oneapi-base:latest \
|
||||
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04 \
|
||||
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
|
||||
--build-arg GO_TAGS="$(GO_TAGS)" \
|
||||
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
|
||||
--build-arg BUILD_TYPE=intel -t $(DOCKER_IMAGE) .
|
||||
--build-arg BUILD_TYPE=intel \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
-t $(DOCKER_IMAGE) .
|
||||
|
||||
########################################################
|
||||
## Backends
|
||||
########################################################
|
||||
|
||||
# Pattern rule for standard backends (docker-based)
|
||||
# This matches all backends that use docker-build-* and docker-save-*
|
||||
backends/%: docker-build-% docker-save-% build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/$*.tar)"
|
||||
|
||||
backends/diffusers: docker-build-diffusers docker-save-diffusers build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
|
||||
|
||||
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
|
||||
|
||||
backends/piper: docker-build-piper docker-save-piper build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
|
||||
|
||||
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
|
||||
|
||||
backends/whisper: docker-build-whisper docker-save-whisper build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
|
||||
|
||||
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
|
||||
|
||||
backends/local-store: docker-build-local-store docker-save-local-store build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
|
||||
|
||||
backends/huggingface: docker-build-huggingface docker-save-huggingface build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
|
||||
|
||||
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
|
||||
|
||||
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
|
||||
|
||||
backends/kokoro: docker-build-kokoro docker-save-kokoro build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
|
||||
|
||||
backends/chatterbox: docker-build-chatterbox docker-save-chatterbox build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/chatterbox.tar)"
|
||||
|
||||
# Darwin-specific backends (keep as explicit targets since they have special build logic)
|
||||
backends/llama-cpp-darwin: build
|
||||
bash ./scripts/build/llama-cpp-darwin.sh
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
|
||||
|
||||
backends/neutts: docker-build-neutts docker-save-neutts build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/neutts.tar)"
|
||||
|
||||
backends/vllm: docker-build-vllm docker-save-vllm build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/vllm.tar)"
|
||||
|
||||
backends/vibevoice: docker-build-vibevoice docker-save-vibevoice build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/vibevoice.tar)"
|
||||
|
||||
build-darwin-python-backend: build
|
||||
bash ./scripts/build/python-darwin.sh
|
||||
|
||||
@@ -423,121 +427,88 @@ backends/stablediffusion-ggml-darwin:
|
||||
backend-images:
|
||||
mkdir -p backend-images
|
||||
|
||||
docker-build-llama-cpp:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
|
||||
# Backend metadata: BACKEND_NAME | DOCKERFILE_TYPE | BUILD_CONTEXT | PROGRESS_FLAG | NEEDS_BACKEND_ARG
|
||||
# llama-cpp is special - uses llama-cpp Dockerfile and doesn't need BACKEND arg
|
||||
BACKEND_LLAMA_CPP = llama-cpp|llama-cpp|.|false|false
|
||||
|
||||
docker-build-bark-cpp:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark-cpp -f backend/Dockerfile.golang --build-arg BACKEND=bark-cpp .
|
||||
# Golang backends
|
||||
BACKEND_BARK_CPP = bark-cpp|golang|.|false|true
|
||||
BACKEND_PIPER = piper|golang|.|false|true
|
||||
BACKEND_LOCAL_STORE = local-store|golang|.|false|true
|
||||
BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
|
||||
BACKEND_SILERO_VAD = silero-vad|golang|.|false|true
|
||||
BACKEND_STABLEDIFFUSION_GGML = stablediffusion-ggml|golang|.|--progress=plain|true
|
||||
BACKEND_WHISPER = whisper|golang|.|false|true
|
||||
|
||||
docker-build-piper:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:piper -f backend/Dockerfile.golang --build-arg BACKEND=piper .
|
||||
# Python backends with root context
|
||||
BACKEND_RERANKERS = rerankers|python|.|false|true
|
||||
BACKEND_TRANSFORMERS = transformers|python|.|false|true
|
||||
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
|
||||
BACKEND_COQUI = coqui|python|.|false|true
|
||||
BACKEND_BARK = bark|python|.|false|true
|
||||
BACKEND_EXLLAMA2 = exllama2|python|.|false|true
|
||||
|
||||
docker-build-local-store:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:local-store -f backend/Dockerfile.golang --build-arg BACKEND=local-store .
|
||||
# Python backends with ./backend context
|
||||
BACKEND_RFDETR = rfdetr|python|./backend|false|true
|
||||
BACKEND_KITTEN_TTS = kitten-tts|python|./backend|false|true
|
||||
BACKEND_NEUTTS = neutts|python|./backend|false|true
|
||||
BACKEND_KOKORO = kokoro|python|./backend|false|true
|
||||
BACKEND_VLLM = vllm|python|./backend|false|true
|
||||
BACKEND_DIFFUSERS = diffusers|python|./backend|--progress=plain|true
|
||||
BACKEND_CHATTERBOX = chatterbox|python|./backend|false|true
|
||||
BACKEND_VIBEVOICE = vibevoice|python|./backend|--progress=plain|true
|
||||
BACKEND_MOONSHINE = moonshine|python|./backend|false|true
|
||||
|
||||
docker-build-huggingface:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:huggingface -f backend/Dockerfile.golang --build-arg BACKEND=huggingface .
|
||||
# Helper function to build docker image for a backend
|
||||
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
|
||||
define docker-build-backend
|
||||
docker build $(if $(filter-out false,$(4)),$(4)) \
|
||||
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
|
||||
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
|
||||
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
|
||||
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
|
||||
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
|
||||
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
|
||||
$(if $(filter true,$(5)),--build-arg BACKEND=$(1)) \
|
||||
-t local-ai-backend:$(1) -f backend/Dockerfile.$(2) $(3)
|
||||
endef
|
||||
|
||||
docker-build-rfdetr:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rfdetr -f backend/Dockerfile.python --build-arg BACKEND=rfdetr ./backend
|
||||
# Generate docker-build targets from backend definitions
|
||||
define generate-docker-build-target
|
||||
docker-build-$(word 1,$(subst |, ,$(1))):
|
||||
$$(call docker-build-backend,$(word 1,$(subst |, ,$(1))),$(word 2,$(subst |, ,$(1))),$(word 3,$(subst |, ,$(1))),$(word 4,$(subst |, ,$(1))),$(word 5,$(subst |, ,$(1))))
|
||||
endef
|
||||
|
||||
docker-build-kitten-tts:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kitten-tts -f backend/Dockerfile.python --build-arg BACKEND=kitten-tts ./backend
|
||||
# Generate all docker-build targets
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_LLAMA_CPP)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_BARK_CPP)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_PIPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_LOCAL_STORE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_SILERO_VAD)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_STABLEDIFFUSION_GGML)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_BARK)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_EXLLAMA2)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_KITTEN_TTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_NEUTTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_KOKORO)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
|
||||
|
||||
docker-save-kitten-tts: backend-images
|
||||
docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar
|
||||
# Pattern rule for docker-save targets
|
||||
docker-save-%: backend-images
|
||||
docker save local-ai-backend:$* -o backend-images/$*.tar
|
||||
|
||||
docker-save-chatterbox: backend-images
|
||||
docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar
|
||||
|
||||
docker-save-vibevoice: backend-images
|
||||
docker save local-ai-backend:vibevoice -o backend-images/vibevoice.tar
|
||||
|
||||
docker-build-neutts:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:neutts -f backend/Dockerfile.python --build-arg BACKEND=neutts ./backend
|
||||
|
||||
docker-save-neutts: backend-images
|
||||
docker save local-ai-backend:neutts -o backend-images/neutts.tar
|
||||
|
||||
docker-build-kokoro:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
|
||||
|
||||
docker-build-vllm:
|
||||
docker build --build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) --build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm ./backend
|
||||
|
||||
docker-save-vllm: backend-images
|
||||
docker save local-ai-backend:vllm -o backend-images/vllm.tar
|
||||
|
||||
docker-save-kokoro: backend-images
|
||||
docker save local-ai-backend:kokoro -o backend-images/kokoro.tar
|
||||
|
||||
docker-save-rfdetr: backend-images
|
||||
docker save local-ai-backend:rfdetr -o backend-images/rfdetr.tar
|
||||
|
||||
docker-save-huggingface: backend-images
|
||||
docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
|
||||
|
||||
docker-save-local-store: backend-images
|
||||
docker save local-ai-backend:local-store -o backend-images/local-store.tar
|
||||
|
||||
docker-build-silero-vad:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:silero-vad -f backend/Dockerfile.golang --build-arg BACKEND=silero-vad .
|
||||
|
||||
docker-save-silero-vad: backend-images
|
||||
docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar
|
||||
|
||||
docker-save-piper: backend-images
|
||||
docker save local-ai-backend:piper -o backend-images/piper.tar
|
||||
|
||||
docker-save-llama-cpp: backend-images
|
||||
docker save local-ai-backend:llama-cpp -o backend-images/llama-cpp.tar
|
||||
|
||||
docker-save-bark-cpp: backend-images
|
||||
docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar
|
||||
|
||||
docker-build-stablediffusion-ggml:
|
||||
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) --build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) --build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.golang --build-arg BACKEND=stablediffusion-ggml .
|
||||
|
||||
docker-save-stablediffusion-ggml: backend-images
|
||||
docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar
|
||||
|
||||
docker-build-rerankers:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
|
||||
|
||||
docker-build-transformers:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
|
||||
|
||||
docker-build-diffusers:
|
||||
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers ./backend
|
||||
|
||||
docker-save-diffusers: backend-images
|
||||
docker save local-ai-backend:diffusers -o backend-images/diffusers.tar
|
||||
|
||||
docker-build-whisper:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) --build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) --build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper .
|
||||
|
||||
docker-save-whisper: backend-images
|
||||
docker save local-ai-backend:whisper -o backend-images/whisper.tar
|
||||
|
||||
docker-build-faster-whisper:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
|
||||
|
||||
docker-build-coqui:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
|
||||
|
||||
docker-build-bark:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
|
||||
|
||||
docker-build-chatterbox:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox ./backend
|
||||
|
||||
docker-build-vibevoice:
|
||||
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vibevoice -f backend/Dockerfile.python --build-arg BACKEND=vibevoice ./backend
|
||||
|
||||
docker-build-exllama2:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
|
||||
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine
|
||||
|
||||
########################################################
|
||||
### END Backends
|
||||
|
||||
60
README.md
60
README.md
@@ -146,14 +146,18 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
|
||||
### NVIDIA GPU Images:
|
||||
|
||||
```bash
|
||||
# CUDA 13.0
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-13
|
||||
|
||||
# CUDA 12.0
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
|
||||
|
||||
# CUDA 11.7
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-11
|
||||
|
||||
# NVIDIA Jetson (L4T) ARM64
|
||||
# CUDA 12 (for Nvidia AGX Orin and similar platforms)
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
|
||||
|
||||
# CUDA 13 (for Nvidia DGX Spark)
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64-cuda-13
|
||||
```
|
||||
|
||||
### AMD GPU Images (ROCm):
|
||||
@@ -180,12 +184,12 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-gpu-vulkan
|
||||
# CPU version
|
||||
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
|
||||
|
||||
# NVIDIA CUDA 13 version
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-13
|
||||
|
||||
# NVIDIA CUDA 12 version
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
|
||||
|
||||
# NVIDIA CUDA 11 version
|
||||
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
|
||||
|
||||
# Intel GPU version
|
||||
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel
|
||||
|
||||
@@ -269,39 +273,40 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
### Text Generation & Language Models
|
||||
| Backend | Description | Acceleration Support |
|
||||
|---------|-------------|---------------------|
|
||||
| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12, ROCm, Intel SYCL, Vulkan, Metal, CPU |
|
||||
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12, ROCm, Intel |
|
||||
| **transformers** | HuggingFace transformers framework | CUDA 11/12, ROCm, Intel, CPU |
|
||||
| **exllama2** | GPTQ inference library | CUDA 12 |
|
||||
| **llama.cpp** | LLM inference in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
|
||||
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12/13, ROCm, Intel |
|
||||
| **transformers** | HuggingFace transformers framework | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **exllama2** | GPTQ inference library | CUDA 12/13 |
|
||||
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
|
||||
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
|
||||
|
||||
### Audio & Speech Processing
|
||||
| Backend | Description | Acceleration Support |
|
||||
|---------|-------------|---------------------|
|
||||
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12, ROCm, Intel SYCL, Vulkan, CPU |
|
||||
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12, ROCm, Intel, CPU |
|
||||
| **bark** | Text-to-audio generation | CUDA 12, ROCm, Intel |
|
||||
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
|
||||
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **bark** | Text-to-audio generation | CUDA 12/13, ROCm, Intel |
|
||||
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
|
||||
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12, ROCm, Intel, CPU |
|
||||
| **kokoro** | Lightweight TTS model | CUDA 12, ROCm, Intel, CPU |
|
||||
| **chatterbox** | Production-grade TTS | CUDA 11/12, CPU |
|
||||
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
|
||||
| **piper** | Fast neural TTS system | CPU |
|
||||
| **kitten-tts** | Kitten TTS models | CPU |
|
||||
| **silero-vad** | Voice Activity Detection | CPU |
|
||||
| **neutts** | Text-to-speech with voice cloning | CUDA 12, ROCm, CPU |
|
||||
| **neutts** | Text-to-speech with voice cloning | CUDA 12/13, ROCm, CPU |
|
||||
| **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
|
||||
|
||||
### Image & Video Generation
|
||||
| Backend | Description | Acceleration Support |
|
||||
|---------|-------------|---------------------|
|
||||
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12, Intel SYCL, Vulkan, CPU |
|
||||
| **diffusers** | HuggingFace diffusion models | CUDA 11/12, ROCm, Intel, Metal, CPU |
|
||||
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12/13, Intel SYCL, Vulkan, CPU |
|
||||
| **diffusers** | HuggingFace diffusion models | CUDA 12/13, ROCm, Intel, Metal, CPU |
|
||||
|
||||
### Specialized AI Tasks
|
||||
| Backend | Description | Acceleration Support |
|
||||
|---------|-------------|---------------------|
|
||||
| **rfdetr** | Real-time object detection | CUDA 12, Intel, CPU |
|
||||
| **rerankers** | Document reranking API | CUDA 11/12, ROCm, Intel, CPU |
|
||||
| **rfdetr** | Real-time object detection | CUDA 12/13, Intel, CPU |
|
||||
| **rerankers** | Document reranking API | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **local-store** | Vector database | CPU |
|
||||
| **huggingface** | HuggingFace API integration | API-based |
|
||||
|
||||
@@ -309,13 +314,14 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
|
||||
| Acceleration Type | Supported Backends | Hardware Support |
|
||||
|-------------------|-------------------|------------------|
|
||||
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
|
||||
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts | AMD Graphics |
|
||||
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
|
||||
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice | AMD Graphics |
|
||||
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice | Intel Arc, Intel iGPUs |
|
||||
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
|
||||
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
|
||||
| **NVIDIA Jetson** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI |
|
||||
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
|
||||
| **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
|
||||
| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
|
||||
|
||||
### 🔗 Community and integrations
|
||||
@@ -408,6 +414,10 @@ A huge thank you to our generous sponsors who support this project covering CI e
|
||||
</a>
|
||||
</p>
|
||||
|
||||
### Individual sponsors
|
||||
|
||||
A special thanks to individual sponsors that contributed to the project, a full list is in [Github](https://github.com/sponsors/mudler) and [buymeacoffee](https://buymeacoffee.com/mudler), a special shout out goes to [drikster80](https://github.com/drikster80) for being generous. Thank you everyone!
|
||||
|
||||
## 🌟 Star history
|
||||
|
||||
[](https://star-history.com/#go-skynet/LocalAI&Date)
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
|
||||
FROM ${BASE_IMAGE} AS builder
|
||||
ARG BACKEND=rerankers
|
||||
@@ -12,8 +12,8 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ARG GO_VERSION=1.22.6
|
||||
ARG UBUNTU_VERSION=2204
|
||||
ARG GO_VERSION=1.25.4
|
||||
ARG UBUNTU_VERSION=2404
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
@@ -40,11 +40,45 @@ RUN <<EOT bash
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
software-properties-common pciutils wget gpg-agent && \
|
||||
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
vulkan-sdk && \
|
||||
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
|
||||
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
|
||||
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
|
||||
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
|
||||
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
|
||||
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
|
||||
if [ "amd64" = "$TARGETARCH" ]; then
|
||||
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
|
||||
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
mkdir -p /opt/vulkan-sdk && \
|
||||
mv 1.4.328.1 /opt/vulkan-sdk/ && \
|
||||
cd /opt/vulkan-sdk/1.4.328.1 && \
|
||||
./vulkansdk --no-deps --maxjobs \
|
||||
vulkan-loader \
|
||||
vulkan-validationlayers \
|
||||
vulkan-extensionlayer \
|
||||
vulkan-tools \
|
||||
shaderc && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
|
||||
rm -rf /opt/vulkan-sdk
|
||||
fi
|
||||
if [ "arm64" = "$TARGETARCH" ]; then
|
||||
mkdir vulkan && cd vulkan && \
|
||||
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
|
||||
tar -xvf vulkan-sdk.tar.xz && \
|
||||
rm vulkan-sdk.tar.xz && \
|
||||
cd 1.4.335.0 && \
|
||||
cp -rfv aarch64/bin/* /usr/bin/ && \
|
||||
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
|
||||
cp -rfv aarch64/include/* /usr/include/ && \
|
||||
cp -rfv aarch64/share/* /usr/share/ && \
|
||||
cd ../.. && \
|
||||
rm -rf vulkan
|
||||
fi
|
||||
ldconfig && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
@@ -148,6 +182,8 @@ EOT
|
||||
|
||||
COPY . /LocalAI
|
||||
|
||||
RUN git config --global --add safe.directory /LocalAI
|
||||
|
||||
RUN cd /LocalAI && make protogen-go && make -C /LocalAI/backend/go/${BACKEND} build
|
||||
|
||||
FROM scratch
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
|
||||
|
||||
|
||||
@@ -10,7 +10,8 @@ FROM ${GRPC_BASE_IMAGE} AS grpc
|
||||
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
|
||||
ARG GRPC_VERSION=v1.65.0
|
||||
ARG CMAKE_FROM_SOURCE=false
|
||||
ARG CMAKE_VERSION=3.26.4
|
||||
# CUDA Toolkit 13.x compatibility: CMake 3.31.9+ fixes toolchain detection/arch table issues
|
||||
ARG CMAKE_VERSION=3.31.10
|
||||
|
||||
ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
|
||||
|
||||
@@ -26,7 +27,7 @@ RUN apt-get update && \
|
||||
|
||||
# Install CMake (the version in 22.04 is too old)
|
||||
RUN <<EOT bash
|
||||
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
|
||||
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
|
||||
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
|
||||
else
|
||||
apt-get update && \
|
||||
@@ -50,6 +51,13 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
|
||||
rm -rf /build
|
||||
|
||||
FROM ${BASE_IMAGE} AS builder
|
||||
ARG CMAKE_FROM_SOURCE=false
|
||||
ARG CMAKE_VERSION=3.31.10
|
||||
# We can target specific CUDA ARCHITECTURES like --build-arg CUDA_DOCKER_ARCH='75;86;89;120'
|
||||
ARG CUDA_DOCKER_ARCH
|
||||
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
|
||||
ARG CMAKE_ARGS
|
||||
ENV CMAKE_ARGS=${CMAKE_ARGS}
|
||||
ARG BACKEND=rerankers
|
||||
ARG BUILD_TYPE
|
||||
ENV BUILD_TYPE=${BUILD_TYPE}
|
||||
@@ -61,8 +69,8 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ARG GO_VERSION=1.22.6
|
||||
ARG UBUNTU_VERSION=2204
|
||||
ARG GO_VERSION=1.25.4
|
||||
ARG UBUNTU_VERSION=2404
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
@@ -70,6 +78,7 @@ RUN apt-get update && \
|
||||
ccache git \
|
||||
ca-certificates \
|
||||
make \
|
||||
pkg-config libcurl4-openssl-dev \
|
||||
curl unzip \
|
||||
libssl-dev wget && \
|
||||
apt-get clean && \
|
||||
@@ -88,11 +97,45 @@ RUN <<EOT bash
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
software-properties-common pciutils wget gpg-agent && \
|
||||
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
vulkan-sdk && \
|
||||
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
|
||||
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
|
||||
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
|
||||
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
|
||||
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
|
||||
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
|
||||
if [ "amd64" = "$TARGETARCH" ]; then
|
||||
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
|
||||
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
mkdir -p /opt/vulkan-sdk && \
|
||||
mv 1.4.328.1 /opt/vulkan-sdk/ && \
|
||||
cd /opt/vulkan-sdk/1.4.328.1 && \
|
||||
./vulkansdk --no-deps --maxjobs \
|
||||
vulkan-loader \
|
||||
vulkan-validationlayers \
|
||||
vulkan-extensionlayer \
|
||||
vulkan-tools \
|
||||
shaderc && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
|
||||
rm -rf /opt/vulkan-sdk
|
||||
fi
|
||||
if [ "arm64" = "$TARGETARCH" ]; then
|
||||
mkdir vulkan && cd vulkan && \
|
||||
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
|
||||
tar -xvf vulkan-sdk.tar.xz && \
|
||||
rm vulkan-sdk.tar.xz && \
|
||||
cd 1.4.335.0 && \
|
||||
cp -rfv aarch64/bin/* /usr/bin/ && \
|
||||
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
|
||||
cp -rfv aarch64/include/* /usr/include/ && \
|
||||
cp -rfv aarch64/share/* /usr/share/ && \
|
||||
cd ../.. && \
|
||||
rm -rf vulkan
|
||||
fi
|
||||
ldconfig && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
@@ -189,7 +232,7 @@ EOT
|
||||
|
||||
# Install CMake (the version in 22.04 is too old)
|
||||
RUN <<EOT bash
|
||||
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
|
||||
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
|
||||
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
|
||||
else
|
||||
apt-get update && \
|
||||
@@ -205,19 +248,30 @@ COPY --from=grpc /opt/grpc /usr/local
|
||||
|
||||
COPY . /LocalAI
|
||||
|
||||
## Otherwise just run the normal build
|
||||
RUN <<EOT bash
|
||||
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
|
||||
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-fallback && \
|
||||
make llama-cpp-grpc && make llama-cpp-rpc-server; \
|
||||
else \
|
||||
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-avx && \
|
||||
make llama-cpp-avx2 && \
|
||||
make llama-cpp-avx512 && \
|
||||
make llama-cpp-fallback && \
|
||||
make llama-cpp-grpc && \
|
||||
make llama-cpp-rpc-server; \
|
||||
fi
|
||||
RUN <<'EOT' bash
|
||||
set -euxo pipefail
|
||||
|
||||
if [[ -n "${CUDA_DOCKER_ARCH:-}" ]]; then
|
||||
CUDA_ARCH_ESC="${CUDA_DOCKER_ARCH//;/\\;}"
|
||||
export CMAKE_ARGS="${CMAKE_ARGS:-} -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH_ESC}"
|
||||
echo "CMAKE_ARGS(env) = ${CMAKE_ARGS}"
|
||||
rm -rf /LocalAI/backend/cpp/llama-cpp-*-build
|
||||
fi
|
||||
|
||||
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then
|
||||
cd /LocalAI/backend/cpp/llama-cpp
|
||||
make llama-cpp-fallback
|
||||
make llama-cpp-grpc
|
||||
make llama-cpp-rpc-server
|
||||
else
|
||||
cd /LocalAI/backend/cpp/llama-cpp
|
||||
make llama-cpp-avx
|
||||
make llama-cpp-avx2
|
||||
make llama-cpp-avx512
|
||||
make llama-cpp-fallback
|
||||
make llama-cpp-grpc
|
||||
make llama-cpp-rpc-server
|
||||
fi
|
||||
EOT
|
||||
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG BASE_IMAGE=ubuntu:22.04
|
||||
ARG BASE_IMAGE=ubuntu:24.04
|
||||
|
||||
FROM ${BASE_IMAGE} AS builder
|
||||
ARG BACKEND=rerankers
|
||||
@@ -12,7 +12,7 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ARG TARGETARCH
|
||||
ARG TARGETVARIANT
|
||||
ARG UBUNTU_VERSION=2204
|
||||
ARG UBUNTU_VERSION=2404
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
@@ -54,11 +54,45 @@ RUN <<EOT bash
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
software-properties-common pciutils wget gpg-agent && \
|
||||
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||
apt-get update && \
|
||||
apt-get install -y \
|
||||
vulkan-sdk && \
|
||||
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
|
||||
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
|
||||
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
|
||||
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
|
||||
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
|
||||
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
|
||||
if [ "amd64" = "$TARGETARCH" ]; then
|
||||
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
|
||||
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
|
||||
mkdir -p /opt/vulkan-sdk && \
|
||||
mv 1.4.328.1 /opt/vulkan-sdk/ && \
|
||||
cd /opt/vulkan-sdk/1.4.328.1 && \
|
||||
./vulkansdk --no-deps --maxjobs \
|
||||
vulkan-loader \
|
||||
vulkan-validationlayers \
|
||||
vulkan-extensionlayer \
|
||||
vulkan-tools \
|
||||
shaderc && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
|
||||
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
|
||||
rm -rf /opt/vulkan-sdk
|
||||
fi
|
||||
if [ "arm64" = "$TARGETARCH" ]; then
|
||||
mkdir vulkan && cd vulkan && \
|
||||
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
|
||||
tar -xvf vulkan-sdk.tar.xz && \
|
||||
rm vulkan-sdk.tar.xz && \
|
||||
cd 1.4.335.0 && \
|
||||
cp -rfv aarch64/bin/* /usr/bin/ && \
|
||||
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
|
||||
cp -rfv aarch64/include/* /usr/include/ && \
|
||||
cp -rfv aarch64/share/* /usr/share/ && \
|
||||
cd ../.. && \
|
||||
rm -rf vulkan
|
||||
fi
|
||||
ldconfig && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
@@ -142,7 +176,8 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
|
||||
# Install uv as a system package
|
||||
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Increase timeout for uv installs behind slow networks
|
||||
ENV UV_HTTP_TIMEOUT=180
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
|
||||
# Install grpcio-tools (the version in 22.04 is too old)
|
||||
@@ -155,12 +190,18 @@ RUN <<EOT bash
|
||||
EOT
|
||||
|
||||
|
||||
COPY python/${BACKEND} /${BACKEND}
|
||||
COPY backend.proto /${BACKEND}/backend.proto
|
||||
COPY python/common/ /${BACKEND}/common
|
||||
COPY backend/python/${BACKEND} /${BACKEND}
|
||||
COPY backend/backend.proto /${BACKEND}/backend.proto
|
||||
COPY backend/python/common/ /${BACKEND}/common
|
||||
COPY scripts/build/package-gpu-libs.sh /package-gpu-libs.sh
|
||||
|
||||
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make
|
||||
|
||||
# Package GPU libraries into the backend's lib directory
|
||||
RUN mkdir -p /${BACKEND}/lib && \
|
||||
TARGET_LIB_DIR="/${BACKEND}/lib" BUILD_TYPE="${BUILD_TYPE}" CUDA_MAJOR_VERSION="${CUDA_MAJOR_VERSION}" \
|
||||
bash /package-gpu-libs.sh "/${BACKEND}/lib"
|
||||
|
||||
FROM scratch
|
||||
ARG BACKEND=rerankers
|
||||
COPY --from=builder /${BACKEND}/ /
|
||||
@@ -65,7 +65,7 @@ The backend system provides language-specific Dockerfiles that handle the build
|
||||
## Hardware Acceleration Support
|
||||
|
||||
### CUDA (NVIDIA)
|
||||
- **Versions**: CUDA 11.x, 12.x
|
||||
- **Versions**: CUDA 12.x, 13.x
|
||||
- **Features**: cuBLAS, cuDNN, TensorRT optimization
|
||||
- **Targets**: x86_64, ARM64 (Jetson)
|
||||
|
||||
@@ -132,8 +132,7 @@ For ARM64/Mac builds, docker can't be used, and the makefile in the respective b
|
||||
### Build Types
|
||||
|
||||
- **`cpu`**: CPU-only optimization
|
||||
- **`cublas11`**: CUDA 11.x with cuBLAS
|
||||
- **`cublas12`**: CUDA 12.x with cuBLAS
|
||||
- **`cublas12`**, **`cublas13`**: CUDA 12.x, 13.x with cuBLAS
|
||||
- **`hipblas`**: ROCm with rocBLAS
|
||||
- **`intel`**: Intel oneAPI optimization
|
||||
- **`vulkan`**: Vulkan-based acceleration
|
||||
@@ -210,4 +209,4 @@ When contributing to the backend system:
|
||||
2. **Add Tests**: Include comprehensive test coverage
|
||||
3. **Document**: Provide clear usage examples
|
||||
4. **Optimize**: Consider performance and resource usage
|
||||
5. **Validate**: Test across different hardware targets
|
||||
5. **Validate**: Test across different hardware targets
|
||||
|
||||
@@ -301,7 +301,6 @@ message TranscriptSegment {
|
||||
message GenerateImageRequest {
|
||||
int32 height = 1;
|
||||
int32 width = 2;
|
||||
int32 mode = 3;
|
||||
int32 step = 4;
|
||||
int32 seed = 5;
|
||||
string positive_prompt = 6;
|
||||
|
||||
@@ -70,4 +70,4 @@ target_link_libraries(${TARGET} PRIVATE common llama mtmd ${CMAKE_THREAD_LIBS_IN
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
if(TARGET BUILD_INFO)
|
||||
add_dependencies(${TARGET} BUILD_INFO)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
LLAMA_VERSION?=5b6c9bc0f3c8f55598b9999b65aff7ce4119bc15
|
||||
LLAMA_VERSION?=b1377188784f9aea26b8abde56d4aee8c733eec7
|
||||
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
|
||||
|
||||
CMAKE_ARGS?=
|
||||
@@ -7,7 +7,8 @@ BUILD_TYPE?=
|
||||
NATIVE?=false
|
||||
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
|
||||
TARGET?=--target grpc-server
|
||||
JOBS?=$(shell nproc)
|
||||
JOBS?=$(shell nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 1)
|
||||
ARCH?=$(shell uname -m)
|
||||
|
||||
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
|
||||
@@ -106,21 +107,21 @@ llama-cpp-avx: llama.cpp
|
||||
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
|
||||
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
|
||||
$(info ${GREEN}I llama-cpp build info:avx${RESET})
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
|
||||
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
|
||||
|
||||
llama-cpp-fallback: llama.cpp
|
||||
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
|
||||
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
|
||||
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
|
||||
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
|
||||
|
||||
llama-cpp-grpc: llama.cpp
|
||||
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
|
||||
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
|
||||
$(info ${GREEN}I llama-cpp build info:grpc${RESET})
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
|
||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
|
||||
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc
|
||||
|
||||
llama-cpp-rpc-server: llama-cpp-grpc
|
||||
|
||||
@@ -23,6 +23,7 @@
|
||||
#include <grpcpp/health_check_service_interface.h>
|
||||
#include <regex>
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
#include <signal.h>
|
||||
#include <thread>
|
||||
|
||||
@@ -358,9 +359,7 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
|
||||
|
||||
params.model.path = request->modelfile();
|
||||
if (!request->mmproj().empty()) {
|
||||
// get the directory of modelfile
|
||||
std::string model_dir = params.model.path.substr(0, params.model.path.find_last_of("/\\"));
|
||||
params.mmproj.path = model_dir + "/"+ request->mmproj();
|
||||
params.mmproj.path = request->mmproj();
|
||||
}
|
||||
// params.model_alias ??
|
||||
params.model_alias = request->modelfile();
|
||||
@@ -392,8 +391,9 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
|
||||
// Initialize fit_params options (can be overridden by options)
|
||||
// fit_params: whether to auto-adjust params to fit device memory (default: true as in llama.cpp)
|
||||
params.fit_params = true;
|
||||
// fit_params_target: target margin per device in bytes (default: 1GB)
|
||||
params.fit_params_target = 1024 * 1024 * 1024;
|
||||
// fit_params_target: target margin per device in bytes (default: 1GB per device)
|
||||
// Initialize as vector with default value for all devices
|
||||
params.fit_params_target = std::vector<size_t>(llama_max_devices(), 1024 * 1024 * 1024);
|
||||
// fit_params_min_ctx: minimum context size for fit (default: 4096)
|
||||
params.fit_params_min_ctx = 4096;
|
||||
|
||||
@@ -470,10 +470,28 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
|
||||
} else if (!strcmp(optname, "fit_params_target") || !strcmp(optname, "fit_target")) {
|
||||
if (optval != NULL) {
|
||||
try {
|
||||
// Value is in MiB, convert to bytes
|
||||
params.fit_params_target = static_cast<size_t>(std::stoi(optval_str)) * 1024 * 1024;
|
||||
// Value is in MiB, can be comma-separated list for multiple devices
|
||||
// Single value is broadcast across all devices
|
||||
std::string arg_next = optval_str;
|
||||
const std::regex regex{ R"([,/]+)" };
|
||||
std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
|
||||
std::vector<std::string> split_arg{ it, {} };
|
||||
if (split_arg.size() >= llama_max_devices()) {
|
||||
// Too many values provided
|
||||
continue;
|
||||
}
|
||||
if (split_arg.size() == 1) {
|
||||
// Single value: broadcast to all devices
|
||||
size_t value_mib = std::stoul(split_arg[0]);
|
||||
std::fill(params.fit_params_target.begin(), params.fit_params_target.end(), value_mib * 1024 * 1024);
|
||||
} else {
|
||||
// Multiple values: set per device
|
||||
for (size_t i = 0; i < split_arg.size() && i < params.fit_params_target.size(); i++) {
|
||||
params.fit_params_target[i] = std::stoul(split_arg[i]) * 1024 * 1024;
|
||||
}
|
||||
}
|
||||
} catch (const std::exception& e) {
|
||||
// If conversion fails, keep default value (1GB)
|
||||
// If conversion fails, keep default value (1GB per device)
|
||||
}
|
||||
}
|
||||
} else if (!strcmp(optname, "fit_params_min_ctx") || !strcmp(optname, "fit_ctx")) {
|
||||
@@ -688,13 +706,13 @@ private:
|
||||
public:
|
||||
BackendServiceImpl(server_context& ctx) : ctx_server(ctx) {}
|
||||
|
||||
grpc::Status Health(ServerContext* /*context*/, const backend::HealthMessage* /*request*/, backend::Reply* reply) {
|
||||
grpc::Status Health(ServerContext* /*context*/, const backend::HealthMessage* /*request*/, backend::Reply* reply) override {
|
||||
// Implement Health RPC
|
||||
reply->set_message("OK");
|
||||
return Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status LoadModel(ServerContext* /*context*/, const backend::ModelOptions* request, backend::Result* result) {
|
||||
grpc::Status LoadModel(ServerContext* /*context*/, const backend::ModelOptions* request, backend::Result* result) override {
|
||||
// Implement LoadModel RPC
|
||||
common_params params;
|
||||
params_parse(ctx_server, request, params);
|
||||
@@ -711,11 +729,72 @@ public:
|
||||
LOG_INF("\n");
|
||||
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
|
||||
LOG_INF("\n");
|
||||
|
||||
// Capture error messages during model loading
|
||||
struct error_capture {
|
||||
std::string captured_error;
|
||||
std::mutex error_mutex;
|
||||
ggml_log_callback original_callback;
|
||||
void* original_user_data;
|
||||
} error_capture_data;
|
||||
|
||||
// Get original log callback
|
||||
llama_log_get(&error_capture_data.original_callback, &error_capture_data.original_user_data);
|
||||
|
||||
// Set custom callback to capture errors
|
||||
llama_log_set([](ggml_log_level level, const char * text, void * user_data) {
|
||||
auto* capture = static_cast<error_capture*>(user_data);
|
||||
|
||||
// Capture error messages
|
||||
if (level == GGML_LOG_LEVEL_ERROR) {
|
||||
std::lock_guard<std::mutex> lock(capture->error_mutex);
|
||||
// Append error message, removing trailing newlines
|
||||
std::string msg(text);
|
||||
while (!msg.empty() && (msg.back() == '\n' || msg.back() == '\r')) {
|
||||
msg.pop_back();
|
||||
}
|
||||
if (!msg.empty()) {
|
||||
if (!capture->captured_error.empty()) {
|
||||
capture->captured_error.append("; ");
|
||||
}
|
||||
capture->captured_error.append(msg);
|
||||
}
|
||||
}
|
||||
|
||||
// Also call original callback to preserve logging
|
||||
if (capture->original_callback) {
|
||||
capture->original_callback(level, text, capture->original_user_data);
|
||||
}
|
||||
}, &error_capture_data);
|
||||
|
||||
// load the model
|
||||
if (!ctx_server.load_model(params)) {
|
||||
result->set_message("Failed loading model");
|
||||
bool load_success = ctx_server.load_model(params);
|
||||
|
||||
// Restore original log callback
|
||||
llama_log_set(error_capture_data.original_callback, error_capture_data.original_user_data);
|
||||
|
||||
if (!load_success) {
|
||||
std::string error_msg = "Failed to load model: " + params.model.path;
|
||||
if (!params.mmproj.path.empty()) {
|
||||
error_msg += " (with mmproj: " + params.mmproj.path + ")";
|
||||
}
|
||||
if (params.has_speculative() && !params.speculative.model.path.empty()) {
|
||||
error_msg += " (with draft model: " + params.speculative.model.path + ")";
|
||||
}
|
||||
|
||||
// Add captured error details if available
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(error_capture_data.error_mutex);
|
||||
if (!error_capture_data.captured_error.empty()) {
|
||||
error_msg += ". Error: " + error_capture_data.captured_error;
|
||||
} else {
|
||||
error_msg += ". Model file may not exist or be invalid.";
|
||||
}
|
||||
}
|
||||
|
||||
result->set_message(error_msg);
|
||||
result->set_success(false);
|
||||
return Status::CANCELLED;
|
||||
return grpc::Status(grpc::StatusCode::INTERNAL, error_msg);
|
||||
}
|
||||
|
||||
// Process grammar triggers now that vocab is available
|
||||
@@ -1494,7 +1573,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) {
|
||||
grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) override {
|
||||
if (params_base.model.path.empty()) {
|
||||
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
|
||||
}
|
||||
@@ -2165,7 +2244,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) {
|
||||
grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) override {
|
||||
if (params_base.model.path.empty()) {
|
||||
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
|
||||
}
|
||||
@@ -2260,7 +2339,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status Rerank(ServerContext* context, const backend::RerankRequest* request, backend::RerankResult* rerankResult) {
|
||||
grpc::Status Rerank(ServerContext* context, const backend::RerankRequest* request, backend::RerankResult* rerankResult) override {
|
||||
if (!params_base.embedding || params_base.pooling_type != LLAMA_POOLING_TYPE_RANK) {
|
||||
return grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "This server does not support reranking. Start it with `--reranking` and without `--embedding`");
|
||||
}
|
||||
@@ -2346,7 +2425,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status TokenizeString(ServerContext* /*context*/, const backend::PredictOptions* request, backend::TokenizationResponse* response) {
|
||||
grpc::Status TokenizeString(ServerContext* /*context*/, const backend::PredictOptions* request, backend::TokenizationResponse* response) override {
|
||||
if (params_base.model.path.empty()) {
|
||||
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
|
||||
}
|
||||
@@ -2369,7 +2448,7 @@ public:
|
||||
return grpc::Status::OK;
|
||||
}
|
||||
|
||||
grpc::Status GetMetrics(ServerContext* /*context*/, const backend::MetricsRequest* /*request*/, backend::MetricsResponse* response) {
|
||||
grpc::Status GetMetrics(ServerContext* /*context*/, const backend::MetricsRequest* /*request*/, backend::MetricsResponse* response) override {
|
||||
|
||||
// request slots data using task queue
|
||||
auto rd = ctx_server.get_response_reader();
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
set -e
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
REPO_ROOT="${CURDIR}/../../.."
|
||||
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
@@ -37,6 +38,15 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Package GPU libraries based on BUILD_TYPE
|
||||
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
|
||||
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
|
||||
if [ -f "$GPU_LIB_SCRIPT" ]; then
|
||||
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
|
||||
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
|
||||
package_gpu_libs
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# stablediffusion.cpp (ggml)
|
||||
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
|
||||
STABLEDIFFUSION_GGML_VERSION?=bda7fab9f208dff4b67179a68f694b6ddec13326
|
||||
STABLEDIFFUSION_GGML_VERSION?=0e52afc6513cc2dea9a1a017afc4a008d5acf2b0
|
||||
|
||||
CMAKE_ARGS+=-DGGML_MAX_NAME=128
|
||||
|
||||
@@ -28,7 +28,12 @@ else ifeq ($(BUILD_TYPE),clblas)
|
||||
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
|
||||
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
|
||||
else ifeq ($(BUILD_TYPE),hipblas)
|
||||
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON
|
||||
ROCM_HOME ?= /opt/rocm
|
||||
ROCM_PATH ?= /opt/rocm
|
||||
export CXX=$(ROCM_HOME)/llvm/bin/clang++
|
||||
export CC=$(ROCM_HOME)/llvm/bin/clang
|
||||
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
|
||||
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
|
||||
else ifeq ($(BUILD_TYPE),vulkan)
|
||||
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
|
||||
else ifeq ($(OS),Darwin)
|
||||
|
||||
@@ -55,6 +55,7 @@ const char* schedulers[] = {
|
||||
"sgm_uniform",
|
||||
"simple",
|
||||
"smoothstep",
|
||||
"kl_optimal",
|
||||
"lcm",
|
||||
};
|
||||
|
||||
@@ -147,26 +148,26 @@ static std::string lora_dir_path;
|
||||
static void build_embedding_vec(const char* embedding_dir) {
|
||||
embedding_vec.clear();
|
||||
embedding_strings.clear();
|
||||
|
||||
|
||||
if (!embedding_dir || strlen(embedding_dir) == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
if (!std::filesystem::exists(embedding_dir) || !std::filesystem::is_directory(embedding_dir)) {
|
||||
fprintf(stderr, "Embedding directory does not exist or is not a directory: %s\n", embedding_dir);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
static const std::vector<std::string> valid_ext = {".pt", ".safetensors", ".gguf"};
|
||||
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(embedding_dir)) {
|
||||
if (!entry.is_regular_file()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
auto path = entry.path();
|
||||
std::string ext = path.extension().string();
|
||||
|
||||
|
||||
bool valid = false;
|
||||
for (const auto& e : valid_ext) {
|
||||
if (ext == e) {
|
||||
@@ -177,51 +178,51 @@ static void build_embedding_vec(const char* embedding_dir) {
|
||||
if (!valid) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
std::string name = path.stem().string();
|
||||
std::string full_path = path.string();
|
||||
|
||||
|
||||
// Store strings in persistent storage
|
||||
embedding_strings.push_back(name);
|
||||
embedding_strings.push_back(full_path);
|
||||
|
||||
|
||||
sd_embedding_t item;
|
||||
item.name = embedding_strings[embedding_strings.size() - 2].c_str();
|
||||
item.path = embedding_strings[embedding_strings.size() - 1].c_str();
|
||||
|
||||
|
||||
embedding_vec.push_back(item);
|
||||
fprintf(stderr, "Found embedding: %s -> %s\n", item.name, item.path);
|
||||
}
|
||||
|
||||
|
||||
fprintf(stderr, "Loaded %zu embeddings from %s\n", embedding_vec.size(), embedding_dir);
|
||||
}
|
||||
|
||||
// Discover LoRA files in directory and build a map of name -> path
|
||||
static std::map<std::string, std::string> discover_lora_files(const char* lora_dir) {
|
||||
std::map<std::string, std::string> lora_map;
|
||||
|
||||
|
||||
if (!lora_dir || strlen(lora_dir) == 0) {
|
||||
fprintf(stderr, "LoRA directory not specified\n");
|
||||
return lora_map;
|
||||
}
|
||||
|
||||
|
||||
if (!std::filesystem::exists(lora_dir) || !std::filesystem::is_directory(lora_dir)) {
|
||||
fprintf(stderr, "LoRA directory does not exist or is not a directory: %s\n", lora_dir);
|
||||
return lora_map;
|
||||
}
|
||||
|
||||
|
||||
static const std::vector<std::string> valid_ext = {".safetensors", ".ckpt", ".pt", ".gguf"};
|
||||
|
||||
|
||||
fprintf(stderr, "Discovering LoRA files in: %s\n", lora_dir);
|
||||
|
||||
|
||||
for (const auto& entry : std::filesystem::directory_iterator(lora_dir)) {
|
||||
if (!entry.is_regular_file()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
auto path = entry.path();
|
||||
std::string ext = path.extension().string();
|
||||
|
||||
|
||||
bool valid = false;
|
||||
for (const auto& e : valid_ext) {
|
||||
if (ext == e) {
|
||||
@@ -232,17 +233,17 @@ static std::map<std::string, std::string> discover_lora_files(const char* lora_d
|
||||
if (!valid) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
std::string name = path.stem().string(); // stem() already removes extension
|
||||
std::string full_path = path.string();
|
||||
|
||||
|
||||
// Store the name (without extension) -> full path mapping
|
||||
// This allows users to specify just the name in <lora:name:strength>
|
||||
lora_map[name] = full_path;
|
||||
|
||||
|
||||
fprintf(stderr, "Found LoRA file: %s -> %s\n", name.c_str(), full_path.c_str());
|
||||
}
|
||||
|
||||
|
||||
fprintf(stderr, "Discovered %zu LoRA files in %s\n", lora_map.size(), lora_dir);
|
||||
return lora_map;
|
||||
}
|
||||
@@ -264,31 +265,31 @@ static bool is_absolute_path(const std::string& p) {
|
||||
static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(const std::string& prompt, const char* lora_dir) {
|
||||
std::vector<sd_lora_t> loras;
|
||||
std::string cleaned_prompt = prompt;
|
||||
|
||||
|
||||
if (!lora_dir || strlen(lora_dir) == 0) {
|
||||
fprintf(stderr, "LoRA directory not set, cannot parse LoRAs from prompt\n");
|
||||
return {loras, cleaned_prompt};
|
||||
}
|
||||
|
||||
|
||||
// Discover LoRA files for name-based lookup
|
||||
std::map<std::string, std::string> discovered_lora_map = discover_lora_files(lora_dir);
|
||||
|
||||
|
||||
// Map to accumulate multipliers for the same LoRA (matches upstream)
|
||||
std::map<std::string, float> lora_map;
|
||||
std::map<std::string, float> high_noise_lora_map;
|
||||
|
||||
|
||||
static const std::regex re(R"(<lora:([^:>]+):([^>]+)>)");
|
||||
static const std::vector<std::string> valid_ext = {".pt", ".safetensors", ".gguf"};
|
||||
std::smatch m;
|
||||
|
||||
|
||||
std::string tmp = prompt;
|
||||
|
||||
|
||||
fprintf(stderr, "Parsing LoRAs from prompt: %s\n", prompt.c_str());
|
||||
|
||||
|
||||
while (std::regex_search(tmp, m, re)) {
|
||||
std::string raw_path = m[1].str();
|
||||
const std::string raw_mul = m[2].str();
|
||||
|
||||
|
||||
float mul = 0.f;
|
||||
try {
|
||||
mul = std::stof(raw_mul);
|
||||
@@ -298,14 +299,14 @@ static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(co
|
||||
fprintf(stderr, "Invalid LoRA multiplier '%s', skipping\n", raw_mul.c_str());
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
bool is_high_noise = false;
|
||||
static const std::string prefix = "|high_noise|";
|
||||
if (raw_path.rfind(prefix, 0) == 0) {
|
||||
raw_path.erase(0, prefix.size());
|
||||
is_high_noise = true;
|
||||
}
|
||||
|
||||
|
||||
std::filesystem::path final_path;
|
||||
if (is_absolute_path(raw_path)) {
|
||||
final_path = raw_path;
|
||||
@@ -334,7 +335,7 @@ static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(co
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Try adding extensions if file doesn't exist
|
||||
if (!std::filesystem::exists(final_path)) {
|
||||
bool found = false;
|
||||
@@ -354,24 +355,24 @@ static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(co
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Normalize path (matches upstream)
|
||||
const std::string key = final_path.lexically_normal().string();
|
||||
|
||||
|
||||
// Accumulate multiplier if same LoRA appears multiple times (matches upstream)
|
||||
if (is_high_noise) {
|
||||
high_noise_lora_map[key] += mul;
|
||||
} else {
|
||||
lora_map[key] += mul;
|
||||
}
|
||||
|
||||
fprintf(stderr, "Parsed LoRA: path='%s', multiplier=%.2f, is_high_noise=%s\n",
|
||||
|
||||
fprintf(stderr, "Parsed LoRA: path='%s', multiplier=%.2f, is_high_noise=%s\n",
|
||||
key.c_str(), mul, is_high_noise ? "true" : "false");
|
||||
|
||||
|
||||
cleaned_prompt = std::regex_replace(cleaned_prompt, re, "", std::regex_constants::format_first_only);
|
||||
tmp = m.suffix().str();
|
||||
}
|
||||
|
||||
|
||||
// Build final LoRA vector from accumulated maps (matches upstream)
|
||||
// Store all path strings first to ensure they persist
|
||||
for (const auto& kv : lora_map) {
|
||||
@@ -380,7 +381,7 @@ static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(co
|
||||
for (const auto& kv : high_noise_lora_map) {
|
||||
lora_strings.push_back(kv.first);
|
||||
}
|
||||
|
||||
|
||||
// Now build the LoRA vector with pointers to the stored strings
|
||||
size_t string_idx = 0;
|
||||
for (const auto& kv : lora_map) {
|
||||
@@ -391,7 +392,7 @@ static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(co
|
||||
loras.push_back(item);
|
||||
string_idx++;
|
||||
}
|
||||
|
||||
|
||||
for (const auto& kv : high_noise_lora_map) {
|
||||
sd_lora_t item;
|
||||
item.is_high_noise = true;
|
||||
@@ -400,7 +401,7 @@ static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(co
|
||||
loras.push_back(item);
|
||||
string_idx++;
|
||||
}
|
||||
|
||||
|
||||
// Clean up extra spaces
|
||||
std::regex space_regex(R"(\s+)");
|
||||
cleaned_prompt = std::regex_replace(cleaned_prompt, space_regex, " ");
|
||||
@@ -413,9 +414,9 @@ static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(co
|
||||
if (last != std::string::npos) {
|
||||
cleaned_prompt.erase(last + 1);
|
||||
}
|
||||
|
||||
|
||||
fprintf(stderr, "Parsed %zu LoRA(s) from prompt. Cleaned prompt: %s\n", loras.size(), cleaned_prompt.c_str());
|
||||
|
||||
|
||||
return {loras, cleaned_prompt};
|
||||
}
|
||||
|
||||
@@ -752,7 +753,7 @@ int load_model(const char *model, char *model_path, char* options[], int threads
|
||||
}
|
||||
}
|
||||
if (scheduler == SCHEDULER_COUNT) {
|
||||
scheduler = sd_get_default_scheduler(sd_ctx);
|
||||
scheduler = sd_get_default_scheduler(sd_ctx, sample_method);
|
||||
fprintf(stderr, "Invalid scheduler, using default: %s\n", schedulers[scheduler]);
|
||||
}
|
||||
|
||||
@@ -787,7 +788,7 @@ sd_img_gen_params_t* sd_img_gen_params_new(void) {
|
||||
sd_img_gen_params_t *params = (sd_img_gen_params_t *)std::malloc(sizeof(sd_img_gen_params_t));
|
||||
sd_img_gen_params_init(params);
|
||||
sd_sample_params_init(¶ms->sample_params);
|
||||
sd_easycache_params_init(¶ms->easycache);
|
||||
sd_cache_params_init(¶ms->cache);
|
||||
params->control_strength = 0.9f;
|
||||
return params;
|
||||
}
|
||||
@@ -819,18 +820,18 @@ void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prom
|
||||
fprintf(stderr, "Note: Found %zu LoRAs in negative prompt (may not be supported)\n", neg_loras.size());
|
||||
}
|
||||
cleaned_negative_prompt_storage = cleaned_negative;
|
||||
|
||||
|
||||
// Set the cleaned prompts
|
||||
params->prompt = cleaned_prompt_storage.c_str();
|
||||
params->negative_prompt = cleaned_negative_prompt_storage.c_str();
|
||||
|
||||
|
||||
// Set LoRAs in params
|
||||
params->loras = lora_vec.empty() ? nullptr : lora_vec.data();
|
||||
params->lora_count = static_cast<uint32_t>(lora_vec.size());
|
||||
|
||||
|
||||
fprintf(stderr, "Set prompts with %zu LoRAs. Original prompt: %s\n", lora_vec.size(), prompt ? prompt : "(null)");
|
||||
fprintf(stderr, "Cleaned prompt: %s\n", cleaned_prompt_storage.c_str());
|
||||
|
||||
|
||||
// Debug: Verify LoRAs are set correctly
|
||||
if (params->loras && params->lora_count > 0) {
|
||||
fprintf(stderr, "DEBUG: LoRAs set in params structure:\n");
|
||||
@@ -1042,7 +1043,7 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
|
||||
fprintf(stderr, "Using %u LoRA(s) in generation:\n", p->lora_count);
|
||||
for (uint32_t i = 0; i < p->lora_count; i++) {
|
||||
fprintf(stderr, " LoRA[%u]: path='%s', multiplier=%.2f, is_high_noise=%s\n",
|
||||
i,
|
||||
i,
|
||||
p->loras[i].path ? p->loras[i].path : "(null)",
|
||||
p->loras[i].multiplier,
|
||||
p->loras[i].is_high_noise ? "true" : "false");
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
set -e
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
REPO_ROOT="${CURDIR}/../../.."
|
||||
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
@@ -50,6 +51,15 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Package GPU libraries based on BUILD_TYPE
|
||||
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
|
||||
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
|
||||
if [ -f "$GPU_LIB_SCRIPT" ]; then
|
||||
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
|
||||
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
|
||||
package_gpu_libs
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# whisper.cpp version
|
||||
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
|
||||
WHISPER_CPP_VERSION?=6c22e792cb0ee155b6587ce71a8410c3aeb06949
|
||||
WHISPER_CPP_VERSION?=679bdb53dbcbfb3e42685f50c7ff367949fd4d48
|
||||
SO_TARGET?=libgowhisper.so
|
||||
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
set -e
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
REPO_ROOT="${CURDIR}/../../.."
|
||||
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
@@ -50,6 +51,15 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Package GPU libraries based on BUILD_TYPE
|
||||
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
|
||||
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
|
||||
if [ -f "$GPU_LIB_SCRIPT" ]; then
|
||||
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
|
||||
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
|
||||
package_gpu_libs
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
|
||||
@@ -275,6 +275,24 @@
|
||||
amd: "rocm-faster-whisper"
|
||||
nvidia-cuda-13: "cuda13-faster-whisper"
|
||||
nvidia-cuda-12: "cuda12-faster-whisper"
|
||||
- &moonshine
|
||||
description: |
|
||||
Moonshine is a fast, accurate, and efficient speech-to-text transcription model using ONNX Runtime.
|
||||
It provides real-time transcription capabilities with support for multiple model sizes and GPU acceleration.
|
||||
urls:
|
||||
- https://github.com/moonshine-ai/moonshine
|
||||
tags:
|
||||
- speech-to-text
|
||||
- transcription
|
||||
- ONNX
|
||||
license: MIT
|
||||
name: "moonshine"
|
||||
alias: "moonshine"
|
||||
capabilities:
|
||||
nvidia: "cuda12-moonshine"
|
||||
default: "cpu-moonshine"
|
||||
nvidia-cuda-13: "cuda13-moonshine"
|
||||
nvidia-cuda-12: "cuda12-moonshine"
|
||||
- &kokoro
|
||||
icon: https://avatars.githubusercontent.com/u/166769057?v=4
|
||||
description: |
|
||||
@@ -634,11 +652,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda11-llama-cpp"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda12-llama-cpp"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp"
|
||||
@@ -679,11 +692,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-metal-darwin-arm64-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda11-llama-cpp-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-llama-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-llama-cpp
|
||||
- !!merge <<: *llamacpp
|
||||
name: "cuda12-llama-cpp-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-llama-cpp"
|
||||
@@ -755,11 +763,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda11-whisper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda12-whisper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-whisper"
|
||||
@@ -800,11 +803,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-metal-darwin-arm64-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda11-whisper-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-whisper
|
||||
- !!merge <<: *whispercpp
|
||||
name: "cuda12-whisper-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-whisper"
|
||||
@@ -879,11 +877,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "cuda11-stablediffusion-ggml"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "cuda12-stablediffusion-ggml-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-stablediffusion-ggml"
|
||||
@@ -899,11 +892,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "cuda11-stablediffusion-ggml-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml
|
||||
- !!merge <<: *stablediffusionggml
|
||||
name: "nvidia-l4t-arm64-stablediffusion-ggml-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-stablediffusion-ggml"
|
||||
@@ -1054,11 +1042,6 @@
|
||||
intel: "intel-rerankers-development"
|
||||
amd: "rocm-rerankers-development"
|
||||
nvidia-cuda-13: "cuda13-rerankers-development"
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda11-rerankers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-rerankers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-rerankers
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda12-rerankers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-rerankers"
|
||||
@@ -1074,11 +1057,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-rerankers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-rerankers
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda11-rerankers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-rerankers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-rerankers
|
||||
- !!merge <<: *rerankers
|
||||
name: "cuda12-rerankers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-rerankers"
|
||||
@@ -1127,16 +1105,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-transformers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-transformers
|
||||
- !!merge <<: *transformers
|
||||
name: "cuda11-transformers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-transformers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-transformers
|
||||
- !!merge <<: *transformers
|
||||
name: "cuda11-transformers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-transformers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-transformers
|
||||
- !!merge <<: *transformers
|
||||
name: "cuda12-transformers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-transformers"
|
||||
@@ -1213,21 +1181,11 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "cuda11-diffusers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "intel-diffusers"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "cuda11-diffusers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-diffusers
|
||||
- !!merge <<: *diffusers
|
||||
name: "cuda12-diffusers-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-diffusers"
|
||||
@@ -1269,21 +1227,11 @@
|
||||
capabilities:
|
||||
nvidia: "cuda12-exllama2-development"
|
||||
intel: "intel-exllama2-development"
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda11-exllama2"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda11-exllama2-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
|
||||
@@ -1297,11 +1245,6 @@
|
||||
intel: "intel-kokoro-development"
|
||||
amd: "rocm-kokoro-development"
|
||||
nvidia-l4t: "nvidia-l4t-kokoro-development"
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda11-kokoro-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda12-kokoro-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-kokoro"
|
||||
@@ -1332,11 +1275,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda11-kokoro"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda12-kokoro"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-kokoro"
|
||||
@@ -1365,11 +1303,6 @@
|
||||
intel: "intel-faster-whisper-development"
|
||||
amd: "rocm-faster-whisper-development"
|
||||
nvidia-cuda-13: "cuda13-faster-whisper-development"
|
||||
- !!merge <<: *faster-whisper
|
||||
name: "cuda11-faster-whisper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-faster-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-faster-whisper
|
||||
- !!merge <<: *faster-whisper
|
||||
name: "cuda12-faster-whisper-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-faster-whisper"
|
||||
@@ -1400,6 +1333,44 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-faster-whisper"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-faster-whisper
|
||||
## moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "moonshine-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-moonshine-development"
|
||||
default: "cpu-moonshine-development"
|
||||
nvidia-cuda-13: "cuda13-moonshine-development"
|
||||
nvidia-cuda-12: "cuda12-moonshine-development"
|
||||
- !!merge <<: *moonshine
|
||||
name: "cpu-moonshine"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cpu-moonshine-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda12-moonshine"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda12-moonshine-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda13-moonshine"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-moonshine
|
||||
- !!merge <<: *moonshine
|
||||
name: "cuda13-moonshine-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
|
||||
## coqui
|
||||
|
||||
- !!merge <<: *coqui
|
||||
@@ -1408,21 +1379,11 @@
|
||||
nvidia: "cuda12-coqui-development"
|
||||
intel: "intel-coqui-development"
|
||||
amd: "rocm-coqui-development"
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda11-coqui"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-coqui
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda12-coqui"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-coqui
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda11-coqui-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-coqui
|
||||
- !!merge <<: *coqui
|
||||
name: "cuda12-coqui-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-coqui"
|
||||
@@ -1455,16 +1416,6 @@
|
||||
nvidia: "cuda12-bark-development"
|
||||
intel: "intel-bark-development"
|
||||
amd: "rocm-bark-development"
|
||||
- !!merge <<: *bark
|
||||
name: "cuda11-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-bark
|
||||
- !!merge <<: *bark
|
||||
name: "cuda11-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-bark
|
||||
- !!merge <<: *bark
|
||||
name: "rocm-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
|
||||
@@ -1546,16 +1497,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "cuda11-chatterbox"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-11-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "cuda11-chatterbox-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-11-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "cuda12-chatterbox"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-chatterbox"
|
||||
|
||||
@@ -85,7 +85,7 @@ runUnittests
|
||||
The build system automatically detects and configures for different hardware:
|
||||
|
||||
- **CPU** - Standard CPU-only builds
|
||||
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 11/12)
|
||||
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 12/13)
|
||||
- **Intel** - Intel XPU/GPU optimization
|
||||
- **MLX** - Apple Silicon (M1/M2/M3) optimization
|
||||
- **HIP** - AMD GPU acceleration
|
||||
@@ -95,8 +95,8 @@ The build system automatically detects and configures for different hardware:
|
||||
Backends can specify hardware-specific dependencies:
|
||||
- `requirements.txt` - Base requirements
|
||||
- `requirements-cpu.txt` - CPU-specific packages
|
||||
- `requirements-cublas11.txt` - CUDA 11 packages
|
||||
- `requirements-cublas12.txt` - CUDA 12 packages
|
||||
- `requirements-cublas13.txt` - CUDA 13 packages
|
||||
- `requirements-intel.txt` - Intel-optimized packages
|
||||
- `requirements-mps.txt` - Apple Silicon packages
|
||||
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
torchaudio==2.4.1+cu118
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.4.1+rocm6.0
|
||||
torchaudio==2.4.1+rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
@@ -17,4 +17,9 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
fi
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
|
||||
installRequirements
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.6.0+cu118
|
||||
torchaudio==2.6.0+cu118
|
||||
transformers==4.46.3
|
||||
numpy>=1.24.0,<1.26.0
|
||||
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
|
||||
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
|
||||
accelerate
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.6.0+rocm6.1
|
||||
torchaudio==2.6.0+rocm6.1
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.9.1+rocm6.4
|
||||
torchaudio==2.9.1+rocm6.4
|
||||
transformers
|
||||
numpy>=1.24.0,<1.26.0
|
||||
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
|
||||
|
||||
5
backend/python/chatterbox/requirements-install.txt
Normal file
5
backend/python/chatterbox/requirements-install.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
# Build dependencies needed for packages installed from source (e.g., git dependencies)
|
||||
# When using --no-build-isolation, these must be installed in the venv first
|
||||
wheel
|
||||
setuptools
|
||||
packaging
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
#
|
||||
#
|
||||
# use the library by adding the following line to a script:
|
||||
# source $(dirname $0)/../common/libbackend.sh
|
||||
#
|
||||
@@ -206,8 +206,8 @@ function init() {
|
||||
|
||||
# getBuildProfile will inspect the system to determine which build profile is appropriate:
|
||||
# returns one of the following:
|
||||
# - cublas11
|
||||
# - cublas12
|
||||
# - cublas13
|
||||
# - hipblas
|
||||
# - intel
|
||||
function getBuildProfile() {
|
||||
@@ -392,7 +392,7 @@ function runProtogen() {
|
||||
# - requirements-${BUILD_TYPE}.txt
|
||||
# - requirements-${BUILD_PROFILE}.txt
|
||||
#
|
||||
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
|
||||
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-12 or cuda-13
|
||||
# it can also include some options that we do not have BUILD_TYPES for, ex: intel
|
||||
#
|
||||
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
|
||||
@@ -465,6 +465,14 @@ function startBackend() {
|
||||
if [ "x${PORTABLE_PYTHON}" == "xtrue" ] || [ -x "$(_portable_python)" ]; then
|
||||
_makeVenvPortable --update-pyvenv-cfg
|
||||
fi
|
||||
|
||||
# Set up GPU library paths if a lib directory exists
|
||||
# This allows backends to include their own GPU libraries (CUDA, ROCm, etc.)
|
||||
if [ -d "${EDIR}/lib" ]; then
|
||||
export LD_LIBRARY_PATH="${EDIR}/lib:${LD_LIBRARY_PATH:-}"
|
||||
echo "Added ${EDIR}/lib to LD_LIBRARY_PATH for GPU libraries"
|
||||
fi
|
||||
|
||||
if [ ! -z "${BACKEND_FILE:-}" ]; then
|
||||
exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@"
|
||||
elif [ -e "${MY_DIR}/server.py" ]; then
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch
|
||||
@@ -1,6 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
torchaudio==2.4.1+cu118
|
||||
transformers==4.48.3
|
||||
accelerate
|
||||
coqui-tts
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.4.1+rocm6.0
|
||||
torchaudio==2.4.1+rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers==4.48.3
|
||||
accelerate
|
||||
coqui-tts
|
||||
@@ -16,8 +16,12 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
# Use python 3.12 for l4t
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
PYTHON_VERSION="3.12"
|
||||
PYTHON_PATCH="12"
|
||||
PY_STANDALONE_TAG="20251120"
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
torchvision==0.22.1
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
torch==2.7.1
|
||||
optimum-quanto
|
||||
ftfy
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchvision==0.22.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchvision==0.23.0+rocm6.4
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,9 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
faster-whisper
|
||||
opencv-python
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
optimum-quanto
|
||||
@@ -1,3 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch
|
||||
faster-whisper
|
||||
@@ -16,4 +16,8 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.7.1+cu118
|
||||
torchaudio==2.7.1+cu118
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchaudio==2.7.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
|
||||
16
backend/python/moonshine/Makefile
Normal file
16
backend/python/moonshine/Makefile
Normal file
@@ -0,0 +1,16 @@
|
||||
.DEFAULT_GOAL := install
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
|
||||
test: install
|
||||
bash test.sh
|
||||
113
backend/python/moonshine/backend.py
Normal file
113
backend/python/moonshine/backend.py
Normal file
@@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Moonshine transcription
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import moonshine_onnx
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
# Store the model name for use in transcription
|
||||
# Model name format: e.g., "moonshine/tiny"
|
||||
self.model_name = request.Model
|
||||
print(f"Model name set to: {self.model_name}", file=sys.stderr)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def AudioTranscription(self, request, context):
|
||||
resultSegments = []
|
||||
text = ""
|
||||
try:
|
||||
# moonshine_onnx.transcribe returns a list of strings
|
||||
transcriptions = moonshine_onnx.transcribe(request.dst, self.model_name)
|
||||
|
||||
# Combine all transcriptions into a single text
|
||||
if isinstance(transcriptions, list):
|
||||
text = " ".join(transcriptions)
|
||||
# Create segments for each transcription in the list
|
||||
for id, trans in enumerate(transcriptions):
|
||||
# Since moonshine doesn't provide timing info, we'll create a single segment
|
||||
# with id and text, using approximate timing
|
||||
resultSegments.append(backend_pb2.TranscriptSegment(
|
||||
id=id,
|
||||
start=0,
|
||||
end=0,
|
||||
text=trans
|
||||
))
|
||||
else:
|
||||
# Handle case where it's not a list (shouldn't happen, but be safe)
|
||||
text = str(transcriptions)
|
||||
resultSegments.append(backend_pb2.TranscriptSegment(
|
||||
id=0,
|
||||
start=0,
|
||||
end=0,
|
||||
text=text
|
||||
))
|
||||
except Exception as err:
|
||||
print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
return backend_pb2.TranscriptResult(segments=resultSegments, text=text)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
|
||||
12
backend/python/moonshine/install.sh
Executable file
12
backend/python/moonshine/install.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
12
backend/python/moonshine/protogen.sh
Executable file
12
backend/python/moonshine/protogen.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
|
||||
4
backend/python/moonshine/requirements.txt
Normal file
4
backend/python/moonshine/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
grpcio-tools
|
||||
useful-moonshine-onnx@git+https://git@github.com/moonshine-ai/moonshine.git#subdirectory=moonshine-onnx
|
||||
10
backend/python/moonshine/run.sh
Executable file
10
backend/python/moonshine/run.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
|
||||
139
backend/python/moonshine/test.py
Normal file
139
backend/python/moonshine/test.py
Normal file
@@ -0,0 +1,139 @@
|
||||
"""
|
||||
A test script to test the gRPC service for Moonshine transcription
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
This method sets up the gRPC service by starting the server
|
||||
"""
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(10)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
"""
|
||||
This method tears down the gRPC service by terminating the server
|
||||
"""
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
"""
|
||||
This method tests if the server starts up successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
"""
|
||||
This method tests if the model is loaded successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny"))
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_audio_transcription(self):
|
||||
"""
|
||||
This method tests if audio transcription works successfully
|
||||
"""
|
||||
# Create a temporary directory for the audio file
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
audio_file = os.path.join(temp_dir, 'audio.wav')
|
||||
|
||||
try:
|
||||
# Download the audio file to the temporary directory
|
||||
print(f"Downloading audio file to {audio_file}...")
|
||||
url = "https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav"
|
||||
result = subprocess.run(
|
||||
["wget", "-q", url, "-O", audio_file],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
if result.returncode != 0:
|
||||
self.fail(f"Failed to download audio file: {result.stderr}")
|
||||
|
||||
# Verify the file was downloaded
|
||||
if not os.path.exists(audio_file):
|
||||
self.fail(f"Audio file was not downloaded to {audio_file}")
|
||||
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
# Load the model first
|
||||
load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny"))
|
||||
self.assertTrue(load_response.success)
|
||||
|
||||
# Perform transcription
|
||||
transcript_request = backend_pb2.TranscriptRequest(dst=audio_file)
|
||||
transcript_response = stub.AudioTranscription(transcript_request)
|
||||
|
||||
# Print the transcribed text for debugging
|
||||
print(f"Transcribed text: {transcript_response.text}")
|
||||
print(f"Number of segments: {len(transcript_response.segments)}")
|
||||
|
||||
# Verify response structure
|
||||
self.assertIsNotNone(transcript_response)
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
# Protobuf repeated fields return a sequence, not a list
|
||||
self.assertIsNotNone(transcript_response.segments)
|
||||
# Check if segments is iterable (has length)
|
||||
self.assertGreaterEqual(len(transcript_response.segments), 0)
|
||||
|
||||
# Verify the transcription contains the expected text
|
||||
expected_text = "This is the micro machine man presenting the most midget miniature"
|
||||
self.assertIn(
|
||||
expected_text.lower(),
|
||||
transcript_response.text.lower(),
|
||||
f"Expected text '{expected_text}' not found in transcription: '{transcript_response.text}'"
|
||||
)
|
||||
|
||||
# If we got segments, verify they have the expected structure
|
||||
if len(transcript_response.segments) > 0:
|
||||
segment = transcript_response.segments[0]
|
||||
self.assertIsNotNone(segment.text)
|
||||
self.assertIsInstance(segment.id, int)
|
||||
else:
|
||||
# Even if no segments, we should have text
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
self.assertGreater(len(transcript_response.text), 0)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("AudioTranscription service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
# Clean up the temporary directory
|
||||
if os.path.exists(temp_dir):
|
||||
shutil.rmtree(temp_dir)
|
||||
|
||||
12
backend/python/moonshine/test.sh
Executable file
12
backend/python/moonshine/test.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
runUnittests
|
||||
|
||||
@@ -26,6 +26,12 @@ fi
|
||||
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
|
||||
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
|
||||
git clone https://github.com/neuphonic/neutts-air neutts-air
|
||||
|
||||
cp -rfv neutts-air/neuttsair ./
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.8.0+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
transformers==4.56.1
|
||||
accelerate
|
||||
librosa==0.11.0
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1+cu118
|
||||
rerankers[transformers]
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1+rocm6.0
|
||||
torch==2.8.0+rocm6.4
|
||||
rerankers[transformers]
|
||||
@@ -1,8 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.7.1+cu118
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
inference
|
||||
peft
|
||||
optimum-quanto
|
||||
@@ -1,6 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchvision==0.22.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchvision==0.23.0+rocm6.4
|
||||
rfdetr
|
||||
opencv-python
|
||||
accelerate
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.7.1+cu118
|
||||
llvmlite==0.43.0
|
||||
numba==0.60.0
|
||||
accelerate
|
||||
transformers
|
||||
bitsandbytes
|
||||
outetts
|
||||
sentence-transformers==5.2.0
|
||||
protobuf==6.33.2
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
accelerate
|
||||
transformers
|
||||
llvmlite==0.43.0
|
||||
|
||||
@@ -17,12 +17,16 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
fi
|
||||
|
||||
# Use python 3.12 for l4t
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
PYTHON_VERSION="3.12"
|
||||
PYTHON_PATCH="12"
|
||||
PY_STANDALONE_TAG="20251120"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
git clone https://github.com/microsoft/VibeVoice.git
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers==4.51.3
|
||||
torchvision==0.22.1
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
torch==2.7.1
|
||||
optimum-quanto
|
||||
ftfy
|
||||
llvmlite>=0.40.0
|
||||
numba>=0.57.0
|
||||
tqdm
|
||||
numpy
|
||||
scipy
|
||||
librosa
|
||||
ml-collections
|
||||
absl-py
|
||||
gradio
|
||||
av
|
||||
@@ -28,7 +28,7 @@ fi
|
||||
|
||||
# We don't embed this into the images as it is a large dependency and not always needed.
|
||||
# Besides, the speed inference are not actually usable in the current state for production use-cases.
|
||||
if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then
|
||||
if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE:-}" == "xtrue" ]; then
|
||||
ensureVenv
|
||||
# https://docs.vllm.ai/en/v0.6.1/getting_started/cpu-installation.html
|
||||
if [ ! -d vllm ]; then
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
flash-attn
|
||||
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
accelerate
|
||||
torch==2.7.0+cu118
|
||||
transformers
|
||||
bitsandbytes
|
||||
@@ -1,4 +1,4 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/nightly/rocm6.3
|
||||
--extra-index-url https://download.pytorch.org/whl/nightly/rocm6.4
|
||||
accelerate
|
||||
torch
|
||||
transformers
|
||||
|
||||
@@ -49,6 +49,8 @@ type ReleaseManager struct {
|
||||
ChecksumsPath string
|
||||
// MetadataPath is where version metadata is stored
|
||||
MetadataPath string
|
||||
// HTTPClient is the HTTP client used for downloads
|
||||
HTTPClient *http.Client
|
||||
}
|
||||
|
||||
// NewReleaseManager creates a new release manager
|
||||
@@ -65,6 +67,9 @@ func NewReleaseManager() *ReleaseManager {
|
||||
CurrentVersion: internal.PrintableVersion(),
|
||||
ChecksumsPath: checksumsPath,
|
||||
MetadataPath: metadataPath,
|
||||
HTTPClient: &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,7 +77,7 @@ func NewReleaseManager() *ReleaseManager {
|
||||
func (rm *ReleaseManager) GetLatestRelease() (*Release, error) {
|
||||
url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", rm.GitHubOwner, rm.GitHubRepo)
|
||||
|
||||
resp, err := http.Get(url)
|
||||
resp, err := rm.HTTPClient.Get(url)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch latest release: %w", err)
|
||||
}
|
||||
@@ -125,18 +130,43 @@ func (rm *ReleaseManager) DownloadRelease(version string, progressCallback func(
|
||||
rm.GitHubOwner, rm.GitHubRepo, version, version)
|
||||
|
||||
checksumPath := filepath.Join(rm.BinaryPath, "checksums.txt")
|
||||
if err := rm.downloadFile(checksumURL, checksumPath, nil); err != nil {
|
||||
return fmt.Errorf("failed to download checksums: %w", err)
|
||||
manualChecksumPath := filepath.Join(rm.ChecksumsPath, fmt.Sprintf("checksums-%s.txt", version))
|
||||
|
||||
// First, check if there's already a checksum file (either manually placed or previously downloaded)
|
||||
// and honor that, skipping download entirely in such case
|
||||
var downloadErr error
|
||||
if _, err := os.Stat(manualChecksumPath); err == nil {
|
||||
log.Printf("Using existing checksums from: %s", manualChecksumPath)
|
||||
checksumPath = manualChecksumPath
|
||||
} else if _, err := os.Stat(checksumPath); err == nil {
|
||||
log.Printf("Using existing checksums from: %s", checksumPath)
|
||||
} else {
|
||||
// No existing checksum file found, try to download
|
||||
downloadErr = rm.downloadFile(checksumURL, checksumPath, nil)
|
||||
|
||||
if downloadErr != nil {
|
||||
log.Printf("Warning: failed to download checksums: %v", downloadErr)
|
||||
log.Printf("Warning: Checksum verification will be skipped. For security, you can manually place checksums at: %s", manualChecksumPath)
|
||||
log.Printf("Download checksums from: %s", checksumURL)
|
||||
// Continue without verification - log warning but don't fail
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the checksum
|
||||
if err := rm.VerifyChecksum(localPath, checksumPath, binaryName); err != nil {
|
||||
return fmt.Errorf("checksum verification failed: %w", err)
|
||||
}
|
||||
// Verify the checksum if we have a checksum file
|
||||
if _, err := os.Stat(checksumPath); err == nil {
|
||||
if err := rm.VerifyChecksum(localPath, checksumPath, binaryName); err != nil {
|
||||
return fmt.Errorf("checksum verification failed: %w", err)
|
||||
}
|
||||
log.Printf("Checksum verification successful")
|
||||
|
||||
// Save checksums persistently for future verification
|
||||
if err := rm.saveChecksums(version, checksumPath, binaryName); err != nil {
|
||||
log.Printf("Warning: failed to save checksums: %v", err)
|
||||
// Save checksums persistently for future verification
|
||||
if downloadErr == nil {
|
||||
if err := rm.saveChecksums(version, checksumPath, binaryName); err != nil {
|
||||
log.Printf("Warning: failed to save checksums: %v", err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Printf("Warning: Proceeding without checksum verification")
|
||||
}
|
||||
|
||||
// Make the binary executable
|
||||
@@ -168,34 +198,61 @@ func (rm *ReleaseManager) GetBinaryName(version string) string {
|
||||
|
||||
// downloadFile downloads a file from a URL to a local path with optional progress callback
|
||||
func (rm *ReleaseManager) downloadFile(url, filepath string, progressCallback func(float64)) error {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return rm.downloadFileWithRetry(url, filepath, progressCallback, 3)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("bad status: %s", resp.Status)
|
||||
}
|
||||
// downloadFileWithRetry downloads a file from a URL with retry logic
|
||||
func (rm *ReleaseManager) downloadFileWithRetry(url, filepath string, progressCallback func(float64), maxRetries int) error {
|
||||
var lastErr error
|
||||
|
||||
out, err := os.Create(filepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
// Create a progress reader if callback is provided
|
||||
var reader io.Reader = resp.Body
|
||||
if progressCallback != nil && resp.ContentLength > 0 {
|
||||
reader = &progressReader{
|
||||
Reader: resp.Body,
|
||||
Total: resp.ContentLength,
|
||||
Callback: progressCallback,
|
||||
for attempt := 1; attempt <= maxRetries; attempt++ {
|
||||
if attempt > 1 {
|
||||
log.Printf("Retrying download (attempt %d/%d): %s", attempt, maxRetries, url)
|
||||
time.Sleep(time.Duration(attempt) * time.Second)
|
||||
}
|
||||
|
||||
resp, err := rm.HTTPClient.Get(url)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
lastErr = fmt.Errorf("bad status: %s", resp.Status)
|
||||
continue
|
||||
}
|
||||
|
||||
out, err := os.Create(filepath)
|
||||
if err != nil {
|
||||
resp.Body.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a progress reader if callback is provided
|
||||
var reader io.Reader = resp.Body
|
||||
if progressCallback != nil && resp.ContentLength > 0 {
|
||||
reader = &progressReader{
|
||||
Reader: resp.Body,
|
||||
Total: resp.ContentLength,
|
||||
Callback: progressCallback,
|
||||
}
|
||||
}
|
||||
|
||||
_, err = io.Copy(out, reader)
|
||||
resp.Body.Close()
|
||||
out.Close()
|
||||
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
os.Remove(filepath)
|
||||
continue
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err = io.Copy(out, reader)
|
||||
return err
|
||||
return fmt.Errorf("failed after %d attempts: %w", maxRetries, lastErr)
|
||||
}
|
||||
|
||||
// saveChecksums saves checksums persistently for future verification
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -37,6 +38,8 @@ var _ = Describe("ReleaseManager", func() {
|
||||
Expect(newRM.GitHubOwner).To(Equal("mudler"))
|
||||
Expect(newRM.GitHubRepo).To(Equal("LocalAI"))
|
||||
Expect(newRM.BinaryPath).To(ContainSubstring(".localai"))
|
||||
Expect(newRM.HTTPClient).ToNot(BeNil())
|
||||
Expect(newRM.HTTPClient.Timeout).To(Equal(30 * time.Second))
|
||||
})
|
||||
})
|
||||
|
||||
|
||||
@@ -382,7 +382,7 @@ func (sm *SystrayManager) showStatusDetails(status, version string) {
|
||||
// showErrorDialog shows a simple error dialog
|
||||
func (sm *SystrayManager) showErrorDialog(title, message string) {
|
||||
fyne.DoAndWait(func() {
|
||||
dialog.ShowError(fmt.Errorf(message), sm.window)
|
||||
dialog.ShowError(fmt.Errorf("%s", message), sm.window)
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -214,6 +214,9 @@ func readRuntimeSettingsJson(startupAppConfig config.ApplicationConfig) fileHand
|
||||
envAutoloadGalleries := appConfig.AutoloadGalleries == startupAppConfig.AutoloadGalleries
|
||||
envAutoloadBackendGalleries := appConfig.AutoloadBackendGalleries == startupAppConfig.AutoloadBackendGalleries
|
||||
envAgentJobRetentionDays := appConfig.AgentJobRetentionDays == startupAppConfig.AgentJobRetentionDays
|
||||
envForceEvictionWhenBusy := appConfig.ForceEvictionWhenBusy == startupAppConfig.ForceEvictionWhenBusy
|
||||
envLRUEvictionMaxRetries := appConfig.LRUEvictionMaxRetries == startupAppConfig.LRUEvictionMaxRetries
|
||||
envLRUEvictionRetryInterval := appConfig.LRUEvictionRetryInterval == startupAppConfig.LRUEvictionRetryInterval
|
||||
|
||||
if len(fileContent) > 0 {
|
||||
var settings config.RuntimeSettings
|
||||
@@ -277,6 +280,20 @@ func readRuntimeSettingsJson(startupAppConfig config.ApplicationConfig) fileHand
|
||||
if settings.MemoryReclaimerThreshold != nil && !envMemoryReclaimerThreshold {
|
||||
appConfig.MemoryReclaimerThreshold = *settings.MemoryReclaimerThreshold
|
||||
}
|
||||
if settings.ForceEvictionWhenBusy != nil && !envForceEvictionWhenBusy {
|
||||
appConfig.ForceEvictionWhenBusy = *settings.ForceEvictionWhenBusy
|
||||
}
|
||||
if settings.LRUEvictionMaxRetries != nil && !envLRUEvictionMaxRetries {
|
||||
appConfig.LRUEvictionMaxRetries = *settings.LRUEvictionMaxRetries
|
||||
}
|
||||
if settings.LRUEvictionRetryInterval != nil && !envLRUEvictionRetryInterval {
|
||||
dur, err := time.ParseDuration(*settings.LRUEvictionRetryInterval)
|
||||
if err == nil {
|
||||
appConfig.LRUEvictionRetryInterval = dur
|
||||
} else {
|
||||
xlog.Warn("invalid LRU eviction retry interval in runtime_settings.json", "error", err, "interval", *settings.LRUEvictionRetryInterval)
|
||||
}
|
||||
}
|
||||
if settings.Threads != nil && !envThreads {
|
||||
appConfig.Threads = *settings.Threads
|
||||
}
|
||||
|
||||
@@ -350,9 +350,16 @@ func initializeWatchdog(application *Application, options *config.ApplicationCon
|
||||
model.WithIdleCheck(options.WatchDogIdle),
|
||||
model.WithLRULimit(lruLimit),
|
||||
model.WithMemoryReclaimer(options.MemoryReclaimerEnabled, options.MemoryReclaimerThreshold),
|
||||
model.WithForceEvictionWhenBusy(options.ForceEvictionWhenBusy),
|
||||
)
|
||||
application.ModelLoader().SetWatchDog(wd)
|
||||
|
||||
// Initialize ModelLoader LRU eviction retry settings
|
||||
application.ModelLoader().SetLRUEvictionRetrySettings(
|
||||
options.LRUEvictionMaxRetries,
|
||||
options.LRUEvictionRetryInterval,
|
||||
)
|
||||
|
||||
// Start watchdog goroutine if any periodic checks are enabled
|
||||
// LRU eviction doesn't need the Run() loop - it's triggered on model load
|
||||
// But memory reclaimer needs the Run() loop for periodic checking
|
||||
|
||||
@@ -35,6 +35,7 @@ func (a *Application) startWatchdog() error {
|
||||
model.WithIdleCheck(appConfig.WatchDogIdle),
|
||||
model.WithLRULimit(lruLimit),
|
||||
model.WithMemoryReclaimer(appConfig.MemoryReclaimerEnabled, appConfig.MemoryReclaimerThreshold),
|
||||
model.WithForceEvictionWhenBusy(appConfig.ForceEvictionWhenBusy),
|
||||
)
|
||||
a.modelLoader.SetWatchDog(wd)
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negative_prompt, src, dst string, loader *model.ModelLoader, modelConfig config.ModelConfig, appConfig *config.ApplicationConfig, refImages []string) (func() error, error) {
|
||||
func ImageGeneration(height, width, step, seed int, positive_prompt, negative_prompt, src, dst string, loader *model.ModelLoader, modelConfig config.ModelConfig, appConfig *config.ApplicationConfig, refImages []string) (func() error, error) {
|
||||
|
||||
opts := ModelOptions(modelConfig, appConfig)
|
||||
inferenceModel, err := loader.Load(
|
||||
@@ -23,7 +23,6 @@ func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negat
|
||||
&proto.GenerateImageRequest{
|
||||
Height: int32(height),
|
||||
Width: int32(width),
|
||||
Mode: int32(mode),
|
||||
Step: int32(step),
|
||||
Seed: int32(seed),
|
||||
CLIPSkip: int32(modelConfig.Diffusers.ClipSkip),
|
||||
|
||||
@@ -36,7 +36,7 @@ func ModelOptions(c config.ModelConfig, so *config.ApplicationConfig, opts ...mo
|
||||
|
||||
c.Threads = &threads
|
||||
|
||||
grpcOpts := grpcModelOpts(c)
|
||||
grpcOpts := grpcModelOpts(c, so.SystemState.Model.ModelsPath)
|
||||
defOpts = append(defOpts, model.WithLoadGRPCLoadModelOpts(grpcOpts))
|
||||
|
||||
if so.ParallelBackendRequests {
|
||||
@@ -72,7 +72,7 @@ func getSeed(c config.ModelConfig) int32 {
|
||||
return seed
|
||||
}
|
||||
|
||||
func grpcModelOpts(c config.ModelConfig) *pb.ModelOptions {
|
||||
func grpcModelOpts(c config.ModelConfig, modelPath string) *pb.ModelOptions {
|
||||
b := 512
|
||||
if c.Batch != 0 {
|
||||
b = c.Batch
|
||||
@@ -131,7 +131,7 @@ func grpcModelOpts(c config.ModelConfig) *pb.ModelOptions {
|
||||
})
|
||||
}
|
||||
|
||||
return &pb.ModelOptions{
|
||||
opts := &pb.ModelOptions{
|
||||
CUDA: c.CUDA || c.Diffusers.CUDA,
|
||||
SchedulerType: c.Diffusers.SchedulerType,
|
||||
GrammarTriggers: triggers,
|
||||
@@ -170,7 +170,6 @@ func grpcModelOpts(c config.ModelConfig) *pb.ModelOptions {
|
||||
LimitImagePerPrompt: int32(c.LimitMMPerPrompt.LimitImagePerPrompt),
|
||||
LimitVideoPerPrompt: int32(c.LimitMMPerPrompt.LimitVideoPerPrompt),
|
||||
LimitAudioPerPrompt: int32(c.LimitMMPerPrompt.LimitAudioPerPrompt),
|
||||
MMProj: c.MMProj,
|
||||
FlashAttention: flashAttention,
|
||||
CacheTypeKey: c.CacheTypeK,
|
||||
CacheTypeValue: c.CacheTypeV,
|
||||
@@ -198,6 +197,12 @@ func grpcModelOpts(c config.ModelConfig) *pb.ModelOptions {
|
||||
// RWKV
|
||||
Tokenizer: c.Tokenizer,
|
||||
}
|
||||
|
||||
if c.MMProj != "" {
|
||||
opts.MMProj = filepath.Join(modelPath, c.MMProj)
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
func gRPCPredictOpts(c config.ModelConfig, modelPath string) *pb.PredictOptions {
|
||||
|
||||
@@ -80,7 +80,9 @@ func (mi *ModelsInstall) Run(ctx *cliContext.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
galleryService := services.NewGalleryService(&config.ApplicationConfig{}, model.NewModelLoader(systemState))
|
||||
galleryService := services.NewGalleryService(&config.ApplicationConfig{
|
||||
SystemState: systemState,
|
||||
}, model.NewModelLoader(systemState))
|
||||
err = galleryService.Start(context.Background(), config.NewModelConfigLoader(mi.ModelsPath), systemState)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -73,10 +73,15 @@ type RunCMD struct {
|
||||
WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"`
|
||||
EnableMemoryReclaimer bool `env:"LOCALAI_MEMORY_RECLAIMER,MEMORY_RECLAIMER,LOCALAI_GPU_RECLAIMER,GPU_RECLAIMER" default:"false" help:"Enable memory threshold monitoring to auto-evict backends when memory usage exceeds threshold (uses GPU VRAM if available, otherwise RAM)" group:"backends"`
|
||||
MemoryReclaimerThreshold float64 `env:"LOCALAI_MEMORY_RECLAIMER_THRESHOLD,MEMORY_RECLAIMER_THRESHOLD,LOCALAI_GPU_RECLAIMER_THRESHOLD,GPU_RECLAIMER_THRESHOLD" default:"0.95" help:"Memory usage threshold (0.0-1.0) that triggers backend eviction (default 0.95 = 95%%)" group:"backends"`
|
||||
ForceEvictionWhenBusy bool `env:"LOCALAI_FORCE_EVICTION_WHEN_BUSY,FORCE_EVICTION_WHEN_BUSY" default:"false" help:"Force eviction even when models have active API calls (default: false for safety)" group:"backends"`
|
||||
LRUEvictionMaxRetries int `env:"LOCALAI_LRU_EVICTION_MAX_RETRIES,LRU_EVICTION_MAX_RETRIES" default:"30" help:"Maximum number of retries when waiting for busy models to become idle before eviction (default: 30)" group:"backends"`
|
||||
LRUEvictionRetryInterval string `env:"LOCALAI_LRU_EVICTION_RETRY_INTERVAL,LRU_EVICTION_RETRY_INTERVAL" default:"1s" help:"Interval between retries when waiting for busy models to become idle (e.g., 1s, 2s) (default: 1s)" group:"backends"`
|
||||
Federated bool `env:"LOCALAI_FEDERATED,FEDERATED" help:"Enable federated instance" group:"federated"`
|
||||
DisableGalleryEndpoint bool `env:"LOCALAI_DISABLE_GALLERY_ENDPOINT,DISABLE_GALLERY_ENDPOINT" help:"Disable the gallery endpoints" group:"api"`
|
||||
MachineTag string `env:"LOCALAI_MACHINE_TAG,MACHINE_TAG" help:"Add Machine-Tag header to each response which is useful to track the machine in the P2P network" group:"api"`
|
||||
LoadToMemory []string `env:"LOCALAI_LOAD_TO_MEMORY,LOAD_TO_MEMORY" help:"A list of models to load into memory at startup" group:"models"`
|
||||
EnableTracing bool `env:"LOCALAI_ENABLE_TRACING,ENABLE_TRACING" help:"Enable API tracing" group:"api"`
|
||||
TracingMaxItems int `env:"LOCALAI_TRACING_MAX_ITEMS" default:"1024" help:"Maximum number of traces to keep" group:"api"`
|
||||
AgentJobRetentionDays int `env:"LOCALAI_AGENT_JOB_RETENTION_DAYS,AGENT_JOB_RETENTION_DAYS" default:"30" help:"Number of days to keep agent job history (default: 30)" group:"api"`
|
||||
|
||||
Version bool
|
||||
@@ -149,6 +154,15 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
opts = append(opts, config.DisableRuntimeSettings)
|
||||
}
|
||||
|
||||
if r.EnableTracing {
|
||||
opts = append(opts, config.EnableTracing)
|
||||
}
|
||||
|
||||
if r.EnableTracing {
|
||||
opts = append(opts, config.EnableTracing)
|
||||
}
|
||||
opts = append(opts, config.WithTracingMaxItems(r.TracingMaxItems))
|
||||
|
||||
token := ""
|
||||
if r.Peer2Peer || r.Peer2PeerToken != "" {
|
||||
xlog.Info("P2P mode enabled")
|
||||
@@ -220,6 +234,21 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
opts = append(opts, config.EnableSingleBackend)
|
||||
}
|
||||
|
||||
// Handle LRU eviction settings
|
||||
if r.ForceEvictionWhenBusy {
|
||||
opts = append(opts, config.WithForceEvictionWhenBusy(true))
|
||||
}
|
||||
if r.LRUEvictionMaxRetries > 0 {
|
||||
opts = append(opts, config.WithLRUEvictionMaxRetries(r.LRUEvictionMaxRetries))
|
||||
}
|
||||
if r.LRUEvictionRetryInterval != "" {
|
||||
dur, err := time.ParseDuration(r.LRUEvictionRetryInterval)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid LRU eviction retry interval: %w", err)
|
||||
}
|
||||
opts = append(opts, config.WithLRUEvictionRetryInterval(dur))
|
||||
}
|
||||
|
||||
// split ":" to get backend name and the uri
|
||||
for _, v := range r.ExternalGRPCBackends {
|
||||
backend := v[:strings.IndexByte(v, ':')]
|
||||
|
||||
@@ -19,6 +19,8 @@ type ApplicationConfig struct {
|
||||
UploadLimitMB, Threads, ContextSize int
|
||||
F16 bool
|
||||
Debug bool
|
||||
EnableTracing bool
|
||||
TracingMaxItems int
|
||||
GeneratedContentDir string
|
||||
|
||||
UploadDir string
|
||||
@@ -64,6 +66,11 @@ type ApplicationConfig struct {
|
||||
MemoryReclaimerEnabled bool // Enable memory threshold monitoring
|
||||
MemoryReclaimerThreshold float64 // Threshold 0.0-1.0 (e.g., 0.95 = 95%)
|
||||
|
||||
// Eviction settings
|
||||
ForceEvictionWhenBusy bool // Force eviction even when models have active API calls (default: false for safety)
|
||||
LRUEvictionMaxRetries int // Maximum number of retries when waiting for busy models to become idle (default: 30)
|
||||
LRUEvictionRetryInterval time.Duration // Interval between retries when waiting for busy models (default: 1s)
|
||||
|
||||
ModelsURL []string
|
||||
|
||||
WatchDogBusyTimeout, WatchDogIdleTimeout time.Duration
|
||||
@@ -86,10 +93,13 @@ type AppOption func(*ApplicationConfig)
|
||||
|
||||
func NewApplicationConfig(o ...AppOption) *ApplicationConfig {
|
||||
opt := &ApplicationConfig{
|
||||
Context: context.Background(),
|
||||
UploadLimitMB: 15,
|
||||
Debug: true,
|
||||
AgentJobRetentionDays: 30, // Default: 30 days
|
||||
Context: context.Background(),
|
||||
UploadLimitMB: 15,
|
||||
Debug: true,
|
||||
AgentJobRetentionDays: 30, // Default: 30 days
|
||||
LRUEvictionMaxRetries: 30, // Default: 30 retries
|
||||
LRUEvictionRetryInterval: 1 * time.Second, // Default: 1 second
|
||||
TracingMaxItems: 1024,
|
||||
PathWithoutAuth: []string{
|
||||
"/static/",
|
||||
"/generated-audio/",
|
||||
@@ -158,6 +168,10 @@ var EnableWatchDog = func(o *ApplicationConfig) {
|
||||
o.WatchDog = true
|
||||
}
|
||||
|
||||
var EnableTracing = func(o *ApplicationConfig) {
|
||||
o.EnableTracing = true
|
||||
}
|
||||
|
||||
var EnableWatchDogIdleCheck = func(o *ApplicationConfig) {
|
||||
o.WatchDog = true
|
||||
o.WatchDogIdle = true
|
||||
@@ -259,6 +273,31 @@ func (o *ApplicationConfig) GetEffectiveMaxActiveBackends() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// WithForceEvictionWhenBusy sets whether to force eviction even when models have active API calls
|
||||
func WithForceEvictionWhenBusy(enabled bool) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.ForceEvictionWhenBusy = enabled
|
||||
}
|
||||
}
|
||||
|
||||
// WithLRUEvictionMaxRetries sets the maximum number of retries when waiting for busy models to become idle
|
||||
func WithLRUEvictionMaxRetries(maxRetries int) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
if maxRetries > 0 {
|
||||
o.LRUEvictionMaxRetries = maxRetries
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithLRUEvictionRetryInterval sets the interval between retries when waiting for busy models
|
||||
func WithLRUEvictionRetryInterval(interval time.Duration) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
if interval > 0 {
|
||||
o.LRUEvictionRetryInterval = interval
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var EnableParallelBackendRequests = func(o *ApplicationConfig) {
|
||||
o.ParallelBackendRequests = true
|
||||
}
|
||||
@@ -386,6 +425,12 @@ func WithDebug(debug bool) AppOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithTracingMaxItems(items int) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.TracingMaxItems = items
|
||||
}
|
||||
}
|
||||
|
||||
func WithGeneratedContentDir(generatedContentDir string) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.GeneratedContentDir = generatedContentDir
|
||||
@@ -505,10 +550,14 @@ func (o *ApplicationConfig) ToRuntimeSettings() RuntimeSettings {
|
||||
parallelBackendRequests := o.ParallelBackendRequests
|
||||
memoryReclaimerEnabled := o.MemoryReclaimerEnabled
|
||||
memoryReclaimerThreshold := o.MemoryReclaimerThreshold
|
||||
forceEvictionWhenBusy := o.ForceEvictionWhenBusy
|
||||
lruEvictionMaxRetries := o.LRUEvictionMaxRetries
|
||||
threads := o.Threads
|
||||
contextSize := o.ContextSize
|
||||
f16 := o.F16
|
||||
debug := o.Debug
|
||||
tracingMaxItems := o.TracingMaxItems
|
||||
enableTracing := o.EnableTracing
|
||||
cors := o.CORS
|
||||
csrf := o.CSRF
|
||||
corsAllowOrigins := o.CORSAllowOrigins
|
||||
@@ -539,6 +588,12 @@ func (o *ApplicationConfig) ToRuntimeSettings() RuntimeSettings {
|
||||
} else {
|
||||
watchdogInterval = "2s" // default
|
||||
}
|
||||
var lruEvictionRetryInterval string
|
||||
if o.LRUEvictionRetryInterval > 0 {
|
||||
lruEvictionRetryInterval = o.LRUEvictionRetryInterval.String()
|
||||
} else {
|
||||
lruEvictionRetryInterval = "1s" // default
|
||||
}
|
||||
|
||||
return RuntimeSettings{
|
||||
WatchdogEnabled: &watchdogEnabled,
|
||||
@@ -552,10 +607,15 @@ func (o *ApplicationConfig) ToRuntimeSettings() RuntimeSettings {
|
||||
ParallelBackendRequests: ¶llelBackendRequests,
|
||||
MemoryReclaimerEnabled: &memoryReclaimerEnabled,
|
||||
MemoryReclaimerThreshold: &memoryReclaimerThreshold,
|
||||
ForceEvictionWhenBusy: &forceEvictionWhenBusy,
|
||||
LRUEvictionMaxRetries: &lruEvictionMaxRetries,
|
||||
LRUEvictionRetryInterval: &lruEvictionRetryInterval,
|
||||
Threads: &threads,
|
||||
ContextSize: &contextSize,
|
||||
F16: &f16,
|
||||
Debug: &debug,
|
||||
TracingMaxItems: &tracingMaxItems,
|
||||
EnableTracing: &enableTracing,
|
||||
CORS: &cors,
|
||||
CSRF: &csrf,
|
||||
CORSAllowOrigins: &corsAllowOrigins,
|
||||
@@ -644,6 +704,20 @@ func (o *ApplicationConfig) ApplyRuntimeSettings(settings *RuntimeSettings) (req
|
||||
requireRestart = true
|
||||
}
|
||||
}
|
||||
if settings.ForceEvictionWhenBusy != nil {
|
||||
o.ForceEvictionWhenBusy = *settings.ForceEvictionWhenBusy
|
||||
// This setting doesn't require restart, can be updated dynamically
|
||||
}
|
||||
if settings.LRUEvictionMaxRetries != nil {
|
||||
o.LRUEvictionMaxRetries = *settings.LRUEvictionMaxRetries
|
||||
// This setting doesn't require restart, can be updated dynamically
|
||||
}
|
||||
if settings.LRUEvictionRetryInterval != nil {
|
||||
if dur, err := time.ParseDuration(*settings.LRUEvictionRetryInterval); err == nil {
|
||||
o.LRUEvictionRetryInterval = dur
|
||||
// This setting doesn't require restart, can be updated dynamically
|
||||
}
|
||||
}
|
||||
if settings.Threads != nil {
|
||||
o.Threads = *settings.Threads
|
||||
}
|
||||
@@ -656,6 +730,12 @@ func (o *ApplicationConfig) ApplyRuntimeSettings(settings *RuntimeSettings) (req
|
||||
if settings.Debug != nil {
|
||||
o.Debug = *settings.Debug
|
||||
}
|
||||
if settings.EnableTracing != nil {
|
||||
o.EnableTracing = *settings.EnableTracing
|
||||
}
|
||||
if settings.TracingMaxItems != nil {
|
||||
o.TracingMaxItems = *settings.TracingMaxItems
|
||||
}
|
||||
if settings.CORS != nil {
|
||||
o.CORS = *settings.CORS
|
||||
}
|
||||
|
||||
@@ -501,7 +501,13 @@ func (c *ModelConfig) Validate() (bool, error) {
|
||||
if !re.MatchString(c.Backend) {
|
||||
return false, fmt.Errorf("invalid backend name: %s", c.Backend)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// Validate MCP configuration if present
|
||||
if c.MCP.Servers != "" || c.MCP.Stdio != "" {
|
||||
if _, _, err := c.MCP.MCPConfigFromYAML(); err != nil {
|
||||
return false, fmt.Errorf("invalid MCP configuration: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
|
||||
@@ -169,8 +169,10 @@ func (bcl *ModelConfigLoader) LoadMultipleModelConfigsSingleFile(file string, op
|
||||
}
|
||||
|
||||
for _, cc := range c {
|
||||
if valid, _ := cc.Validate(); valid {
|
||||
if valid, err := cc.Validate(); valid {
|
||||
bcl.configs[cc.Name] = *cc
|
||||
} else {
|
||||
xlog.Warn("skipping invalid model config", "name", cc.Name, "error", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -184,9 +186,12 @@ func (bcl *ModelConfigLoader) ReadModelConfig(file string, opts ...ConfigLoaderO
|
||||
return fmt.Errorf("ReadModelConfig cannot read config file %q: %w", file, err)
|
||||
}
|
||||
|
||||
if valid, _ := c.Validate(); valid {
|
||||
if valid, err := c.Validate(); valid {
|
||||
bcl.configs[c.Name] = *c
|
||||
} else {
|
||||
if err != nil {
|
||||
return fmt.Errorf("config is not valid: %w", err)
|
||||
}
|
||||
return fmt.Errorf("config is not valid")
|
||||
}
|
||||
|
||||
@@ -364,10 +369,10 @@ func (bcl *ModelConfigLoader) LoadModelConfigsFromPath(path string, opts ...Conf
|
||||
xlog.Error("LoadModelConfigsFromPath cannot read config file", "error", err, "File Name", file.Name())
|
||||
continue
|
||||
}
|
||||
if valid, _ := c.Validate(); valid {
|
||||
if valid, validationErr := c.Validate(); valid {
|
||||
bcl.configs[c.Name] = *c
|
||||
} else {
|
||||
xlog.Error("config is not valid", "error", err, "Name", c.Name)
|
||||
xlog.Error("config is not valid", "error", validationErr, "Name", c.Name)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -166,4 +166,63 @@ parameters:
|
||||
Expect(i.HasUsecases(FLAG_COMPLETION)).To(BeTrue())
|
||||
Expect(i.HasUsecases(FLAG_CHAT)).To(BeTrue())
|
||||
})
|
||||
It("Test Validate with invalid MCP config", func() {
|
||||
tmp, err := os.CreateTemp("", "config.yaml")
|
||||
Expect(err).To(BeNil())
|
||||
defer os.Remove(tmp.Name())
|
||||
_, err = tmp.WriteString(
|
||||
`name: test-mcp
|
||||
backend: "llama-cpp"
|
||||
mcp:
|
||||
stdio: |
|
||||
{
|
||||
"mcpServers": {
|
||||
"ddg": {
|
||||
"command": "/docker/docker",
|
||||
"args": ["run", "-i"]
|
||||
}
|
||||
"weather": {
|
||||
"command": "/docker/docker",
|
||||
"args": ["run", "-i"]
|
||||
}
|
||||
}
|
||||
}`)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
config, err := readModelConfigFromFile(tmp.Name())
|
||||
Expect(err).To(BeNil())
|
||||
Expect(config).ToNot(BeNil())
|
||||
valid, err := config.Validate()
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(valid).To(BeFalse())
|
||||
Expect(err.Error()).To(ContainSubstring("invalid MCP configuration"))
|
||||
})
|
||||
It("Test Validate with valid MCP config", func() {
|
||||
tmp, err := os.CreateTemp("", "config.yaml")
|
||||
Expect(err).To(BeNil())
|
||||
defer os.Remove(tmp.Name())
|
||||
_, err = tmp.WriteString(
|
||||
`name: test-mcp-valid
|
||||
backend: "llama-cpp"
|
||||
mcp:
|
||||
stdio: |
|
||||
{
|
||||
"mcpServers": {
|
||||
"ddg": {
|
||||
"command": "/docker/docker",
|
||||
"args": ["run", "-i"]
|
||||
},
|
||||
"weather": {
|
||||
"command": "/docker/docker",
|
||||
"args": ["run", "-i"]
|
||||
}
|
||||
}
|
||||
}`)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
config, err := readModelConfigFromFile(tmp.Name())
|
||||
Expect(err).To(BeNil())
|
||||
Expect(config).ToNot(BeNil())
|
||||
valid, err := config.Validate()
|
||||
Expect(err).To(BeNil())
|
||||
Expect(valid).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -26,11 +26,18 @@ type RuntimeSettings struct {
|
||||
MemoryReclaimerEnabled *bool `json:"memory_reclaimer_enabled,omitempty"` // Enable memory threshold monitoring
|
||||
MemoryReclaimerThreshold *float64 `json:"memory_reclaimer_threshold,omitempty"` // Threshold 0.0-1.0 (e.g., 0.95 = 95%)
|
||||
|
||||
// Eviction settings
|
||||
ForceEvictionWhenBusy *bool `json:"force_eviction_when_busy,omitempty"` // Force eviction even when models have active API calls (default: false for safety)
|
||||
LRUEvictionMaxRetries *int `json:"lru_eviction_max_retries,omitempty"` // Maximum number of retries when waiting for busy models to become idle (default: 30)
|
||||
LRUEvictionRetryInterval *string `json:"lru_eviction_retry_interval,omitempty"` // Interval between retries when waiting for busy models (e.g., 1s, 2s) (default: 1s)
|
||||
|
||||
// Performance settings
|
||||
Threads *int `json:"threads,omitempty"`
|
||||
ContextSize *int `json:"context_size,omitempty"`
|
||||
F16 *bool `json:"f16,omitempty"`
|
||||
Debug *bool `json:"debug,omitempty"`
|
||||
Threads *int `json:"threads,omitempty"`
|
||||
ContextSize *int `json:"context_size,omitempty"`
|
||||
F16 *bool `json:"f16,omitempty"`
|
||||
Debug *bool `json:"debug,omitempty"`
|
||||
EnableTracing *bool `json:"enable_tracing,omitempty"`
|
||||
TracingMaxItems *int `json:"tracing_max_items,omitempty"`
|
||||
|
||||
// Security/CORS settings
|
||||
CORS *bool `json:"cors,omitempty"`
|
||||
|
||||
@@ -63,6 +63,25 @@ func (m *GalleryBackend) IsMeta() bool {
|
||||
return len(m.CapabilitiesMap) > 0 && m.URI == ""
|
||||
}
|
||||
|
||||
// IsCompatibleWith checks if the backend is compatible with the current system capability.
|
||||
// For meta backends, it checks if any of the capabilities in the map match the system capability.
|
||||
// For concrete backends, it delegates to SystemState.IsBackendCompatible.
|
||||
func (m *GalleryBackend) IsCompatibleWith(systemState *system.SystemState) bool {
|
||||
if systemState == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
// Meta backends are compatible if the system capability matches one of the keys
|
||||
if m.IsMeta() {
|
||||
capability := systemState.Capability(m.CapabilitiesMap)
|
||||
_, exists := m.CapabilitiesMap[capability]
|
||||
return exists
|
||||
}
|
||||
|
||||
// For concrete backends, delegate to the system package
|
||||
return systemState.IsBackendCompatible(m.Name, m.URI)
|
||||
}
|
||||
|
||||
func (m *GalleryBackend) SetInstalled(installed bool) {
|
||||
m.Installed = installed
|
||||
}
|
||||
|
||||
@@ -172,6 +172,252 @@ var _ = Describe("Gallery Backends", func() {
|
||||
Expect(nilMetaBackend.IsMeta()).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should check IsCompatibleWith correctly for meta backends", func() {
|
||||
metaBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "meta-backend",
|
||||
},
|
||||
CapabilitiesMap: map[string]string{
|
||||
"nvidia": "nvidia-backend",
|
||||
"amd": "amd-backend",
|
||||
"default": "default-backend",
|
||||
},
|
||||
}
|
||||
|
||||
// Test with nil state - should be compatible
|
||||
Expect(metaBackend.IsCompatibleWith(nil)).To(BeTrue())
|
||||
|
||||
// Test with NVIDIA system - should be compatible (has nvidia key)
|
||||
nvidiaState := &system.SystemState{GPUVendor: "nvidia", VRAM: 8 * 1024 * 1024 * 1024}
|
||||
Expect(metaBackend.IsCompatibleWith(nvidiaState)).To(BeTrue())
|
||||
|
||||
// Test with default (no GPU) - should be compatible (has default key)
|
||||
defaultState := &system.SystemState{}
|
||||
Expect(metaBackend.IsCompatibleWith(defaultState)).To(BeTrue())
|
||||
})
|
||||
|
||||
Describe("IsCompatibleWith for concrete backends", func() {
|
||||
Context("CPU backends", func() {
|
||||
It("should be compatible on all systems", func() {
|
||||
cpuBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cpu-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-cpu-llama-cpp",
|
||||
}
|
||||
Expect(cpuBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
Expect(cpuBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
Expect(cpuBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Darwin/Metal backends", func() {
|
||||
When("running on darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS != "darwin" {
|
||||
Skip("Skipping darwin-specific tests on non-darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should be compatible for MLX backend", func() {
|
||||
mlxBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "mlx",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx",
|
||||
}
|
||||
Expect(mlxBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible for metal-llama-cpp backend", func() {
|
||||
metalBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "metal-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-llama-cpp",
|
||||
}
|
||||
Expect(metalBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping non-darwin-specific tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible for MLX backend", func() {
|
||||
mlxBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "mlx",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx",
|
||||
}
|
||||
Expect(mlxBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should NOT be compatible for metal-llama-cpp backend", func() {
|
||||
metalBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "metal-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-llama-cpp",
|
||||
}
|
||||
Expect(metalBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("NVIDIA/CUDA backends", func() {
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping CUDA tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible without nvidia GPU", func() {
|
||||
cudaBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cuda12-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp",
|
||||
}
|
||||
Expect(cudaBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
Expect(cudaBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should be compatible with nvidia GPU", func() {
|
||||
cudaBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cuda12-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp",
|
||||
}
|
||||
Expect(cudaBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with cuda13 backend on nvidia GPU", func() {
|
||||
cuda13Backend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "cuda13-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-llama-cpp",
|
||||
}
|
||||
Expect(cuda13Backend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("AMD/ROCm backends", func() {
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping AMD/ROCm tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible without AMD GPU", func() {
|
||||
rocmBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "rocm-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-llama-cpp",
|
||||
}
|
||||
Expect(rocmBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
Expect(rocmBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should be compatible with AMD GPU", func() {
|
||||
rocmBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "rocm-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-llama-cpp",
|
||||
}
|
||||
Expect(rocmBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with hipblas backend on AMD GPU", func() {
|
||||
hipBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "hip-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-hip-llama-cpp",
|
||||
}
|
||||
Expect(hipBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.AMD, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("Intel/SYCL backends", func() {
|
||||
When("running on non-darwin", func() {
|
||||
BeforeEach(func() {
|
||||
if runtime.GOOS == "darwin" {
|
||||
Skip("Skipping Intel/SYCL tests on darwin system")
|
||||
}
|
||||
})
|
||||
|
||||
It("should NOT be compatible without Intel GPU", func() {
|
||||
intelBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-sycl-f16-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-llama-cpp",
|
||||
}
|
||||
Expect(intelBackend.IsCompatibleWith(&system.SystemState{})).To(BeFalse())
|
||||
Expect(intelBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Nvidia, VRAM: 8 * 1024 * 1024 * 1024})).To(BeFalse())
|
||||
})
|
||||
|
||||
It("should be compatible with Intel GPU", func() {
|
||||
intelBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-sycl-f16-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-llama-cpp",
|
||||
}
|
||||
Expect(intelBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Intel, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with intel-sycl-f32 backend on Intel GPU", func() {
|
||||
intelF32Backend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-sycl-f32-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-llama-cpp",
|
||||
}
|
||||
Expect(intelF32Backend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Intel, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
|
||||
It("should be compatible with intel-transformers backend on Intel GPU", func() {
|
||||
intelTransformersBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "intel-transformers",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-intel-transformers",
|
||||
}
|
||||
Expect(intelTransformersBackend.IsCompatibleWith(&system.SystemState{GPUVendor: system.Intel, VRAM: 8 * 1024 * 1024 * 1024})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("Vulkan backends", func() {
|
||||
It("should be compatible on CPU-only systems", func() {
|
||||
// Vulkan backends don't have a specific GPU vendor requirement in the current logic
|
||||
// They are compatible if no other GPU-specific pattern matches
|
||||
vulkanBackend := &GalleryBackend{
|
||||
Metadata: Metadata{
|
||||
Name: "vulkan-llama-cpp",
|
||||
},
|
||||
URI: "quay.io/go-skynet/local-ai-backends:latest-gpu-vulkan-llama-cpp",
|
||||
}
|
||||
// Vulkan doesn't have vendor-specific filtering in current implementation
|
||||
Expect(vulkanBackend.IsCompatibleWith(&system.SystemState{})).To(BeTrue())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
It("should find best backend from meta based on system capabilities", func() {
|
||||
|
||||
metaBackend := &GalleryBackend{
|
||||
|
||||
@@ -226,6 +226,16 @@ func AvailableGalleryModels(galleries []config.Gallery, systemState *system.Syst
|
||||
|
||||
// List available backends
|
||||
func AvailableBackends(galleries []config.Gallery, systemState *system.SystemState) (GalleryElements[*GalleryBackend], error) {
|
||||
return availableBackendsWithFilter(galleries, systemState, true)
|
||||
}
|
||||
|
||||
// AvailableBackendsUnfiltered returns all available backends without filtering by system capability.
|
||||
func AvailableBackendsUnfiltered(galleries []config.Gallery, systemState *system.SystemState) (GalleryElements[*GalleryBackend], error) {
|
||||
return availableBackendsWithFilter(galleries, systemState, false)
|
||||
}
|
||||
|
||||
// availableBackendsWithFilter is a helper function that lists available backends with optional filtering.
|
||||
func availableBackendsWithFilter(galleries []config.Gallery, systemState *system.SystemState, filterByCapability bool) (GalleryElements[*GalleryBackend], error) {
|
||||
var backends []*GalleryBackend
|
||||
|
||||
systemBackends, err := ListSystemBackends(systemState)
|
||||
@@ -241,7 +251,17 @@ func AvailableBackends(galleries []config.Gallery, systemState *system.SystemSta
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
backends = append(backends, galleryBackends...)
|
||||
|
||||
// Filter backends by system capability if requested
|
||||
if filterByCapability {
|
||||
for _, backend := range galleryBackends {
|
||||
if backend.IsCompatibleWith(systemState) {
|
||||
backends = append(backends, backend)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
backends = append(backends, galleryBackends...)
|
||||
}
|
||||
}
|
||||
|
||||
return backends, nil
|
||||
|
||||
@@ -123,8 +123,10 @@ func InstallModelFromGallery(
|
||||
config.Files = append(config.Files, model.AdditionalFiles...)
|
||||
|
||||
// TODO model.Overrides could be merged with user overrides (not defined yet)
|
||||
if err := mergo.Merge(&model.Overrides, req.Overrides, mergo.WithOverride); err != nil {
|
||||
return err
|
||||
if req.Overrides != nil {
|
||||
if err := mergo.Merge(&model.Overrides, req.Overrides, mergo.WithOverride); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
installedModel, err := InstallModel(ctx, systemState, installName, &config, model.Overrides, downloadStatus, enforceScan)
|
||||
@@ -245,8 +247,10 @@ func InstallModel(ctx context.Context, systemState *system.SystemState, nameOver
|
||||
|
||||
configMap["name"] = name
|
||||
|
||||
if err := mergo.Merge(&configMap, configOverrides, mergo.WithOverride); err != nil {
|
||||
return nil, err
|
||||
if configOverrides != nil {
|
||||
if err := mergo.Merge(&configMap, configOverrides, mergo.WithOverride); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Write updated config file
|
||||
|
||||
@@ -184,6 +184,26 @@ var _ = Describe("Model test", func() {
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
It("handles nil configOverrides without panic", func() {
|
||||
tempdir, err := os.MkdirTemp("", "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tempdir)
|
||||
c, err := ReadConfigFile[ModelConfig](filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
systemState, err := system.GetSystemState(
|
||||
system.WithModelPath(tempdir),
|
||||
)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
_, err = InstallModel(context.TODO(), systemState, "test-model", c, nil, func(string, string, string, float64) {}, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, f := range []string{"cerebras", "cerebras-completion.tmpl", "cerebras-chat.tmpl", "test-model.yaml"} {
|
||||
_, err = os.Stat(filepath.Join(tempdir, f))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
})
|
||||
|
||||
It("does not delete shared model files when one config is deleted", func() {
|
||||
tempdir, err := os.MkdirTemp("", "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -205,6 +205,7 @@ func API(application *application.Application) (*echo.Echo, error) {
|
||||
|
||||
routes.RegisterLocalAIRoutes(e, requestExtractor, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application.TemplatesEvaluator(), application)
|
||||
routes.RegisterOpenAIRoutes(e, requestExtractor, application)
|
||||
routes.RegisterAnthropicRoutes(e, requestExtractor, application)
|
||||
if !application.ApplicationConfig().DisableWebUI {
|
||||
routes.RegisterUIAPIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService(), opcache, application)
|
||||
routes.RegisterUIRoutes(e, application.ModelConfigLoader(), application.ModelLoader(), application.ApplicationConfig(), application.GalleryService())
|
||||
|
||||
537
core/http/endpoints/anthropic/messages.go
Normal file
537
core/http/endpoints/anthropic/messages.go
Normal file
@@ -0,0 +1,537 @@
|
||||
package anthropic
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http/middleware"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/templates"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/xlog"
|
||||
)
|
||||
|
||||
// MessagesEndpoint is the Anthropic Messages API endpoint
|
||||
// https://docs.anthropic.com/claude/reference/messages_post
|
||||
// @Summary Generate a message response for the given messages and model.
|
||||
// @Param request body schema.AnthropicRequest true "query params"
|
||||
// @Success 200 {object} schema.AnthropicResponse "Response"
|
||||
// @Router /v1/messages [post]
|
||||
func MessagesEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
id := uuid.New().String()
|
||||
|
||||
input, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_LOCALAI_REQUEST).(*schema.AnthropicRequest)
|
||||
if !ok || input.Model == "" {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "model is required")
|
||||
}
|
||||
|
||||
cfg, ok := c.Get(middleware.CONTEXT_LOCALS_KEY_MODEL_CONFIG).(*config.ModelConfig)
|
||||
if !ok || cfg == nil {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "model configuration not found")
|
||||
}
|
||||
|
||||
if input.MaxTokens <= 0 {
|
||||
return sendAnthropicError(c, 400, "invalid_request_error", "max_tokens is required and must be greater than 0")
|
||||
}
|
||||
|
||||
xlog.Debug("Anthropic Messages endpoint configuration read", "config", cfg)
|
||||
|
||||
// Convert Anthropic messages to OpenAI format for internal processing
|
||||
openAIMessages := convertAnthropicToOpenAIMessages(input)
|
||||
|
||||
// Convert Anthropic tools to internal Functions format
|
||||
funcs, shouldUseFn := convertAnthropicTools(input, cfg)
|
||||
|
||||
// Create an OpenAI-compatible request for internal processing
|
||||
openAIReq := &schema.OpenAIRequest{
|
||||
PredictionOptions: schema.PredictionOptions{
|
||||
BasicModelRequest: schema.BasicModelRequest{Model: input.Model},
|
||||
Temperature: input.Temperature,
|
||||
TopK: input.TopK,
|
||||
TopP: input.TopP,
|
||||
Maxtokens: &input.MaxTokens,
|
||||
},
|
||||
Messages: openAIMessages,
|
||||
Stream: input.Stream,
|
||||
Context: input.Context,
|
||||
Cancel: input.Cancel,
|
||||
}
|
||||
|
||||
// Set stop sequences
|
||||
if len(input.StopSequences) > 0 {
|
||||
openAIReq.Stop = input.StopSequences
|
||||
}
|
||||
|
||||
// Merge config settings
|
||||
if input.Temperature != nil {
|
||||
cfg.Temperature = input.Temperature
|
||||
}
|
||||
if input.TopK != nil {
|
||||
cfg.TopK = input.TopK
|
||||
}
|
||||
if input.TopP != nil {
|
||||
cfg.TopP = input.TopP
|
||||
}
|
||||
cfg.Maxtokens = &input.MaxTokens
|
||||
if len(input.StopSequences) > 0 {
|
||||
cfg.StopWords = append(cfg.StopWords, input.StopSequences...)
|
||||
}
|
||||
|
||||
// Template the prompt with tools if available
|
||||
predInput := evaluator.TemplateMessages(*openAIReq, openAIReq.Messages, cfg, funcs, shouldUseFn)
|
||||
xlog.Debug("Anthropic Messages - Prompt (after templating)", "prompt", predInput)
|
||||
|
||||
if input.Stream {
|
||||
return handleAnthropicStream(c, id, input, cfg, ml, predInput, openAIReq, funcs, shouldUseFn)
|
||||
}
|
||||
|
||||
return handleAnthropicNonStream(c, id, input, cfg, ml, predInput, openAIReq, funcs, shouldUseFn)
|
||||
}
|
||||
}
|
||||
|
||||
func handleAnthropicNonStream(c echo.Context, id string, input *schema.AnthropicRequest, cfg *config.ModelConfig, ml *model.ModelLoader, predInput string, openAIReq *schema.OpenAIRequest, funcs functions.Functions, shouldUseFn bool) error {
|
||||
images := []string{}
|
||||
for _, m := range openAIReq.Messages {
|
||||
images = append(images, m.StringImages...)
|
||||
}
|
||||
|
||||
predFunc, err := backend.ModelInference(
|
||||
input.Context, predInput, openAIReq.Messages, images, nil, nil, ml, cfg, nil, nil, nil, "", "", nil, nil, nil)
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic model inference failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("model inference failed: %v", err))
|
||||
}
|
||||
|
||||
prediction, err := predFunc()
|
||||
if err != nil {
|
||||
xlog.Error("Anthropic prediction failed", "error", err)
|
||||
return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("prediction failed: %v", err))
|
||||
}
|
||||
|
||||
result := backend.Finetune(*cfg, predInput, prediction.Response)
|
||||
|
||||
// Check if the result contains tool calls
|
||||
toolCalls := functions.ParseFunctionCall(result, cfg.FunctionsConfig)
|
||||
|
||||
var contentBlocks []schema.AnthropicContentBlock
|
||||
var stopReason string
|
||||
|
||||
if shouldUseFn && len(toolCalls) > 0 {
|
||||
// Model wants to use tools
|
||||
stopReason = "tool_use"
|
||||
for _, tc := range toolCalls {
|
||||
// Parse arguments as JSON
|
||||
var inputArgs map[string]interface{}
|
||||
if err := json.Unmarshal([]byte(tc.Arguments), &inputArgs); err != nil {
|
||||
xlog.Warn("Failed to parse tool call arguments as JSON", "error", err, "args", tc.Arguments)
|
||||
inputArgs = map[string]interface{}{"raw": tc.Arguments}
|
||||
}
|
||||
|
||||
contentBlocks = append(contentBlocks, schema.AnthropicContentBlock{
|
||||
Type: "tool_use",
|
||||
ID: fmt.Sprintf("toolu_%s_%d", id, len(contentBlocks)),
|
||||
Name: tc.Name,
|
||||
Input: inputArgs,
|
||||
})
|
||||
}
|
||||
|
||||
// Add any text content before the tool calls
|
||||
textContent := functions.ParseTextContent(result, cfg.FunctionsConfig)
|
||||
if textContent != "" {
|
||||
// Prepend text block
|
||||
contentBlocks = append([]schema.AnthropicContentBlock{{Type: "text", Text: textContent}}, contentBlocks...)
|
||||
}
|
||||
} else {
|
||||
// Normal text response
|
||||
stopReason = "end_turn"
|
||||
contentBlocks = []schema.AnthropicContentBlock{
|
||||
{Type: "text", Text: result},
|
||||
}
|
||||
}
|
||||
|
||||
resp := &schema.AnthropicResponse{
|
||||
ID: fmt.Sprintf("msg_%s", id),
|
||||
Type: "message",
|
||||
Role: "assistant",
|
||||
Model: input.Model,
|
||||
StopReason: &stopReason,
|
||||
Content: contentBlocks,
|
||||
Usage: schema.AnthropicUsage{
|
||||
InputTokens: prediction.Usage.Prompt,
|
||||
OutputTokens: prediction.Usage.Completion,
|
||||
},
|
||||
}
|
||||
|
||||
if respData, err := json.Marshal(resp); err == nil {
|
||||
xlog.Debug("Anthropic Response", "response", string(respData))
|
||||
}
|
||||
|
||||
return c.JSON(200, resp)
|
||||
}
|
||||
|
||||
// handleAnthropicStream serves an Anthropic Messages API response over SSE.
// It emits the Anthropic streaming event sequence in order:
// message_start -> content_block_start -> content_block_delta* ->
// content_block_stop -> message_delta (with stop_reason) -> message_stop.
// When shouldUseFn is true it additionally watches the accumulated output for
// tool calls and converts them into tool_use content blocks on the fly.
// Errors before inference starts are reported via sendAnthropicError; SSE
// write failures are logged inside sendAnthropicSSE and otherwise ignored.
func handleAnthropicStream(c echo.Context, id string, input *schema.AnthropicRequest, cfg *config.ModelConfig, ml *model.ModelLoader, predInput string, openAIReq *schema.OpenAIRequest, funcs functions.Functions, shouldUseFn bool) error {
	// Standard SSE headers; Flush() in sendAnthropicSSE pushes each event.
	c.Response().Header().Set("Content-Type", "text/event-stream")
	c.Response().Header().Set("Cache-Control", "no-cache")
	c.Response().Header().Set("Connection", "keep-alive")

	// Create OpenAI messages for inference (already converted by the caller).
	openAIMessages := openAIReq.Messages

	// Collect any base64/data-URI images attached to the converted messages.
	images := []string{}
	for _, m := range openAIMessages {
		images = append(images, m.StringImages...)
	}

	// Send message_start event. Token usage is not known yet, so it is zeroed;
	// output tokens are reported later in the message_delta event.
	messageStart := schema.AnthropicStreamEvent{
		Type: "message_start",
		Message: &schema.AnthropicStreamMessage{
			ID:      fmt.Sprintf("msg_%s", id),
			Type:    "message",
			Role:    "assistant",
			Content: []schema.AnthropicContentBlock{},
			Model:   input.Model,
			Usage:   schema.AnthropicUsage{InputTokens: 0, OutputTokens: 0},
		},
	}
	sendAnthropicSSE(c, messageStart)

	// Mutable state shared with the token callback below:
	// accumulatedContent grows with every token and is re-parsed for tool
	// calls; currentBlockIndex tracks the Anthropic content-block index;
	// inToolCall flips once the text block is closed in favor of tool_use
	// blocks; toolCallsEmitted counts tool calls already streamed.
	accumulatedContent := ""
	currentBlockIndex := 0
	inToolCall := false
	toolCallsEmitted := 0

	// Send initial content_block_start event: block 0 is always opened as an
	// (initially empty) text block.
	contentBlockStart := schema.AnthropicStreamEvent{
		Type:         "content_block_start",
		Index:        currentBlockIndex,
		ContentBlock: &schema.AnthropicContentBlock{Type: "text", Text: ""},
	}
	sendAnthropicSSE(c, contentBlockStart)

	// Stream content deltas. Returning true keeps the backend generating.
	// NOTE(review): the usage argument is not consumed here; totals come from
	// the final prediction result instead.
	tokenCallback := func(token string, usage backend.TokenUsage) bool {
		accumulatedContent += token

		// If we're using functions, try to detect tool calls incrementally.
		// This cleans and re-parses the full accumulated output on every
		// token, so cost grows with output length.
		if shouldUseFn {
			cleanedResult := functions.CleanupLLMResult(accumulatedContent, cfg.FunctionsConfig)

			// Try parsing for tool calls
			toolCalls := functions.ParseFunctionCall(cleanedResult, cfg.FunctionsConfig)

			// If we detected new tool calls and haven't emitted them yet
			if len(toolCalls) > toolCallsEmitted {
				// Stop the current text block if we were in one. Any raw text
				// streamed before detection has already gone out as text_delta.
				if !inToolCall && currentBlockIndex == 0 {
					sendAnthropicSSE(c, schema.AnthropicStreamEvent{
						Type:  "content_block_stop",
						Index: currentBlockIndex,
					})
					currentBlockIndex++
					inToolCall = true
				}

				// Emit new tool calls, one complete tool_use block each:
				// start -> single input_json_delta with full arguments -> stop.
				for i := toolCallsEmitted; i < len(toolCalls); i++ {
					tc := toolCalls[i]

					// Send content_block_start for tool_use
					sendAnthropicSSE(c, schema.AnthropicStreamEvent{
						Type:  "content_block_start",
						Index: currentBlockIndex,
						ContentBlock: &schema.AnthropicContentBlock{
							Type: "tool_use",
							ID:   fmt.Sprintf("toolu_%s_%d", id, i),
							Name: tc.Name,
						},
					})

					// Send input_json_delta with the arguments
					sendAnthropicSSE(c, schema.AnthropicStreamEvent{
						Type:  "content_block_delta",
						Index: currentBlockIndex,
						Delta: &schema.AnthropicStreamDelta{
							Type:        "input_json_delta",
							PartialJSON: tc.Arguments,
						},
					})

					// Send content_block_stop
					sendAnthropicSSE(c, schema.AnthropicStreamEvent{
						Type:  "content_block_stop",
						Index: currentBlockIndex,
					})

					currentBlockIndex++
				}
				toolCallsEmitted = len(toolCalls)
				return true
			}
		}

		// Send regular text delta if not in tool call mode; once a tool call
		// has been emitted (inToolCall), further raw tokens are suppressed.
		if !inToolCall {
			delta := schema.AnthropicStreamEvent{
				Type:  "content_block_delta",
				Index: 0,
				Delta: &schema.AnthropicStreamDelta{
					Type: "text_delta",
					Text: token,
				},
			}
			sendAnthropicSSE(c, delta)
		}
		return true
	}

	predFunc, err := backend.ModelInference(
		input.Context, predInput, openAIMessages, images, nil, nil, ml, cfg, nil, nil, tokenCallback, "", "", nil, nil, nil)
	if err != nil {
		xlog.Error("Anthropic stream model inference failed", "error", err)
		return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("model inference failed: %v", err))
	}

	// Blocks until generation completes; tokens were streamed via the callback.
	prediction, err := predFunc()
	if err != nil {
		xlog.Error("Anthropic stream prediction failed", "error", err)
		return sendAnthropicError(c, 500, "api_error", fmt.Sprintf("prediction failed: %v", err))
	}

	// Send content_block_stop event for last block if we didn't close it yet
	// (the tool-call path closes each of its blocks as it emits them).
	if !inToolCall {
		contentBlockStop := schema.AnthropicStreamEvent{
			Type:  "content_block_stop",
			Index: 0,
		}
		sendAnthropicSSE(c, contentBlockStop)
	}

	// Determine stop reason: tool_use when any tool call was streamed,
	// otherwise a normal end_turn.
	stopReason := "end_turn"
	if toolCallsEmitted > 0 {
		stopReason = "tool_use"
	}

	// Send message_delta event with stop_reason and final output token count.
	messageDelta := schema.AnthropicStreamEvent{
		Type: "message_delta",
		Delta: &schema.AnthropicStreamDelta{
			StopReason: &stopReason,
		},
		Usage: &schema.AnthropicUsage{
			OutputTokens: prediction.Usage.Completion,
		},
	}
	sendAnthropicSSE(c, messageDelta)

	// Send message_stop event
	messageStop := schema.AnthropicStreamEvent{
		Type: "message_stop",
	}
	sendAnthropicSSE(c, messageStop)

	return nil
}
|
||||
|
||||
func sendAnthropicSSE(c echo.Context, event schema.AnthropicStreamEvent) {
|
||||
data, err := json.Marshal(event)
|
||||
if err != nil {
|
||||
xlog.Error("Failed to marshal SSE event", "error", err)
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(c.Response().Writer, "event: %s\ndata: %s\n\n", event.Type, string(data))
|
||||
c.Response().Flush()
|
||||
}
|
||||
|
||||
func sendAnthropicError(c echo.Context, statusCode int, errorType, message string) error {
|
||||
resp := schema.AnthropicErrorResponse{
|
||||
Type: "error",
|
||||
Error: schema.AnthropicError{
|
||||
Type: errorType,
|
||||
Message: message,
|
||||
},
|
||||
}
|
||||
return c.JSON(statusCode, resp)
|
||||
}
|
||||
|
||||
func convertAnthropicToOpenAIMessages(input *schema.AnthropicRequest) []schema.Message {
|
||||
var messages []schema.Message
|
||||
|
||||
// Add system message if present
|
||||
if input.System != "" {
|
||||
messages = append(messages, schema.Message{
|
||||
Role: "system",
|
||||
StringContent: input.System,
|
||||
Content: input.System,
|
||||
})
|
||||
}
|
||||
|
||||
// Convert Anthropic messages to OpenAI format
|
||||
for _, msg := range input.Messages {
|
||||
openAIMsg := schema.Message{
|
||||
Role: msg.Role,
|
||||
}
|
||||
|
||||
// Handle content (can be string or array of content blocks)
|
||||
switch content := msg.Content.(type) {
|
||||
case string:
|
||||
openAIMsg.StringContent = content
|
||||
openAIMsg.Content = content
|
||||
case []interface{}:
|
||||
// Handle array of content blocks
|
||||
var textContent string
|
||||
var stringImages []string
|
||||
var toolCalls []schema.ToolCall
|
||||
toolCallIndex := 0
|
||||
|
||||
for _, block := range content {
|
||||
if blockMap, ok := block.(map[string]interface{}); ok {
|
||||
blockType, _ := blockMap["type"].(string)
|
||||
switch blockType {
|
||||
case "text":
|
||||
if text, ok := blockMap["text"].(string); ok {
|
||||
textContent += text
|
||||
}
|
||||
case "image":
|
||||
// Handle image content
|
||||
if source, ok := blockMap["source"].(map[string]interface{}); ok {
|
||||
if sourceType, ok := source["type"].(string); ok && sourceType == "base64" {
|
||||
if data, ok := source["data"].(string); ok {
|
||||
mediaType, _ := source["media_type"].(string)
|
||||
// Format as data URI
|
||||
dataURI := fmt.Sprintf("data:%s;base64,%s", mediaType, data)
|
||||
stringImages = append(stringImages, dataURI)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "tool_use":
|
||||
// Convert tool_use to ToolCall format
|
||||
toolID, _ := blockMap["id"].(string)
|
||||
toolName, _ := blockMap["name"].(string)
|
||||
toolInput := blockMap["input"]
|
||||
|
||||
// Serialize input to JSON string
|
||||
inputJSON, err := json.Marshal(toolInput)
|
||||
if err != nil {
|
||||
xlog.Warn("Failed to marshal tool input", "error", err)
|
||||
inputJSON = []byte("{}")
|
||||
}
|
||||
|
||||
toolCalls = append(toolCalls, schema.ToolCall{
|
||||
Index: toolCallIndex,
|
||||
ID: toolID,
|
||||
Type: "function",
|
||||
FunctionCall: schema.FunctionCall{
|
||||
Name: toolName,
|
||||
Arguments: string(inputJSON),
|
||||
},
|
||||
})
|
||||
toolCallIndex++
|
||||
case "tool_result":
|
||||
// Convert tool_result to a message with role "tool"
|
||||
// This is handled by creating a separate message after this block
|
||||
// For now, we'll add it as text content
|
||||
toolUseID, _ := blockMap["tool_use_id"].(string)
|
||||
isError := false
|
||||
if isErrorPtr, ok := blockMap["is_error"].(*bool); ok && isErrorPtr != nil {
|
||||
isError = *isErrorPtr
|
||||
}
|
||||
|
||||
var resultText string
|
||||
if resultContent, ok := blockMap["content"]; ok {
|
||||
switch rc := resultContent.(type) {
|
||||
case string:
|
||||
resultText = rc
|
||||
case []interface{}:
|
||||
// Array of content blocks
|
||||
for _, cb := range rc {
|
||||
if cbMap, ok := cb.(map[string]interface{}); ok {
|
||||
if cbMap["type"] == "text" {
|
||||
if text, ok := cbMap["text"].(string); ok {
|
||||
resultText += text
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add tool result as a tool role message
|
||||
// We need to handle this differently - create a new message
|
||||
if msg.Role == "user" {
|
||||
// Store tool result info for creating separate message
|
||||
prefix := ""
|
||||
if isError {
|
||||
prefix = "Error: "
|
||||
}
|
||||
textContent += fmt.Sprintf("\n[Tool Result for %s]: %s%s", toolUseID, prefix, resultText)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
openAIMsg.StringContent = textContent
|
||||
openAIMsg.Content = textContent
|
||||
openAIMsg.StringImages = stringImages
|
||||
|
||||
// Add tool calls if present
|
||||
if len(toolCalls) > 0 {
|
||||
openAIMsg.ToolCalls = toolCalls
|
||||
}
|
||||
}
|
||||
|
||||
messages = append(messages, openAIMsg)
|
||||
}
|
||||
|
||||
return messages
|
||||
}
|
||||
|
||||
// convertAnthropicTools converts Anthropic tools to internal Functions format
|
||||
func convertAnthropicTools(input *schema.AnthropicRequest, cfg *config.ModelConfig) (functions.Functions, bool) {
|
||||
if len(input.Tools) == 0 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
var funcs functions.Functions
|
||||
for _, tool := range input.Tools {
|
||||
f := functions.Function{
|
||||
Name: tool.Name,
|
||||
Description: tool.Description,
|
||||
Parameters: tool.InputSchema,
|
||||
}
|
||||
funcs = append(funcs, f)
|
||||
}
|
||||
|
||||
// Handle tool_choice
|
||||
if input.ToolChoice != nil {
|
||||
switch tc := input.ToolChoice.(type) {
|
||||
case string:
|
||||
// "auto", "any", or "none"
|
||||
if tc == "any" {
|
||||
// Force the model to use one of the tools
|
||||
cfg.SetFunctionCallString("required")
|
||||
} else if tc == "none" {
|
||||
// Don't use tools
|
||||
return nil, false
|
||||
}
|
||||
// "auto" is the default - let model decide
|
||||
case map[string]interface{}:
|
||||
// Specific tool selection: {"type": "tool", "name": "tool_name"}
|
||||
if tcType, ok := tc["type"].(string); ok && tcType == "tool" {
|
||||
if name, ok := tc["name"].(string); ok {
|
||||
// Force specific tool
|
||||
cfg.SetFunctionCallString(name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return funcs, len(funcs) > 0 && cfg.ShouldUseFunctions()
|
||||
}
|
||||
@@ -53,12 +53,12 @@ type MCPErrorEvent struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// MCPStreamEndpoint is the SSE streaming endpoint for MCP chat completions
|
||||
// MCPEndpoint is the endpoint for MCP chat completions. Supports SSE mode, but it is not compatible with the OpenAI apis.
|
||||
// @Summary Stream MCP chat completions with reasoning, tool calls, and results
|
||||
// @Param request body schema.OpenAIRequest true "query params"
|
||||
// @Success 200 {object} schema.OpenAIResponse "Response"
|
||||
// @Router /v1/mcp/chat/completions [post]
|
||||
func MCPStreamEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc {
|
||||
func MCPEndpoint(cl *config.ModelConfigLoader, ml *model.ModelLoader, evaluator *templates.Evaluator, appConfig *config.ApplicationConfig) echo.HandlerFunc {
|
||||
return func(c echo.Context) error {
|
||||
ctx := c.Request().Context()
|
||||
created := int(time.Now().Unix())
|
||||
|
||||
@@ -76,6 +76,14 @@ func UpdateSettingsEndpoint(app *application.Application) echo.HandlerFunc {
|
||||
})
|
||||
}
|
||||
}
|
||||
if settings.LRUEvictionRetryInterval != nil {
|
||||
if _, err := time.ParseDuration(*settings.LRUEvictionRetryInterval); err != nil {
|
||||
return c.JSON(http.StatusBadRequest, schema.SettingsResponse{
|
||||
Success: false,
|
||||
Error: "Invalid lru_eviction_retry_interval format: " + err.Error(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Save to file
|
||||
if appConfig.DynamicConfigsDir == "" {
|
||||
@@ -111,6 +119,31 @@ func UpdateSettingsEndpoint(app *application.Application) echo.HandlerFunc {
|
||||
appConfig.ApiKeys = append(envKeys, runtimeKeys...)
|
||||
}
|
||||
|
||||
// Update watchdog dynamically for settings that don't require restart
|
||||
if settings.ForceEvictionWhenBusy != nil {
|
||||
currentWD := app.ModelLoader().GetWatchDog()
|
||||
if currentWD != nil {
|
||||
currentWD.SetForceEvictionWhenBusy(*settings.ForceEvictionWhenBusy)
|
||||
xlog.Info("Updated watchdog force eviction when busy setting", "forceEvictionWhenBusy", *settings.ForceEvictionWhenBusy)
|
||||
}
|
||||
}
|
||||
|
||||
// Update ModelLoader LRU eviction retry settings dynamically
|
||||
maxRetries := appConfig.LRUEvictionMaxRetries
|
||||
retryInterval := appConfig.LRUEvictionRetryInterval
|
||||
if settings.LRUEvictionMaxRetries != nil {
|
||||
maxRetries = *settings.LRUEvictionMaxRetries
|
||||
}
|
||||
if settings.LRUEvictionRetryInterval != nil {
|
||||
if dur, err := time.ParseDuration(*settings.LRUEvictionRetryInterval); err == nil {
|
||||
retryInterval = dur
|
||||
}
|
||||
}
|
||||
if settings.LRUEvictionMaxRetries != nil || settings.LRUEvictionRetryInterval != nil {
|
||||
app.ModelLoader().SetLRUEvictionRetrySettings(maxRetries, retryInterval)
|
||||
xlog.Info("Updated LRU eviction retry settings", "maxRetries", maxRetries, "retryInterval", retryInterval)
|
||||
}
|
||||
|
||||
// Check if agent job retention changed
|
||||
agentJobChanged := settings.AgentJobRetentionDays != nil
|
||||
|
||||
|
||||
@@ -65,13 +65,9 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig,
|
||||
// The client expects a JSON response
|
||||
return c.JSON(200, summary)
|
||||
} else {
|
||||
// Check if this is the manage route
|
||||
templateName := "views/index"
|
||||
if strings.HasSuffix(c.Request().URL.Path, "/manage") || c.Request().URL.Path == "/manage" {
|
||||
templateName = "views/manage"
|
||||
}
|
||||
// Render appropriate template
|
||||
return c.Render(200, templateName, summary)
|
||||
// Serve the SPA for both index and manage routes
|
||||
// The SPA handles routing client-side via Alpine.js
|
||||
return c.Render(200, "views/spa", summary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user