mirror of
https://github.com/mudler/LocalAI.git
synced 2026-02-27 04:08:57 -05:00
Compare commits
2 Commits
faster-qwe
...
copilot/fi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6a1e44c8ff | ||
|
|
bda40b266c |
@@ -10,8 +10,7 @@ services:
|
||||
- 8080:8080
|
||||
volumes:
|
||||
- localai_workspace:/workspace
|
||||
- models:/host-models
|
||||
- backends:/host-backends
|
||||
- ../models:/host-models
|
||||
- ./customization:/devcontainer-customization
|
||||
command: /bin/sh -c "while sleep 1000; do :; done"
|
||||
cap_add:
|
||||
@@ -40,9 +39,6 @@ services:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=grafana
|
||||
volumes:
|
||||
- ./grafana:/etc/grafana/provisioning/datasources
|
||||
|
||||
volumes:
|
||||
prom_data:
|
||||
localai_workspace:
|
||||
models:
|
||||
backends:
|
||||
localai_workspace:
|
||||
3
.env
3
.env
@@ -26,9 +26,6 @@
|
||||
## Disables COMPEL (Diffusers)
|
||||
# COMPEL=0
|
||||
|
||||
## Disables SD_EMBED (Diffusers)
|
||||
# SD_EMBED=0
|
||||
|
||||
## Enable/Disable single backend (useful if only one GPU is available)
|
||||
# LOCALAI_SINGLE_ACTIVE_BACKEND=true
|
||||
|
||||
|
||||
2
.github/gallery-agent/agent.go
vendored
2
.github/gallery-agent/agent.go
vendored
@@ -146,7 +146,7 @@ func getRealReadme(ctx context.Context, repository string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
content := result.LastMessage().Content
|
||||
content := newFragment.LastMessage().Content
|
||||
return cleanTextContent(content), nil
|
||||
}
|
||||
|
||||
|
||||
501
.github/workflows/backend.yml
vendored
501
.github/workflows/backend.yml
vendored
@@ -14,7 +14,6 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
backend-jobs:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
uses: ./.github/workflows/backend_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
@@ -105,58 +104,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-ace-step'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'true'
|
||||
backend: "ace-step"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-mlx'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'true'
|
||||
backend: "mlx"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-mlx-vlm'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'true'
|
||||
backend: "mlx-vlm"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-mlx-audio'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'true'
|
||||
backend: "mlx-audio"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
# CUDA 12 builds
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
@@ -184,19 +131,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-nemo'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "nemo"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
@@ -210,19 +144,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-faster-qwen3-tts'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "faster-qwen3-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
@@ -327,19 +248,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-ace-step'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "ace-step"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
@@ -392,19 +300,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-outetts'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "outetts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
@@ -431,45 +326,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-mlx'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "mlx"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-mlx-vlm'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "mlx-vlm"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-12-mlx-audio'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "mlx-audio"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "8"
|
||||
@@ -562,19 +418,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13-nemo'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "nemo"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -588,19 +431,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13-faster-qwen3-tts'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "faster-qwen3-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -679,19 +509,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13-ace-step'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "ace-step"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -731,19 +548,6 @@ jobs:
|
||||
backend: "qwen-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-cuda-13-arm64-faster-qwen3-tts'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
backend: "faster-qwen3-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -757,19 +561,6 @@ jobs:
|
||||
backend: "pocket-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-cuda-13-arm64-chatterbox'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
backend: "chatterbox"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -783,45 +574,6 @@ jobs:
|
||||
backend: "diffusers"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-cuda-13-arm64-mlx'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
backend: "mlx"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-cuda-13-arm64-mlx-vlm'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
backend: "mlx-vlm"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-cuda-13-arm64-mlx-audio'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
ubuntu-version: '2404'
|
||||
backend: "mlx-audio"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -887,45 +639,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13-mlx'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "mlx"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13-mlx-vlm'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "mlx-vlm"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13-mlx-audio'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "mlx-audio"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
@@ -1070,19 +783,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'hipblas'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-rocm-hipblas-ace-step'
|
||||
runs-on: 'arc-runner-set'
|
||||
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
|
||||
skip-drivers: 'false'
|
||||
backend: "ace-step"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
# ROCm additional backends
|
||||
- build-type: 'hipblas'
|
||||
cuda-major-version: ""
|
||||
@@ -1123,19 +823,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'hipblas'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-rocm-hipblas-nemo'
|
||||
runs-on: 'arc-runner-set'
|
||||
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
|
||||
skip-drivers: 'false'
|
||||
backend: "nemo"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'hipblas'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
@@ -1293,19 +980,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'intel'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-intel-ace-step'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "ace-step"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
@@ -1345,19 +1019,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-faster-qwen3-tts'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
skip-drivers: 'true'
|
||||
backend: "faster-qwen3-tts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
@@ -1384,45 +1045,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-mlx'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
skip-drivers: 'true'
|
||||
backend: "mlx"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-mlx-vlm'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
skip-drivers: 'true'
|
||||
backend: "mlx-vlm"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-mlx-audio'
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
skip-drivers: 'true'
|
||||
backend: "mlx-audio"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2204'
|
||||
# SYCL additional backends
|
||||
- build-type: 'intel'
|
||||
cuda-major-version: ""
|
||||
@@ -1476,19 +1098,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'intel'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-intel-nemo'
|
||||
runs-on: 'arc-runner-set'
|
||||
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "nemo"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: 'intel'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
@@ -1739,20 +1348,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.golang"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
# voxtral
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-voxtral'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "voxtral"
|
||||
dockerfile: "./backend/Dockerfile.golang"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
#silero-vad
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
@@ -1928,19 +1523,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-nemo'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "nemo"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
@@ -1957,7 +1539,7 @@ jobs:
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64,linux/arm64'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-voxcpm'
|
||||
runs-on: 'ubuntu-latest'
|
||||
@@ -1980,19 +1562,6 @@ jobs:
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
- build-type: ''
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-cpu-outetts'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:24.04"
|
||||
skip-drivers: 'true'
|
||||
backend: "outetts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./"
|
||||
ubuntu-version: '2404'
|
||||
backend-jobs-darwin:
|
||||
uses: ./.github/workflows/backend_build_darwin.yml
|
||||
strategy:
|
||||
@@ -2001,9 +1570,6 @@ jobs:
|
||||
- backend: "diffusers"
|
||||
tag-suffix: "-metal-darwin-arm64-diffusers"
|
||||
build-type: "mps"
|
||||
- backend: "ace-step"
|
||||
tag-suffix: "-metal-darwin-arm64-ace-step"
|
||||
build-type: "mps"
|
||||
- backend: "mlx"
|
||||
tag-suffix: "-metal-darwin-arm64-mlx"
|
||||
build-type: "mps"
|
||||
@@ -2024,71 +1590,6 @@ jobs:
|
||||
tag-suffix: "-metal-darwin-arm64-whisper"
|
||||
build-type: "metal"
|
||||
lang: "go"
|
||||
- backend: "voxtral"
|
||||
tag-suffix: "-metal-darwin-arm64-voxtral"
|
||||
build-type: "metal"
|
||||
lang: "go"
|
||||
- backend: "vibevoice"
|
||||
tag-suffix: "-metal-darwin-arm64-vibevoice"
|
||||
build-type: "mps"
|
||||
- backend: "qwen-asr"
|
||||
tag-suffix: "-metal-darwin-arm64-qwen-asr"
|
||||
build-type: "mps"
|
||||
- backend: "nemo"
|
||||
tag-suffix: "-metal-darwin-arm64-nemo"
|
||||
build-type: "mps"
|
||||
- backend: "qwen-tts"
|
||||
tag-suffix: "-metal-darwin-arm64-qwen-tts"
|
||||
build-type: "mps"
|
||||
- backend: "voxcpm"
|
||||
tag-suffix: "-metal-darwin-arm64-voxcpm"
|
||||
build-type: "mps"
|
||||
- backend: "pocket-tts"
|
||||
tag-suffix: "-metal-darwin-arm64-pocket-tts"
|
||||
build-type: "mps"
|
||||
- backend: "moonshine"
|
||||
tag-suffix: "-metal-darwin-arm64-moonshine"
|
||||
build-type: "mps"
|
||||
- backend: "whisperx"
|
||||
tag-suffix: "-metal-darwin-arm64-whisperx"
|
||||
build-type: "mps"
|
||||
- backend: "rerankers"
|
||||
tag-suffix: "-metal-darwin-arm64-rerankers"
|
||||
build-type: "mps"
|
||||
- backend: "transformers"
|
||||
tag-suffix: "-metal-darwin-arm64-transformers"
|
||||
build-type: "mps"
|
||||
- backend: "kokoro"
|
||||
tag-suffix: "-metal-darwin-arm64-kokoro"
|
||||
build-type: "mps"
|
||||
- backend: "faster-whisper"
|
||||
tag-suffix: "-metal-darwin-arm64-faster-whisper"
|
||||
build-type: "mps"
|
||||
- backend: "coqui"
|
||||
tag-suffix: "-metal-darwin-arm64-coqui"
|
||||
build-type: "mps"
|
||||
- backend: "rfdetr"
|
||||
tag-suffix: "-metal-darwin-arm64-rfdetr"
|
||||
build-type: "mps"
|
||||
- backend: "kitten-tts"
|
||||
tag-suffix: "-metal-darwin-arm64-kitten-tts"
|
||||
build-type: "mps"
|
||||
- backend: "piper"
|
||||
tag-suffix: "-metal-darwin-arm64-piper"
|
||||
build-type: "metal"
|
||||
lang: "go"
|
||||
- backend: "silero-vad"
|
||||
tag-suffix: "-metal-darwin-arm64-silero-vad"
|
||||
build-type: "metal"
|
||||
lang: "go"
|
||||
- backend: "local-store"
|
||||
tag-suffix: "-metal-darwin-arm64-local-store"
|
||||
build-type: "metal"
|
||||
lang: "go"
|
||||
- backend: "huggingface"
|
||||
tag-suffix: "-metal-darwin-arm64-huggingface"
|
||||
build-type: "metal"
|
||||
lang: "go"
|
||||
with:
|
||||
backend: ${{ matrix.backend }}
|
||||
build-type: ${{ matrix.build-type }}
|
||||
|
||||
9
.github/workflows/bump_deps.yaml
vendored
9
.github/workflows/bump_deps.yaml
vendored
@@ -5,7 +5,6 @@ on:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
bump-backends:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -18,6 +17,10 @@ jobs:
|
||||
variable: "WHISPER_CPP_VERSION"
|
||||
branch: "master"
|
||||
file: "backend/go/whisper/Makefile"
|
||||
- repository: "PABannier/bark.cpp"
|
||||
variable: "BARKCPP_VERSION"
|
||||
branch: "main"
|
||||
file: "Makefile"
|
||||
- repository: "leejet/stable-diffusion.cpp"
|
||||
variable: "STABLEDIFFUSION_GGML_VERSION"
|
||||
branch: "master"
|
||||
@@ -26,10 +29,6 @@ jobs:
|
||||
variable: "PIPER_VERSION"
|
||||
branch: "master"
|
||||
file: "backend/go/piper/Makefile"
|
||||
- repository: "antirez/voxtral.c"
|
||||
variable: "VOXTRAL_VERSION"
|
||||
branch: "main"
|
||||
file: "backend/go/voxtral/Makefile"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
1
.github/workflows/bump_docs.yaml
vendored
1
.github/workflows/bump_docs.yaml
vendored
@@ -5,7 +5,6 @@ on:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
bump-docs:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
||||
1
.github/workflows/checksum_checker.yaml
vendored
1
.github/workflows/checksum_checker.yaml
vendored
@@ -5,7 +5,6 @@ on:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
checksum_check:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Force Install GIT latest
|
||||
|
||||
2
.github/workflows/dependabot_auto.yml
vendored
2
.github/workflows/dependabot_auto.yml
vendored
@@ -9,8 +9,8 @@ permissions:
|
||||
|
||||
jobs:
|
||||
dependabot:
|
||||
if: github.repository == 'mudler/LocalAI' && github.actor == 'dependabot[bot]'
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.actor == 'dependabot[bot]' }}
|
||||
steps:
|
||||
- name: Dependabot metadata
|
||||
id: metadata
|
||||
|
||||
1
.github/workflows/deploy-explorer.yaml
vendored
1
.github/workflows/deploy-explorer.yaml
vendored
@@ -12,7 +12,6 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
build-linux:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Clone
|
||||
|
||||
1
.github/workflows/gallery-agent.yaml
vendored
1
.github/workflows/gallery-agent.yaml
vendored
@@ -27,7 +27,6 @@ on:
|
||||
type: string
|
||||
jobs:
|
||||
gallery-agent:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
|
||||
1
.github/workflows/generate_grpc_cache.yaml
vendored
1
.github/workflows/generate_grpc_cache.yaml
vendored
@@ -13,7 +13,6 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
generate_caches:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
|
||||
1
.github/workflows/generate_intel_image.yaml
vendored
1
.github/workflows/generate_intel_image.yaml
vendored
@@ -12,7 +12,6 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
generate_caches:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
|
||||
3
.github/workflows/image.yml
vendored
3
.github/workflows/image.yml
vendored
@@ -14,7 +14,6 @@
|
||||
|
||||
jobs:
|
||||
hipblas-jobs:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
@@ -51,7 +50,6 @@
|
||||
ubuntu-codename: 'noble'
|
||||
|
||||
core-image-build:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
@@ -138,7 +136,6 @@
|
||||
ubuntu-codename: 'noble'
|
||||
|
||||
gh-runner:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
uses: ./.github/workflows/image_build.yml
|
||||
with:
|
||||
tag-latest: ${{ matrix.tag-latest }}
|
||||
|
||||
2
.github/workflows/localaibot_automerge.yml
vendored
2
.github/workflows/localaibot_automerge.yml
vendored
@@ -10,8 +10,8 @@ permissions:
|
||||
actions: write # to dispatch publish workflow
|
||||
jobs:
|
||||
dependabot:
|
||||
if: github.repository == 'mudler/LocalAI' && github.actor == 'localai-bot' && contains(github.event.pull_request.title, 'chore:')
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.actor == 'localai-bot' && !contains(github.event.pull_request.title, 'chore(model gallery):') }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
4
.github/workflows/notify-models.yaml
vendored
4
.github/workflows/notify-models.yaml
vendored
@@ -10,7 +10,7 @@ permissions:
|
||||
|
||||
jobs:
|
||||
notify-discord:
|
||||
if: github.repository == 'mudler/LocalAI' && (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model'))
|
||||
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
|
||||
env:
|
||||
MODEL_NAME: gemma-3-12b-it-qat
|
||||
runs-on: ubuntu-latest
|
||||
@@ -90,7 +90,7 @@ jobs:
|
||||
connect-timeout-seconds: 180
|
||||
limit-access-to-actor: true
|
||||
notify-twitter:
|
||||
if: github.repository == 'mudler/LocalAI' && (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model'))
|
||||
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
|
||||
env:
|
||||
MODEL_NAME: gemma-3-12b-it-qat
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
1
.github/workflows/notify-releases.yaml
vendored
1
.github/workflows/notify-releases.yaml
vendored
@@ -6,7 +6,6 @@ on:
|
||||
|
||||
jobs:
|
||||
notify-discord:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
RELEASE_BODY: ${{ github.event.release.body }}
|
||||
|
||||
2
.github/workflows/release.yaml
vendored
2
.github/workflows/release.yaml
vendored
@@ -18,7 +18,7 @@ jobs:
|
||||
with:
|
||||
go-version: 1.23
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v7
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
version: v2.11.0
|
||||
args: release --clean
|
||||
|
||||
3
.github/workflows/stalebot.yml
vendored
3
.github/workflows/stalebot.yml
vendored
@@ -8,10 +8,9 @@ on:
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v9
|
||||
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v9
|
||||
with:
|
||||
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
|
||||
stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
|
||||
|
||||
50
.github/workflows/test-extra.yml
vendored
50
.github/workflows/test-extra.yml
vendored
@@ -323,25 +323,6 @@ jobs:
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/python/qwen-asr
|
||||
make --jobs=5 --output-sync=target -C backend/python/qwen-asr test
|
||||
tests-nemo:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Clone
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: true
|
||||
- name: Dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y build-essential ffmpeg sox
|
||||
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
|
||||
# Install UV
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
pip install --user --no-cache-dir grpcio-tools==1.64.1
|
||||
- name: Test nemo
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/python/nemo
|
||||
make --jobs=5 --output-sync=target -C backend/python/nemo test
|
||||
tests-voxcpm:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -361,34 +342,3 @@ jobs:
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/python/voxcpm
|
||||
make --jobs=5 --output-sync=target -C backend/python/voxcpm test
|
||||
tests-voxtral:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Clone
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
submodules: true
|
||||
- name: Dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y build-essential cmake curl libopenblas-dev ffmpeg
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v5
|
||||
# You can test your matrix by printing the current Go version
|
||||
- name: Display Go version
|
||||
run: go version
|
||||
- name: Proto Dependencies
|
||||
run: |
|
||||
# Install protoc
|
||||
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
|
||||
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
|
||||
rm protoc.zip
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
|
||||
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
|
||||
PATH="$PATH:$HOME/go/bin" make protogen-go
|
||||
- name: Build voxtral
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/go/voxtral
|
||||
- name: Test voxtral
|
||||
run: |
|
||||
make --jobs=5 --output-sync=target -C backend/go/voxtral test
|
||||
|
||||
1
.github/workflows/update_swagger.yaml
vendored
1
.github/workflows/update_swagger.yaml
vendored
@@ -5,7 +5,6 @@ on:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
swagger:
|
||||
if: github.repository == 'mudler/LocalAI'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
22
Makefile
22
Makefile
@@ -1,5 +1,5 @@
|
||||
# Disable parallel execution for backend builds
|
||||
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/outetts backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/vllm-omni backends/moonshine backends/pocket-tts backends/qwen-tts backends/faster-qwen3-tts backends/qwen-asr backends/nemo backends/voxcpm backends/whisperx backends/ace-step backends/voxtral
|
||||
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/vllm-omni backends/moonshine backends/pocket-tts backends/qwen-tts backends/qwen-asr backends/voxcpm backends/whisperx
|
||||
|
||||
GOCMD=go
|
||||
GOTEST=$(GOCMD) test
|
||||
@@ -308,7 +308,6 @@ protogen-go-clean:
|
||||
|
||||
prepare-test-extra: protogen-python
|
||||
$(MAKE) -C backend/python/transformers
|
||||
$(MAKE) -C backend/python/outetts
|
||||
$(MAKE) -C backend/python/diffusers
|
||||
$(MAKE) -C backend/python/chatterbox
|
||||
$(MAKE) -C backend/python/vllm
|
||||
@@ -317,16 +316,12 @@ prepare-test-extra: protogen-python
|
||||
$(MAKE) -C backend/python/moonshine
|
||||
$(MAKE) -C backend/python/pocket-tts
|
||||
$(MAKE) -C backend/python/qwen-tts
|
||||
$(MAKE) -C backend/python/faster-qwen3-tts
|
||||
$(MAKE) -C backend/python/qwen-asr
|
||||
$(MAKE) -C backend/python/nemo
|
||||
$(MAKE) -C backend/python/voxcpm
|
||||
$(MAKE) -C backend/python/whisperx
|
||||
$(MAKE) -C backend/python/ace-step
|
||||
|
||||
test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/transformers test
|
||||
$(MAKE) -C backend/python/outetts test
|
||||
$(MAKE) -C backend/python/diffusers test
|
||||
$(MAKE) -C backend/python/chatterbox test
|
||||
$(MAKE) -C backend/python/vllm test
|
||||
@@ -335,12 +330,9 @@ test-extra: prepare-test-extra
|
||||
$(MAKE) -C backend/python/moonshine test
|
||||
$(MAKE) -C backend/python/pocket-tts test
|
||||
$(MAKE) -C backend/python/qwen-tts test
|
||||
$(MAKE) -C backend/python/faster-qwen3-tts test
|
||||
$(MAKE) -C backend/python/qwen-asr test
|
||||
$(MAKE) -C backend/python/nemo test
|
||||
$(MAKE) -C backend/python/voxcpm test
|
||||
$(MAKE) -C backend/python/whisperx test
|
||||
$(MAKE) -C backend/python/ace-step test
|
||||
|
||||
DOCKER_IMAGE?=local-ai
|
||||
DOCKER_AIO_IMAGE?=local-ai-aio
|
||||
@@ -455,12 +447,10 @@ BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
|
||||
BACKEND_SILERO_VAD = silero-vad|golang|.|false|true
|
||||
BACKEND_STABLEDIFFUSION_GGML = stablediffusion-ggml|golang|.|--progress=plain|true
|
||||
BACKEND_WHISPER = whisper|golang|.|false|true
|
||||
BACKEND_VOXTRAL = voxtral|golang|.|false|true
|
||||
|
||||
# Python backends with root context
|
||||
BACKEND_RERANKERS = rerankers|python|.|false|true
|
||||
BACKEND_TRANSFORMERS = transformers|python|.|false|true
|
||||
BACKEND_OUTETTS = outetts|python|.|false|true
|
||||
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
|
||||
BACKEND_COQUI = coqui|python|.|false|true
|
||||
BACKEND_RFDETR = rfdetr|python|.|false|true
|
||||
@@ -475,12 +465,9 @@ BACKEND_VIBEVOICE = vibevoice|python|.|--progress=plain|true
|
||||
BACKEND_MOONSHINE = moonshine|python|.|false|true
|
||||
BACKEND_POCKET_TTS = pocket-tts|python|.|false|true
|
||||
BACKEND_QWEN_TTS = qwen-tts|python|.|false|true
|
||||
BACKEND_FASTER_QWEN3_TTS = faster-qwen3-tts|python|.|false|true
|
||||
BACKEND_QWEN_ASR = qwen-asr|python|.|false|true
|
||||
BACKEND_NEMO = nemo|python|.|false|true
|
||||
BACKEND_VOXCPM = voxcpm|python|.|false|true
|
||||
BACKEND_WHISPERX = whisperx|python|.|false|true
|
||||
BACKEND_ACE_STEP = ace-step|python|.|false|true
|
||||
|
||||
# Helper function to build docker image for a backend
|
||||
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
|
||||
@@ -510,10 +497,8 @@ $(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_SILERO_VAD)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_STABLEDIFFUSION_GGML)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_VOXTRAL)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_OUTETTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
|
||||
@@ -528,18 +513,15 @@ $(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_POCKET_TTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_TTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_QWEN3_TTS)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_ASR)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_NEMO)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_VOXCPM)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPERX)))
|
||||
$(eval $(call generate-docker-build-target,$(BACKEND_ACE_STEP)))
|
||||
|
||||
# Pattern rule for docker-save targets
|
||||
docker-save-%: backend-images
|
||||
docker save local-ai-backend:$* -o backend-images/$*.tar
|
||||
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-vllm-omni docker-build-transformers docker-build-outetts docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-chatterbox docker-build-vibevoice docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts docker-build-faster-qwen3-tts docker-build-qwen-asr docker-build-nemo docker-build-voxcpm docker-build-whisperx docker-build-ace-step docker-build-voxtral
|
||||
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-vllm-omni docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-chatterbox docker-build-vibevoice docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts docker-build-qwen-asr docker-build-voxcpm docker-build-whisperx
|
||||
|
||||
########################################################
|
||||
### Mock Backend for E2E Tests
|
||||
|
||||
24
README.md
24
README.md
@@ -93,7 +93,16 @@ Liking LocalAI? LocalAI is part of an integrated suite of AI infrastructure tool
|
||||
|
||||
## 💻 Quickstart
|
||||
|
||||
> ⚠️ **Note:** The `install.sh` script is currently experiencing issues due to the heavy changes currently undergoing in LocalAI and may produce broken or misconfigured installations. Please use Docker installation (see below) or manual binary installation until [issue #8032](https://github.com/mudler/LocalAI/issues/8032) is resolved.
|
||||
|
||||
Run the installer script:
|
||||
|
||||
```bash
|
||||
# Basic installation
|
||||
curl https://localai.io/install.sh | sh
|
||||
```
|
||||
|
||||
For more installation options, see [Installer Options](https://localai.io/installation/).
|
||||
|
||||
### macOS Download:
|
||||
|
||||
@@ -194,8 +203,7 @@ local-ai run oci://localai/phi-2:latest
|
||||
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html), if you are interested in our roadmap items and future enhancements, you can see the [Issues labeled as Roadmap here](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
|
||||
|
||||
## 📰 Latest project news
|
||||
- February 2026: [Realtime API for audio-to-audio with tool calling](https://github.com/mudler/LocalAI/pull/6245), [ACE-Step 1.5 support](https://github.com/mudler/LocalAI/pull/8396)
|
||||
- January 2026: **LocalAI 3.10.0** - Major release with Anthropic API support, Open Responses API for stateful agents, video & image generation suite (LTX-2), unified GPU backends, tool streaming & XML parsing, system-aware backend gallery, crash fixes for AVX-only CPUs and AMD VRAM reporting, request tracing, and new backends: **Moonshine** (ultra-fast transcription), **Pocket-TTS** (lightweight TTS). Vulkan arm64 builds now available. [Release notes](https://github.com/mudler/LocalAI/releases/tag/v3.10.0).
|
||||
|
||||
- December 2025: [Dynamic Memory Resource reclaimer](https://github.com/mudler/LocalAI/pull/7583), [Automatic fitting of models to multiple GPUS(llama.cpp)](https://github.com/mudler/LocalAI/pull/7584), [Added Vibevoice backend](https://github.com/mudler/LocalAI/pull/7494)
|
||||
- November 2025: Major improvements to the UX. Among these: [Import models via URL](https://github.com/mudler/LocalAI/pull/7245) and [Multiple chats and history](https://github.com/mudler/LocalAI/pull/7325)
|
||||
- October 2025: 🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) support added for agentic capabilities with external tools
|
||||
@@ -228,7 +236,7 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
|
||||
- 🧩 [Backend Gallery](https://localai.io/backends/): Install/remove backends on the fly, powered by OCI images — fully customizable and API-driven.
|
||||
- 📖 [Text generation with GPTs](https://localai.io/features/text-generation/) (`llama.cpp`, `transformers`, `vllm` ... [:book: and more](https://localai.io/model-compatibility/index.html#model-compatibility-table))
|
||||
- 🗣 [Text to Audio](https://localai.io/features/text-to-audio/)
|
||||
- 🔈 [Audio to Text](https://localai.io/features/audio-to-text/)
|
||||
- 🔈 [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
|
||||
- 🎨 [Image generation](https://localai.io/features/image-generation)
|
||||
- 🔥 [OpenAI-alike tools API](https://localai.io/features/openai-functions/)
|
||||
- ⚡ [Realtime API](https://localai.io/features/openai-realtime/) (Speech-to-speech)
|
||||
@@ -261,7 +269,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
|---------|-------------|---------------------|
|
||||
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
|
||||
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **moonshine** | Ultra-fast transcription engine for low-end devices | CUDA 12/13, Metal, CPU |
|
||||
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
|
||||
@@ -272,7 +279,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
| **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **pocket-tts** | Lightweight CPU-based TTS | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **qwen-tts** | High-quality TTS with custom voice, voice design, and voice cloning | CUDA 12/13, ROCm, Intel, CPU |
|
||||
| **ace-step** | Music generation from text descriptions, lyrics, or audio samples | CUDA 12/13, ROCm, Intel, Metal, CPU |
|
||||
|
||||
### Image & Video Generation
|
||||
| Backend | Description | Acceleration Support |
|
||||
@@ -294,11 +300,11 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
|-------------------|-------------------|------------------|
|
||||
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, neutts, vibevoice, pocket-tts, qwen-tts, ace-step | AMD Graphics |
|
||||
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, coqui, kokoro, vibevoice, pocket-tts, qwen-tts, ace-step | Intel Arc, Intel iGPUs |
|
||||
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, moonshine, ace-step | Apple M1/M2/M3+ |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, neutts, vibevoice, pocket-tts, qwen-tts | AMD Graphics |
|
||||
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, coqui, kokoro, vibevoice, pocket-tts, qwen-tts | Intel Arc, Intel iGPUs |
|
||||
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM | Apple M1/M2/M3+ |
|
||||
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
|
||||
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr, ace-step | ARM64 embedded AI (AGX Orin, etc.) |
|
||||
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
|
||||
| **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
|
||||
| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ RUN apt-get update && \
|
||||
build-essential \
|
||||
git ccache \
|
||||
ca-certificates \
|
||||
make cmake wget libopenblas-dev \
|
||||
make cmake wget \
|
||||
curl unzip \
|
||||
libssl-dev && \
|
||||
apt-get clean && \
|
||||
|
||||
@@ -365,14 +365,6 @@ message SoundGenerationRequest {
|
||||
optional bool sample = 6;
|
||||
optional string src = 7;
|
||||
optional int32 src_divisor = 8;
|
||||
optional bool think = 9;
|
||||
optional string caption = 10;
|
||||
optional string lyrics = 11;
|
||||
optional int32 bpm = 12;
|
||||
optional string keyscale = 13;
|
||||
optional string language = 14;
|
||||
optional string timesignature = 15;
|
||||
optional bool instrumental = 17;
|
||||
}
|
||||
|
||||
message TokenizationResponse {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
LLAMA_VERSION?=723c71064da0908c19683f8c344715fbf6d986fd
|
||||
LLAMA_VERSION?=2634ed207a17db1a54bd8df0555bd8499a6ab691
|
||||
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
|
||||
|
||||
CMAKE_ARGS?=
|
||||
|
||||
@@ -417,12 +417,6 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
|
||||
// n_ctx_checkpoints: max context checkpoints per slot (default: 8)
|
||||
params.n_ctx_checkpoints = 8;
|
||||
|
||||
// llama memory fit fails if we don't provide a buffer for tensor overrides
|
||||
const size_t ntbo = llama_max_tensor_buft_overrides();
|
||||
while (params.tensor_buft_overrides.size() < ntbo) {
|
||||
params.tensor_buft_overrides.push_back({nullptr, nullptr});
|
||||
}
|
||||
|
||||
// decode options. Options are in form optname:optvale, or if booleans only optname.
|
||||
for (int i = 0; i < request->options_size(); i++) {
|
||||
std::string opt = request->options(i);
|
||||
@@ -1261,42 +1255,6 @@ public:
|
||||
body_json["add_generation_prompt"] = data["add_generation_prompt"];
|
||||
}
|
||||
|
||||
// Pass sampling parameters to body_json so oaicompat_chat_params_parse respects them
|
||||
// and doesn't overwrite them with defaults in the returned parsed_data
|
||||
if (data.contains("n_predict")) {
|
||||
body_json["max_tokens"] = data["n_predict"];
|
||||
}
|
||||
if (data.contains("ignore_eos")) {
|
||||
body_json["ignore_eos"] = data["ignore_eos"];
|
||||
}
|
||||
if (data.contains("stop")) {
|
||||
body_json["stop"] = data["stop"];
|
||||
}
|
||||
if (data.contains("temperature")) {
|
||||
body_json["temperature"] = data["temperature"];
|
||||
}
|
||||
if (data.contains("top_p")) {
|
||||
body_json["top_p"] = data["top_p"];
|
||||
}
|
||||
if (data.contains("frequency_penalty")) {
|
||||
body_json["frequency_penalty"] = data["frequency_penalty"];
|
||||
}
|
||||
if (data.contains("presence_penalty")) {
|
||||
body_json["presence_penalty"] = data["presence_penalty"];
|
||||
}
|
||||
if (data.contains("seed")) {
|
||||
body_json["seed"] = data["seed"];
|
||||
}
|
||||
if (data.contains("logit_bias")) {
|
||||
body_json["logit_bias"] = data["logit_bias"];
|
||||
}
|
||||
if (data.contains("top_k")) {
|
||||
body_json["top_k"] = data["top_k"];
|
||||
}
|
||||
if (data.contains("min_p")) {
|
||||
body_json["min_p"] = data["min_p"];
|
||||
}
|
||||
|
||||
// Debug: Print full body_json before template processing (includes messages, tools, tool_choice, etc.)
|
||||
SRV_DBG("[CONVERSATION DEBUG] PredictStream: Full body_json before oaicompat_chat_params_parse:\n%s\n", body_json.dump(2).c_str());
|
||||
|
||||
@@ -2028,42 +1986,6 @@ public:
|
||||
body_json["add_generation_prompt"] = data["add_generation_prompt"];
|
||||
}
|
||||
|
||||
// Pass sampling parameters to body_json so oaicompat_chat_params_parse respects them
|
||||
// and doesn't overwrite them with defaults in the returned parsed_data
|
||||
if (data.contains("n_predict")) {
|
||||
body_json["max_tokens"] = data["n_predict"];
|
||||
}
|
||||
if (data.contains("ignore_eos")) {
|
||||
body_json["ignore_eos"] = data["ignore_eos"];
|
||||
}
|
||||
if (data.contains("stop")) {
|
||||
body_json["stop"] = data["stop"];
|
||||
}
|
||||
if (data.contains("temperature")) {
|
||||
body_json["temperature"] = data["temperature"];
|
||||
}
|
||||
if (data.contains("top_p")) {
|
||||
body_json["top_p"] = data["top_p"];
|
||||
}
|
||||
if (data.contains("frequency_penalty")) {
|
||||
body_json["frequency_penalty"] = data["frequency_penalty"];
|
||||
}
|
||||
if (data.contains("presence_penalty")) {
|
||||
body_json["presence_penalty"] = data["presence_penalty"];
|
||||
}
|
||||
if (data.contains("seed")) {
|
||||
body_json["seed"] = data["seed"];
|
||||
}
|
||||
if (data.contains("logit_bias")) {
|
||||
body_json["logit_bias"] = data["logit_bias"];
|
||||
}
|
||||
if (data.contains("top_k")) {
|
||||
body_json["top_k"] = data["top_k"];
|
||||
}
|
||||
if (data.contains("min_p")) {
|
||||
body_json["min_p"] = data["min_p"];
|
||||
}
|
||||
|
||||
// Debug: Print full body_json before template processing (includes messages, tools, tool_choice, etc.)
|
||||
SRV_DBG("[CONVERSATION DEBUG] Predict: Full body_json before oaicompat_chat_params_parse:\n%s\n", body_json.dump(2).c_str());
|
||||
|
||||
|
||||
@@ -6,7 +6,4 @@ huggingface:
|
||||
package:
|
||||
bash package.sh
|
||||
|
||||
build: huggingface package
|
||||
|
||||
clean:
|
||||
rm -f huggingface
|
||||
build: huggingface package
|
||||
@@ -8,5 +8,5 @@ set -e
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
|
||||
mkdir -p $CURDIR/package
|
||||
cp -avf $CURDIR/huggingface $CURDIR/package/
|
||||
cp -avrf $CURDIR/huggingface $CURDIR/package/
|
||||
cp -rfv $CURDIR/run.sh $CURDIR/package/
|
||||
@@ -6,7 +6,4 @@ local-store:
|
||||
package:
|
||||
bash package.sh
|
||||
|
||||
build: local-store package
|
||||
|
||||
clean:
|
||||
rm -f local-store
|
||||
build: local-store package
|
||||
@@ -8,5 +8,5 @@ set -e
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
|
||||
mkdir -p $CURDIR/package
|
||||
cp -avf $CURDIR/local-store $CURDIR/package/
|
||||
cp -avrf $CURDIR/local-store $CURDIR/package/
|
||||
cp -rfv $CURDIR/run.sh $CURDIR/package/
|
||||
@@ -34,7 +34,4 @@ piper: sources/go-piper sources/go-piper/libpiper_binding.a espeak-ng-data
|
||||
package:
|
||||
bash package.sh
|
||||
|
||||
build: piper package
|
||||
|
||||
clean:
|
||||
rm -f piper
|
||||
build: piper package
|
||||
@@ -10,8 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
|
||||
cp -avf $CURDIR/piper $CURDIR/package/
|
||||
cp -avf $CURDIR/espeak-ng-data $CURDIR/package/
|
||||
cp -avrf $CURDIR/piper $CURDIR/package/
|
||||
cp -avrf $CURDIR/espeak-ng-data $CURDIR/package/
|
||||
cp -rfv $CURDIR/run.sh $CURDIR/package/
|
||||
cp -rfLv $CURDIR/sources/go-piper/piper-phonemize/pi/lib/* $CURDIR/package/lib/
|
||||
|
||||
|
||||
@@ -44,7 +44,4 @@ silero-vad: backend-assets/lib/libonnxruntime.so.1
|
||||
package:
|
||||
bash package.sh
|
||||
|
||||
build: silero-vad package
|
||||
|
||||
clean:
|
||||
rm -f silero-vad
|
||||
build: silero-vad package
|
||||
@@ -10,8 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
|
||||
cp -avf $CURDIR/silero-vad $CURDIR/package/
|
||||
cp -avf $CURDIR/run.sh $CURDIR/package/
|
||||
cp -avrf $CURDIR/silero-vad $CURDIR/package/
|
||||
cp -avrf $CURDIR/run.sh $CURDIR/package/
|
||||
cp -rfLv $CURDIR/backend-assets/lib/* $CURDIR/package/lib/
|
||||
|
||||
# Detect architecture and copy appropriate libraries
|
||||
|
||||
2
backend/go/stablediffusion-ggml/.gitignore
vendored
2
backend/go/stablediffusion-ggml/.gitignore
vendored
@@ -2,5 +2,5 @@ package/
|
||||
sources/
|
||||
.cache/
|
||||
build/
|
||||
*.so
|
||||
libgosd.so
|
||||
stablediffusion-ggml
|
||||
|
||||
@@ -66,18 +66,15 @@ sources/stablediffusion-ggml.cpp:
|
||||
git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
|
||||
git submodule update --init --recursive --depth 1 --single-branch
|
||||
|
||||
# Detect OS
|
||||
UNAME_S := $(shell uname -s)
|
||||
libgosd.so: sources/stablediffusion-ggml.cpp CMakeLists.txt gosd.cpp gosd.h
|
||||
mkdir -p build && \
|
||||
cd build && \
|
||||
cmake .. $(CMAKE_ARGS) && \
|
||||
cmake --build . --config Release -j$(JOBS) && \
|
||||
cd .. && \
|
||||
mv build/libgosd.so ./
|
||||
|
||||
# Only build CPU variants on Linux
|
||||
ifeq ($(UNAME_S),Linux)
|
||||
VARIANT_TARGETS = libgosd-avx.so libgosd-avx2.so libgosd-avx512.so libgosd-fallback.so
|
||||
else
|
||||
# On non-Linux (e.g., Darwin), build only fallback variant
|
||||
VARIANT_TARGETS = libgosd-fallback.so
|
||||
endif
|
||||
|
||||
stablediffusion-ggml: main.go gosd.go $(VARIANT_TARGETS)
|
||||
stablediffusion-ggml: main.go gosd.go libgosd.so
|
||||
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
|
||||
|
||||
package: stablediffusion-ggml
|
||||
@@ -85,46 +82,5 @@ package: stablediffusion-ggml
|
||||
|
||||
build: package
|
||||
|
||||
clean: purge
|
||||
rm -rf libgosd*.so stablediffusion-ggml package sources
|
||||
|
||||
purge:
|
||||
rm -rf build*
|
||||
|
||||
# Build all variants (Linux only)
|
||||
ifeq ($(UNAME_S),Linux)
|
||||
libgosd-avx.so: sources/stablediffusion-ggml.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I stablediffusion-ggml build info:avx${RESET})
|
||||
SO_TARGET=libgosd-avx.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) libgosd-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgosd-avx2.so: sources/stablediffusion-ggml.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I stablediffusion-ggml build info:avx2${RESET})
|
||||
SO_TARGET=libgosd-avx2.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on -DGGML_BMI2=on" $(MAKE) libgosd-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgosd-avx512.so: sources/stablediffusion-ggml.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I stablediffusion-ggml build info:avx512${RESET})
|
||||
SO_TARGET=libgosd-avx512.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on -DGGML_BMI2=on" $(MAKE) libgosd-custom
|
||||
rm -rfv build*
|
||||
endif
|
||||
|
||||
# Build fallback variant (all platforms)
|
||||
libgosd-fallback.so: sources/stablediffusion-ggml.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I stablediffusion-ggml build info:fallback${RESET})
|
||||
SO_TARGET=libgosd-fallback.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) libgosd-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgosd-custom: CMakeLists.txt gosd.cpp gosd.h
|
||||
mkdir -p build-$(SO_TARGET) && \
|
||||
cd build-$(SO_TARGET) && \
|
||||
cmake .. $(CMAKE_ARGS) && \
|
||||
cmake --build . --config Release -j$(JOBS) && \
|
||||
cd .. && \
|
||||
mv build-$(SO_TARGET)/libgosd.so ./$(SO_TARGET)
|
||||
|
||||
all: stablediffusion-ggml package
|
||||
clean:
|
||||
rm -rf libgosd.so build stablediffusion-ggml package sources
|
||||
|
||||
@@ -2,7 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
@@ -18,13 +17,7 @@ type LibFuncs struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Get library name from environment variable, default to fallback
|
||||
libName := os.Getenv("SD_LIBRARY")
|
||||
if libName == "" {
|
||||
libName = "./libgosd-fallback.so"
|
||||
}
|
||||
|
||||
gosd, err := purego.Dlopen(libName, purego.RTLD_NOW|purego.RTLD_GLOBAL)
|
||||
gosd, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ REPO_ROOT="${CURDIR}/../../.."
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
|
||||
cp -avf $CURDIR/libgosd-*.so $CURDIR/package/
|
||||
cp -avf $CURDIR/libgosd.so $CURDIR/package/
|
||||
cp -avf $CURDIR/stablediffusion-ggml $CURDIR/package/
|
||||
cp -fv $CURDIR/run.sh $CURDIR/package/
|
||||
|
||||
|
||||
@@ -1,52 +1,14 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
# Get the absolute current dir where the script is located
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
|
||||
cd /
|
||||
|
||||
echo "CPU info:"
|
||||
if [ "$(uname)" != "Darwin" ]; then
|
||||
grep -e "model\sname" /proc/cpuinfo | head -1
|
||||
grep -e "flags" /proc/cpuinfo | head -1
|
||||
fi
|
||||
|
||||
LIBRARY="$CURDIR/libgosd-fallback.so"
|
||||
|
||||
if [ "$(uname)" != "Darwin" ]; then
|
||||
if grep -q -e "\savx\s" /proc/cpuinfo ; then
|
||||
echo "CPU: AVX found OK"
|
||||
if [ -e $CURDIR/libgosd-avx.so ]; then
|
||||
LIBRARY="$CURDIR/libgosd-avx.so"
|
||||
fi
|
||||
fi
|
||||
|
||||
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
|
||||
echo "CPU: AVX2 found OK"
|
||||
if [ -e $CURDIR/libgosd-avx2.so ]; then
|
||||
LIBRARY="$CURDIR/libgosd-avx2.so"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check avx 512
|
||||
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
|
||||
echo "CPU: AVX512F found OK"
|
||||
if [ -e $CURDIR/libgosd-avx512.so ]; then
|
||||
LIBRARY="$CURDIR/libgosd-avx512.so"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
|
||||
export SD_LIBRARY=$LIBRARY
|
||||
|
||||
# If there is a lib/ld.so, use it
|
||||
if [ -f $CURDIR/lib/ld.so ]; then
|
||||
echo "Using lib/ld.so"
|
||||
echo "Using library: $LIBRARY"
|
||||
exec $CURDIR/lib/ld.so $CURDIR/stablediffusion-ggml "$@"
|
||||
fi
|
||||
|
||||
echo "Using library: $LIBRARY"
|
||||
exec $CURDIR/stablediffusion-ggml "$@"
|
||||
exec $CURDIR/stablediffusion-ggml "$@"
|
||||
9
backend/go/voxtral/.gitignore
vendored
9
backend/go/voxtral/.gitignore
vendored
@@ -1,9 +0,0 @@
|
||||
.cache/
|
||||
sources/
|
||||
build/
|
||||
build-*/
|
||||
package/
|
||||
voxtral
|
||||
*.so
|
||||
*.dylib
|
||||
compile_commands.json
|
||||
@@ -1,84 +0,0 @@
|
||||
cmake_minimum_required(VERSION 3.12)
|
||||
|
||||
if(USE_METAL)
|
||||
project(govoxtral LANGUAGES C OBJC)
|
||||
else()
|
||||
project(govoxtral LANGUAGES C)
|
||||
endif()
|
||||
|
||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
|
||||
|
||||
# Workaround: CMake + GCC linker depfile generation fails for MODULE libraries
|
||||
set(CMAKE_C_LINKER_DEPFILE_SUPPORTED FALSE)
|
||||
|
||||
# Build voxtral.c as a library
|
||||
set(VOXTRAL_SOURCES
|
||||
sources/voxtral.c/voxtral.c
|
||||
sources/voxtral.c/voxtral_kernels.c
|
||||
sources/voxtral.c/voxtral_audio.c
|
||||
sources/voxtral.c/voxtral_encoder.c
|
||||
sources/voxtral.c/voxtral_decoder.c
|
||||
sources/voxtral.c/voxtral_tokenizer.c
|
||||
sources/voxtral.c/voxtral_safetensors.c
|
||||
)
|
||||
|
||||
# Metal GPU acceleration (macOS arm64 only)
|
||||
if(USE_METAL)
|
||||
# Generate embedded shader header from .metal source via xxd
|
||||
add_custom_command(
|
||||
OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/sources/voxtral.c/voxtral_shaders_source.h
|
||||
COMMAND xxd -i voxtral_shaders.metal > voxtral_shaders_source.h
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sources/voxtral.c
|
||||
DEPENDS sources/voxtral.c/voxtral_shaders.metal
|
||||
COMMENT "Generating embedded Metal shaders header"
|
||||
)
|
||||
list(APPEND VOXTRAL_SOURCES sources/voxtral.c/voxtral_metal.m)
|
||||
set_source_files_properties(sources/voxtral.c/voxtral_metal.m PROPERTIES
|
||||
COMPILE_FLAGS "-fobjc-arc"
|
||||
)
|
||||
endif()
|
||||
|
||||
add_library(govoxtral MODULE csrc/govoxtral.c ${VOXTRAL_SOURCES})
|
||||
|
||||
target_include_directories(govoxtral PRIVATE sources/voxtral.c csrc)
|
||||
|
||||
target_compile_options(govoxtral PRIVATE -O3 -ffast-math)
|
||||
|
||||
if(USE_METAL)
|
||||
target_compile_definitions(govoxtral PRIVATE USE_BLAS USE_METAL ACCELERATE_NEW_LAPACK)
|
||||
target_link_libraries(govoxtral PRIVATE
|
||||
"-framework Accelerate"
|
||||
"-framework Metal"
|
||||
"-framework MetalPerformanceShaders"
|
||||
"-framework MetalPerformanceShadersGraph"
|
||||
"-framework Foundation"
|
||||
"-framework AudioToolbox"
|
||||
"-framework CoreFoundation"
|
||||
m
|
||||
)
|
||||
# Ensure the generated shader header is built before compiling
|
||||
target_sources(govoxtral PRIVATE
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/sources/voxtral.c/voxtral_shaders_source.h
|
||||
)
|
||||
elseif(USE_OPENBLAS)
|
||||
# Try to find OpenBLAS; use it if available, otherwise fall back to pure C
|
||||
find_package(BLAS)
|
||||
if(BLAS_FOUND)
|
||||
target_compile_definitions(govoxtral PRIVATE USE_BLAS USE_OPENBLAS)
|
||||
target_link_libraries(govoxtral PRIVATE ${BLAS_LIBRARIES} m)
|
||||
target_include_directories(govoxtral PRIVATE /usr/include/openblas)
|
||||
else()
|
||||
message(WARNING "OpenBLAS requested but not found, building without BLAS")
|
||||
target_link_libraries(govoxtral PRIVATE m)
|
||||
endif()
|
||||
elseif(APPLE)
|
||||
# macOS without Metal: use Accelerate framework
|
||||
target_compile_definitions(govoxtral PRIVATE USE_BLAS ACCELERATE_NEW_LAPACK)
|
||||
target_link_libraries(govoxtral PRIVATE "-framework Accelerate" m)
|
||||
else()
|
||||
target_link_libraries(govoxtral PRIVATE m)
|
||||
endif()
|
||||
|
||||
set_property(TARGET govoxtral PROPERTY C_STANDARD 11)
|
||||
set_target_properties(govoxtral PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
|
||||
@@ -1,107 +0,0 @@
|
||||
# Makefile for the voxtral transcription backend.
# Fetches antirez/voxtral.c at a pinned commit, builds a single shared
# library (libgovoxtral.so / .dylib), then a static Go binary on top.
.NOTPARALLEL:

CMAKE_ARGS?=
BUILD_TYPE?=
NATIVE?=true

GOCMD?=go
GO_TAGS?=
JOBS?=$(shell nproc --ignore=1 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4)

# voxtral.c version
VOXTRAL_REPO?=https://github.com/antirez/voxtral.c
VOXTRAL_VERSION?=134d366c24d20c64b614a3dcc8bda2a6922d077d

# Detect OS
UNAME_S := $(shell uname -s)

# Shared library extension
ifeq ($(UNAME_S),Darwin)
SO_EXT=dylib
else
SO_EXT=so
endif

SO_TARGET?=libgovoxtral.$(SO_EXT)

CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF

# Non-native builds target the portable x86-64 baseline (Linux only).
ifeq ($(NATIVE),false)
ifneq ($(UNAME_S),Darwin)
CMAKE_ARGS+=-DCMAKE_C_FLAGS="-march=x86-64"
endif
endif

ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DUSE_OPENBLAS=OFF
else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DUSE_OPENBLAS=OFF
else ifeq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DUSE_OPENBLAS=OFF -DUSE_METAL=ON
else ifeq ($(UNAME_S),Darwin)
# Default on macOS: use Accelerate (no OpenBLAS needed)
CMAKE_ARGS+=-DUSE_OPENBLAS=OFF
else
CMAKE_ARGS+=-DUSE_OPENBLAS=ON
endif

# Single library target
ifeq ($(UNAME_S),Darwin)
VARIANT_TARGETS = libgovoxtral.dylib
else
VARIANT_TARGETS = libgovoxtral.so
endif

# Shallow checkout of the upstream sources at the pinned commit.
sources/voxtral.c:
	mkdir -p sources/voxtral.c
	cd sources/voxtral.c && \
	git init && \
	git remote add origin $(VOXTRAL_REPO) && \
	git fetch origin && \
	git checkout $(VOXTRAL_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

# The Go binary is CGO-free; the C library is loaded at runtime via purego.
voxtral: main.go govoxtral.go $(VARIANT_TARGETS)
	CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o voxtral ./

package: voxtral
	bash package.sh

build: package

clean: purge
	rm -rf libgovoxtral.so libgovoxtral.dylib package sources/voxtral.c voxtral

purge:
	rm -rf build*

# Build single library
ifeq ($(UNAME_S),Darwin)
libgovoxtral.dylib: sources/voxtral.c
	$(MAKE) purge
	$(info Building voxtral: darwin)
	SO_TARGET=libgovoxtral.dylib NATIVE=true $(MAKE) libgovoxtral-custom
	rm -rfv build*
else
libgovoxtral.so: sources/voxtral.c
	$(MAKE) purge
	$(info Building voxtral)
	SO_TARGET=libgovoxtral.so $(MAKE) libgovoxtral-custom
	rm -rfv build*
endif

# Configure+build in an out-of-tree directory, then move the produced
# library (whatever its platform extension) to the requested SO_TARGET name.
libgovoxtral-custom: CMakeLists.txt csrc/govoxtral.c csrc/govoxtral.h
	mkdir -p build-$(SO_TARGET) && \
	cd build-$(SO_TARGET) && \
	cmake .. $(CMAKE_ARGS) && \
	cmake --build . --config Release -j$(JOBS) && \
	cd .. && \
	(mv build-$(SO_TARGET)/libgovoxtral.so ./$(SO_TARGET) 2>/dev/null || \
	mv build-$(SO_TARGET)/libgovoxtral.dylib ./$(SO_TARGET) 2>/dev/null)

test: voxtral
	@echo "Running voxtral tests..."
	bash test.sh
	@echo "voxtral tests completed."

all: voxtral package
|
||||
@@ -1,62 +0,0 @@
|
||||
#include "govoxtral.h"
|
||||
#include "voxtral.h"
|
||||
#include "voxtral_audio.h"
|
||||
#ifdef USE_METAL
|
||||
#include "voxtral_metal.h"
|
||||
#endif
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
static vox_ctx_t *ctx = NULL;
|
||||
static char *last_result = NULL;
|
||||
static int metal_initialized = 0;
|
||||
|
||||
int load_model(const char *model_dir) {
|
||||
if (ctx != NULL) {
|
||||
vox_free(ctx);
|
||||
ctx = NULL;
|
||||
}
|
||||
|
||||
#ifdef USE_METAL
|
||||
if (!metal_initialized) {
|
||||
vox_metal_init();
|
||||
metal_initialized = 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
ctx = vox_load(model_dir);
|
||||
if (ctx == NULL) {
|
||||
fprintf(stderr, "error: failed to load voxtral model from %s\n", model_dir);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
const char *transcribe(const char *wav_path) {
|
||||
if (ctx == NULL) {
|
||||
fprintf(stderr, "error: model not loaded\n");
|
||||
return "";
|
||||
}
|
||||
|
||||
if (last_result != NULL) {
|
||||
free(last_result);
|
||||
last_result = NULL;
|
||||
}
|
||||
|
||||
last_result = vox_transcribe(ctx, wav_path);
|
||||
if (last_result == NULL) {
|
||||
fprintf(stderr, "error: transcription failed for %s\n", wav_path);
|
||||
return "";
|
||||
}
|
||||
|
||||
return last_result;
|
||||
}
|
||||
|
||||
void free_result(void) {
|
||||
if (last_result != NULL) {
|
||||
free(last_result);
|
||||
last_result = NULL;
|
||||
}
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
#ifndef GOVOXTRAL_H
#define GOVOXTRAL_H

/* C API of the govoxtral shim, bound from Go via purego. */

/* Load the Voxtral model from model_dir; returns 0 on success, 1 on failure. */
extern int load_model(const char *model_dir);

/* Transcribe the WAV file at wav_path. Returns a library-owned buffer
 * (release with free_result), or "" on error. */
extern const char *transcribe(const char *wav_path);

/* Free the buffer returned by the last transcribe() call. */
extern void free_result(void);

#endif /* GOVOXTRAL_H */
|
||||
@@ -1,60 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
CppLoadModel func(modelDir string) int
|
||||
CppTranscribe func(wavPath string) string
|
||||
CppFreeResult func()
|
||||
)
|
||||
|
||||
type Voxtral struct {
|
||||
base.SingleThread
|
||||
}
|
||||
|
||||
func (v *Voxtral) Load(opts *pb.ModelOptions) error {
|
||||
if ret := CppLoadModel(opts.ModelFile); ret != 0 {
|
||||
return fmt.Errorf("failed to load Voxtral model from %s", opts.ModelFile)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *Voxtral) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
|
||||
dir, err := os.MkdirTemp("", "voxtral")
|
||||
if err != nil {
|
||||
return pb.TranscriptResult{}, err
|
||||
}
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
convertedPath := dir + "/converted.wav"
|
||||
|
||||
if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
|
||||
return pb.TranscriptResult{}, err
|
||||
}
|
||||
|
||||
result := strings.Clone(CppTranscribe(convertedPath))
|
||||
CppFreeResult()
|
||||
|
||||
text := strings.TrimSpace(result)
|
||||
|
||||
segments := []*pb.TranscriptSegment{}
|
||||
if text != "" {
|
||||
segments = append(segments, &pb.TranscriptSegment{
|
||||
Id: 0,
|
||||
Text: text,
|
||||
})
|
||||
}
|
||||
|
||||
return pb.TranscriptResult{
|
||||
Segments: segments,
|
||||
Text: text,
|
||||
}, nil
|
||||
}
|
||||
@@ -1,53 +0,0 @@
|
||||
package main
|
||||
|
||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("addr", "localhost:50051", "the address to connect to")
|
||||
)
|
||||
|
||||
// LibFuncs pairs a Go function pointer with the exported C symbol name it
// is bound to via purego.RegisterLibFunc.
type LibFuncs struct {
	FuncPtr any
	Name    string
}
|
||||
|
||||
func main() {
|
||||
// Get library name from environment variable, default to fallback
|
||||
libName := os.Getenv("VOXTRAL_LIBRARY")
|
||||
if libName == "" {
|
||||
if runtime.GOOS == "darwin" {
|
||||
libName = "./libgovoxtral.dylib"
|
||||
} else {
|
||||
libName = "./libgovoxtral.so"
|
||||
}
|
||||
}
|
||||
|
||||
gosd, err := purego.Dlopen(libName, purego.RTLD_NOW|purego.RTLD_GLOBAL)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
libFuncs := []LibFuncs{
|
||||
{&CppLoadModel, "load_model"},
|
||||
{&CppTranscribe, "transcribe"},
|
||||
{&CppFreeResult, "free_result"},
|
||||
}
|
||||
|
||||
for _, lf := range libFuncs {
|
||||
purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
|
||||
}
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if err := grpc.StartServer(*addr, &Voxtral{}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -1,68 +0,0 @@
|
||||
#!/bin/bash

# Script to copy the appropriate libraries based on architecture

set -e

CURDIR=$(dirname "$(realpath $0)")
REPO_ROOT="${CURDIR}/../../.."

# Create lib directory
mkdir -p $CURDIR/package/lib

cp -avf $CURDIR/voxtral $CURDIR/package/
# FIX: the Makefile builds an unsuffixed library (libgovoxtral.so / .dylib);
# the dash-suffixed variant globs below never matched it, so the library was
# missing from the package. Copy both forms (each is optional).
cp -fv $CURDIR/libgovoxtral.so $CURDIR/package/ 2>/dev/null || true
cp -fv $CURDIR/libgovoxtral.dylib $CURDIR/package/ 2>/dev/null || true
cp -fv $CURDIR/libgovoxtral-*.so $CURDIR/package/ 2>/dev/null || true
cp -fv $CURDIR/libgovoxtral-*.dylib $CURDIR/package/ 2>/dev/null || true
cp -fv $CURDIR/run.sh $CURDIR/package/

# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
    # x86_64 architecture
    echo "Detected x86_64 architecture, copying x86_64 libraries..."
    cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
    cp -arfLv /lib/x86_64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
    # OpenBLAS if available
    if [ -f /usr/lib/x86_64-linux-gnu/libopenblas.so.0 ]; then
        cp -arfLv /usr/lib/x86_64-linux-gnu/libopenblas.so.0 $CURDIR/package/lib/
    fi
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
    # ARM64 architecture
    echo "Detected ARM64 architecture, copying ARM64 libraries..."
    cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
    cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
    # OpenBLAS if available
    if [ -f /usr/lib/aarch64-linux-gnu/libopenblas.so.0 ]; then
        cp -arfLv /usr/lib/aarch64-linux-gnu/libopenblas.so.0 $CURDIR/package/lib/
    fi
elif [ $(uname -s) = "Darwin" ]; then
    echo "Detected Darwin — system frameworks linked dynamically, no bundled libs needed"
else
    echo "Error: Could not detect architecture"
    exit 1
fi

# Package GPU libraries based on BUILD_TYPE
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
if [ -f "$GPU_LIB_SCRIPT" ]; then
    echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
    source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
    package_gpu_libs
fi

echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/
|
||||
@@ -1,49 +0,0 @@
|
||||
#!/bin/bash
# Select the best available libgovoxtral shared library for this OS/CPU,
# export it via VOXTRAL_LIBRARY, and exec the voxtral backend.
set -ex

# Get the absolute current dir where the script is located
CURDIR=$(dirname "$(realpath $0)")

cd /

echo "CPU info:"
if [ "$(uname)" != "Darwin" ]; then
    grep -e "model\sname" /proc/cpuinfo | head -1
    grep -e "flags" /proc/cpuinfo | head -1
fi

if [ "$(uname)" = "Darwin" ]; then
    # macOS: single dylib (Metal or Accelerate).
    # FIX: the Makefile builds libgovoxtral.dylib (no variant suffix); the
    # previous default (libgovoxtral-fallback.dylib) is never built. Prefer
    # the suffixed name only if it actually exists.
    LIBRARY="$CURDIR/libgovoxtral.dylib"
    if [ -e "$CURDIR/libgovoxtral-fallback.dylib" ]; then
        LIBRARY="$CURDIR/libgovoxtral-fallback.dylib"
    fi
    export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH
else
    # FIX: default to the unsuffixed library the Makefile actually produces;
    # keep preferring CPU-variant builds when they are present.
    LIBRARY="$CURDIR/libgovoxtral.so"
    if [ -e "$CURDIR/libgovoxtral-fallback.so" ]; then
        LIBRARY="$CURDIR/libgovoxtral-fallback.so"
    fi

    if grep -q -e "\savx\s" /proc/cpuinfo ; then
        echo "CPU: AVX found OK"
        if [ -e $CURDIR/libgovoxtral-avx.so ]; then
            LIBRARY="$CURDIR/libgovoxtral-avx.so"
        fi
    fi

    if grep -q -e "\savx2\s" /proc/cpuinfo ; then
        echo "CPU: AVX2 found OK"
        if [ -e $CURDIR/libgovoxtral-avx2.so ]; then
            LIBRARY="$CURDIR/libgovoxtral-avx2.so"
        fi
    fi

    export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
fi

export VOXTRAL_LIBRARY=$LIBRARY

# If there is a lib/ld.so, use it (Linux only)
if [ -f $CURDIR/lib/ld.so ]; then
    echo "Using lib/ld.so"
    echo "Using library: $LIBRARY"
    exec $CURDIR/lib/ld.so $CURDIR/voxtral "$@"
fi

echo "Using library: $LIBRARY"
exec $CURDIR/voxtral "$@"
|
||||
@@ -1,48 +0,0 @@
|
||||
#!/bin/bash
# Run the voxtral backend Go tests, fetching the model artifacts from
# Hugging Face on first use.
set -e

CURDIR=$(dirname "$(realpath $0)")

echo "Running voxtral backend tests..."

# The test requires:
# - VOXTRAL_MODEL_DIR: path to directory containing consolidated.safetensors + tekken.json
# - VOXTRAL_BINARY: path to the voxtral binary (defaults to ./voxtral)
#
# Tests that require the model will be skipped if VOXTRAL_MODEL_DIR is not set.

cd "$CURDIR"
export VOXTRAL_MODEL_DIR="${VOXTRAL_MODEL_DIR:-./voxtral-model}"

if [ ! -d "$VOXTRAL_MODEL_DIR" ]; then
    echo "Creating voxtral-model directory for tests..."
    mkdir -p "$VOXTRAL_MODEL_DIR"
    MODEL_ID="mistralai/Voxtral-Mini-4B-Realtime-2602"
    echo "Model: ${MODEL_ID}"
    echo ""

    BASE_URL="https://huggingface.co/${MODEL_ID}/resolve/main"

    # Fetch each required artifact unless it is already present on disk.
    for file in "consolidated.safetensors" "params.json" "tekken.json"; do
        dest="${VOXTRAL_MODEL_DIR}/${file}"
        if [ -f "${dest}" ]; then
            echo "  [skip] ${file} (already exists)"
        else
            echo "  [download] ${file}..."
            curl -L -o "${dest}" "${BASE_URL}/${file}" --progress-bar
            echo "  [done] ${file}"
        fi
    done
fi

# Run Go tests
go test -v -timeout 300s ./...

echo "All voxtral tests passed."
|
||||
@@ -1,201 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
const (
|
||||
testAddr = "localhost:50051"
|
||||
sampleAudio = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-ASR-Repo/asr_en.wav"
|
||||
startupWait = 5 * time.Second
|
||||
)
|
||||
|
||||
// skipIfNoModel returns the model directory from VOXTRAL_MODEL_DIR, or
// skips the calling test when the variable is unset or the weights file
// is missing from that directory.
func skipIfNoModel(t *testing.T) string {
	t.Helper()
	dir := os.Getenv("VOXTRAL_MODEL_DIR")
	if dir == "" {
		t.Skip("VOXTRAL_MODEL_DIR not set, skipping test (set to voxtral model directory)")
	}
	if _, err := os.Stat(filepath.Join(dir, "consolidated.safetensors")); os.IsNotExist(err) {
		t.Skipf("Model file not found in %s, skipping", dir)
	}
	return dir
}
|
||||
|
||||
func startServer(t *testing.T) *exec.Cmd {
|
||||
t.Helper()
|
||||
binary := os.Getenv("VOXTRAL_BINARY")
|
||||
if binary == "" {
|
||||
binary = "./voxtral"
|
||||
}
|
||||
if _, err := os.Stat(binary); os.IsNotExist(err) {
|
||||
t.Skipf("Backend binary not found at %s, skipping", binary)
|
||||
}
|
||||
cmd := exec.Command(binary, "--addr", testAddr)
|
||||
cmd.Stdout = os.Stderr
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Start(); err != nil {
|
||||
t.Fatalf("Failed to start server: %v", err)
|
||||
}
|
||||
time.Sleep(startupWait)
|
||||
return cmd
|
||||
}
|
||||
|
||||
// stopServer kills the backend process if it was started and reaps it.
func stopServer(cmd *exec.Cmd) {
	if cmd == nil || cmd.Process == nil {
		return
	}
	cmd.Process.Kill()
	cmd.Wait()
}
|
||||
|
||||
func dialGRPC(t *testing.T) *grpc.ClientConn {
|
||||
t.Helper()
|
||||
conn, err := grpc.Dial(testAddr,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(50*1024*1024),
|
||||
grpc.MaxCallSendMsgSize(50*1024*1024),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to dial gRPC: %v", err)
|
||||
}
|
||||
return conn
|
||||
}
|
||||
|
||||
// downloadFile fetches url over HTTP and writes the response body to dest.
// A non-200 status is treated as an error.
func downloadFile(url, dest string) error {
	resp, err := http.Get(url)
	if err != nil {
		return fmt.Errorf("HTTP GET failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("bad status: %s", resp.Status)
	}

	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()

	_, err = io.Copy(out, resp.Body)
	return err
}
|
||||
|
||||
func TestServerHealth(t *testing.T) {
|
||||
cmd := startServer(t)
|
||||
defer stopServer(cmd)
|
||||
|
||||
conn := dialGRPC(t)
|
||||
defer conn.Close()
|
||||
|
||||
client := pb.NewBackendClient(conn)
|
||||
resp, err := client.Health(context.Background(), &pb.HealthMessage{})
|
||||
if err != nil {
|
||||
t.Fatalf("Health check failed: %v", err)
|
||||
}
|
||||
if string(resp.Message) != "OK" {
|
||||
t.Fatalf("Expected OK, got %s", string(resp.Message))
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadModel(t *testing.T) {
|
||||
modelDir := skipIfNoModel(t)
|
||||
cmd := startServer(t)
|
||||
defer stopServer(cmd)
|
||||
|
||||
conn := dialGRPC(t)
|
||||
defer conn.Close()
|
||||
|
||||
client := pb.NewBackendClient(conn)
|
||||
resp, err := client.LoadModel(context.Background(), &pb.ModelOptions{
|
||||
ModelFile: modelDir,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("LoadModel failed: %v", err)
|
||||
}
|
||||
if !resp.Success {
|
||||
t.Fatalf("LoadModel returned failure: %s", resp.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAudioTranscription exercises the full pipeline: download a sample WAV,
// start the backend, load the model, transcribe the audio, and sanity-check
// the returned text and segments. Skipped when no model is available.
func TestAudioTranscription(t *testing.T) {
	modelDir := skipIfNoModel(t)

	tmpDir, err := os.MkdirTemp("", "voxtral-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpDir)

	// Download sample audio — JFK "ask not what your country can do for you" clip
	// NOTE(review): sampleAudio actually points at a Qwen3-ASR English sample
	// URL; the JFK description above may be stale — confirm.
	audioFile := filepath.Join(tmpDir, "sample.wav")
	t.Log("Downloading sample audio...")
	if err := downloadFile(sampleAudio, audioFile); err != nil {
		t.Fatalf("Failed to download sample audio: %v", err)
	}

	cmd := startServer(t)
	defer stopServer(cmd)

	conn := dialGRPC(t)
	defer conn.Close()

	client := pb.NewBackendClient(conn)

	// Load model
	loadResp, err := client.LoadModel(context.Background(), &pb.ModelOptions{
		ModelFile: modelDir,
	})
	if err != nil {
		t.Fatalf("LoadModel failed: %v", err)
	}
	if !loadResp.Success {
		t.Fatalf("LoadModel returned failure: %s", loadResp.Message)
	}

	// Transcribe
	transcriptResp, err := client.AudioTranscription(context.Background(), &pb.TranscriptRequest{
		Dst: audioFile,
	})
	if err != nil {
		t.Fatalf("AudioTranscription failed: %v", err)
	}
	if transcriptResp == nil {
		t.Fatal("AudioTranscription returned nil")
	}

	t.Logf("Transcribed text: %s", transcriptResp.Text)
	t.Logf("Number of segments: %d", len(transcriptResp.Segments))

	if transcriptResp.Text == "" {
		t.Fatal("Transcription returned empty text")
	}

	// Collect full text plus all segment texts, lower-cased, for keyword checks.
	allText := strings.ToLower(transcriptResp.Text)
	for _, seg := range transcriptResp.Segments {
		allText += " " + strings.ToLower(seg.Text)
	}
	t.Logf("All text: %s", allText)

	// The sample clip is expected to contain the word "big".
	if !strings.Contains(allText, "big") {
		t.Errorf("Expected 'big' in transcription, got: %s", allText)
	}

	// The sample audio should contain recognizable speech
	if len(allText) < 10 {
		t.Errorf("Transcription too short: %q", allText)
	}
}
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# whisper.cpp version
|
||||
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
|
||||
WHISPER_CPP_VERSION?=21411d81ea736ed5d9cdea4df360d3c4b60a4adb
|
||||
WHISPER_CPP_VERSION?=aa1bc0d1a6dfd70dbb9f60c11df12441e03a9075
|
||||
SO_TARGET?=libgowhisper.so
|
||||
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
|
||||
@@ -78,7 +78,7 @@ package: whisper
|
||||
build: package
|
||||
|
||||
clean: purge
|
||||
rm -rf libgowhisper*.so package sources/whisper.cpp whisper
|
||||
rm -rf libgowhisper*.so sources/whisper.cpp whisper
|
||||
|
||||
purge:
|
||||
rm -rf build*
|
||||
@@ -88,19 +88,19 @@ ifeq ($(UNAME_S),Linux)
|
||||
libgowhisper-avx.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:avx${RESET})
|
||||
SO_TARGET=libgowhisper-avx.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) libgowhisper-custom
|
||||
SO_TARGET=libgowhisper-avx.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgowhisper-avx2.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:avx2${RESET})
|
||||
SO_TARGET=libgowhisper-avx2.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on -DGGML_BMI2=on" $(MAKE) libgowhisper-custom
|
||||
SO_TARGET=libgowhisper-avx2.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgowhisper-avx512.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:avx512${RESET})
|
||||
SO_TARGET=libgowhisper-avx512.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on -DGGML_BMI2=on" $(MAKE) libgowhisper-custom
|
||||
SO_TARGET=libgowhisper-avx512.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
endif
|
||||
|
||||
@@ -108,7 +108,7 @@ endif
|
||||
libgowhisper-fallback.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:fallback${RESET})
|
||||
SO_TARGET=libgowhisper-fallback.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) libgowhisper-custom
|
||||
SO_TARGET=libgowhisper-fallback.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgowhisper-custom: CMakeLists.txt gowhisper.cpp gowhisper.h
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,16 +0,0 @@
|
||||
# Makefile for the Python ACE-Step backend: venv install, proto cleanup,
# and test entry points.
.DEFAULT_GOAL := install

.PHONY: install
install:
	bash install.sh

# Remove generated gRPC protobuf stubs.
.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__

test: install
	bash test.sh
|
||||
@@ -1,472 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
LocalAI ACE-Step Backend
|
||||
|
||||
gRPC backend for ACE-Step 1.5 music generation. Aligns with upstream acestep API:
|
||||
- LoadModel: initializes AceStepHandler (DiT) and LLMHandler, parses Options.
|
||||
- SoundGeneration: uses create_sample (simple mode), format_sample (optional), then
|
||||
generate_music from acestep.inference. Writes first output to request.dst.
|
||||
- Fail hard: no fallback WAV on error; exceptions propagate to gRPC.
|
||||
"""
|
||||
from concurrent import futures
|
||||
import argparse
|
||||
import shutil
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import grpc
|
||||
from acestep.inference import (
|
||||
GenerationParams,
|
||||
GenerationConfig,
|
||||
generate_music,
|
||||
create_sample,
|
||||
format_sample,
|
||||
)
|
||||
from acestep.handler import AceStepHandler
|
||||
from acestep.llm_inference import LLMHandler
|
||||
from acestep.model_downloader import ensure_lm_model
|
||||
|
||||
|
||||
# One day in seconds — server keep-alive interval constant.
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# gRPC thread-pool size, overridable via PYTHON_GRPC_MAX_WORKERS.
MAX_WORKERS = int(os.environ.get("PYTHON_GRPC_MAX_WORKERS", "1"))

# Model name -> HuggingFace/ModelScope repo (from upstream api_server.py)
MODEL_REPO_MAPPING = {
    "acestep-v15-turbo": "ACE-Step/Ace-Step1.5",
    "acestep-5Hz-lm-0.6B": "ACE-Step/Ace-Step1.5",
    "acestep-5Hz-lm-1.7B": "ACE-Step/Ace-Step1.5",
    "vae": "ACE-Step/Ace-Step1.5",
    "Qwen3-Embedding-0.6B": "ACE-Step/Ace-Step1.5",
    "acestep-v15-base": "ACE-Step/acestep-v15-base",
    "acestep-v15-sft": "ACE-Step/acestep-v15-sft",
    "acestep-v15-turbo-shift3": "ACE-Step/acestep-v15-turbo-shift3",
    "acestep-5Hz-lm-4B": "ACE-Step/acestep-5Hz-lm-4B",
}
# NOTE(review): presumably the repo used when a model name is absent from
# MODEL_REPO_MAPPING — usage not visible in this chunk, confirm.
DEFAULT_REPO_ID = "ACE-Step/Ace-Step1.5"
|
||||
|
||||
def _is_float(s):
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except (ValueError, TypeError):
|
||||
return False
|
||||
|
||||
|
||||
def _is_int(s):
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except (ValueError, TypeError):
|
||||
return False
|
||||
|
||||
|
||||
def _parse_timesteps(s):
|
||||
if s is None or (isinstance(s, str) and not s.strip()):
|
||||
return None
|
||||
if isinstance(s, (list, tuple)):
|
||||
return [float(x) for x in s]
|
||||
try:
|
||||
return [float(x.strip()) for x in str(s).split(",") if x.strip()]
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
|
||||
|
||||
def _parse_options(opts_list):
|
||||
"""Parse repeated 'key:value' options into a dict. Coerce numeric and bool."""
|
||||
out = {}
|
||||
for opt in opts_list or []:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1)
|
||||
key = key.strip()
|
||||
value = value.strip()
|
||||
if _is_int(value):
|
||||
out[key] = int(value)
|
||||
elif _is_float(value):
|
||||
out[key] = float(value)
|
||||
elif value.lower() in ("true", "false"):
|
||||
out[key] = value.lower() == "true"
|
||||
else:
|
||||
out[key] = value
|
||||
return out
|
||||
|
||||
|
||||
def _generate_audio_sync(servicer, payload, dst_path):
    """
    Run full ACE-Step pipeline using acestep.inference:
    - If sample_mode/sample_query: create_sample() for caption/lyrics/metadata.
    - If use_format and caption/lyrics: format_sample().
    - Build GenerationParams and GenerationConfig, then generate_music().
    Writes the first generated audio to dst_path. Raises on failure.

    Args:
        servicer: BackendServicer carrying parsed model options and the
            DiT / LLM handlers.
        payload: per-request parameters; missing keys are backfilled from
            servicer.options (payload wins on conflict).
        dst_path: destination path the first generated audio is copied to.

    Raises:
        RuntimeError: when sampling fails, generation fails, or no audio
            file is produced.
    """

    opts = servicer.options
    dit_handler = servicer.dit_handler
    llm_handler = servicer.llm_handler

    # Backfill the request payload with model-level option defaults.
    for key, value in opts.items():
        if key not in payload:
            payload[key] = value

    def _opt(name, default):
        # Option lookup with a fallback default.
        return opts.get(name, default)

    # Language-model sampling parameters.
    lm_temperature = _opt("temperature", 0.85)
    lm_cfg_scale = _opt("lm_cfg_scale", _opt("cfg_scale", 2.0))
    lm_top_k = opts.get("top_k")
    lm_top_p = _opt("top_p", 0.9)
    # top_p >= 1.0 is normalised to None (treated as disabled below).
    if lm_top_p is not None and lm_top_p >= 1.0:
        lm_top_p = None
    inference_steps = _opt("inference_steps", 8)
    guidance_scale = _opt("guidance_scale", 7.0)
    batch_size = max(1, int(_opt("batch_size", 1)))

    # "Simple" mode: a free-text query drives LM-based sample creation.
    use_simple = bool(payload.get("sample_query") or payload.get("text"))
    sample_mode = use_simple and (payload.get("thinking") or payload.get("sample_mode"))
    sample_query = (payload.get("sample_query") or payload.get("text") or "").strip()
    use_format = bool(payload.get("use_format"))
    caption = (payload.get("prompt") or payload.get("caption") or "").strip()
    lyrics = (payload.get("lyrics") or "").strip()
    vocal_language = (payload.get("vocal_language") or "en").strip()
    instrumental = bool(payload.get("instrumental"))
    bpm = payload.get("bpm")
    key_scale = (payload.get("key_scale") or "").strip()
    time_signature = (payload.get("time_signature") or "").strip()
    audio_duration = payload.get("audio_duration")
    # Coerce the duration to float; unparseable values are discarded.
    if audio_duration is not None:
        try:
            audio_duration = float(audio_duration)
        except (TypeError, ValueError):
            audio_duration = None

    # Stage 1 (optional): LM-driven sample creation fills in caption,
    # lyrics and musical metadata from the free-text query.
    if sample_mode and llm_handler and getattr(llm_handler, "llm_initialized", False):
        # Crude language hint extraction from the query text.
        parsed_language = None
        if sample_query:
            for hint in ("english", "en", "chinese", "zh", "japanese", "ja"):
                if hint in sample_query.lower():
                    parsed_language = "en" if hint == "english" or hint == "en" else hint
                    break
        vocal_lang = vocal_language if vocal_language and vocal_language != "unknown" else parsed_language
        sample_result = create_sample(
            llm_handler=llm_handler,
            query=sample_query or "NO USER INPUT",
            instrumental=instrumental,
            vocal_language=vocal_lang,
            temperature=lm_temperature,
            top_k=lm_top_k,
            top_p=lm_top_p,
            use_constrained_decoding=True,
        )
        if not sample_result.success:
            raise RuntimeError(f"create_sample failed: {sample_result.error or sample_result.status_message}")
        # LM output takes precedence over request values where provided.
        caption = sample_result.caption or caption
        lyrics = sample_result.lyrics or lyrics
        bpm = sample_result.bpm
        key_scale = sample_result.keyscale or key_scale
        time_signature = sample_result.timesignature or time_signature
        if sample_result.duration is not None:
            audio_duration = sample_result.duration
        if getattr(sample_result, "language", None):
            vocal_language = sample_result.language

    # Stage 2 (optional): LM-driven reformatting of caption/lyrics, seeded
    # with whatever metadata is already known.
    if use_format and (caption or lyrics) and llm_handler and getattr(llm_handler, "llm_initialized", False):
        user_metadata = {}
        if bpm is not None:
            user_metadata["bpm"] = bpm
        if audio_duration is not None and float(audio_duration) > 0:
            user_metadata["duration"] = int(audio_duration)
        if key_scale:
            user_metadata["keyscale"] = key_scale
        if time_signature:
            user_metadata["timesignature"] = time_signature
        if vocal_language and vocal_language != "unknown":
            user_metadata["language"] = vocal_language
        format_result = format_sample(
            llm_handler=llm_handler,
            caption=caption,
            lyrics=lyrics,
            user_metadata=user_metadata if user_metadata else None,
            temperature=lm_temperature,
            top_k=lm_top_k,
            top_p=lm_top_p,
            use_constrained_decoding=True,
        )
        # Formatting failures are non-fatal: the unformatted values are kept.
        if format_result.success:
            caption = format_result.caption or caption
            lyrics = format_result.lyrics or lyrics
            if format_result.duration is not None:
                audio_duration = format_result.duration
            if format_result.bpm is not None:
                bpm = format_result.bpm
            if format_result.keyscale:
                key_scale = format_result.keyscale
            if format_result.timesignature:
                time_signature = format_result.timesignature
            if getattr(format_result, "language", None):
                vocal_language = format_result.language

    thinking = bool(payload.get("thinking"))
    # CoT metadata is skipped when sample creation already supplied it.
    use_cot_metas = not sample_mode
    params = GenerationParams(
        task_type=payload.get("task_type", "text2music"),
        instruction=payload.get("instruction", "Fill the audio semantic mask based on the given conditions:"),
        reference_audio=payload.get("reference_audio_path"),
        src_audio=payload.get("src_audio_path"),
        audio_codes=payload.get("audio_code_string", ""),
        caption=caption,
        lyrics=lyrics,
        # Empty or "[inst]"/"[instrumental]" lyrics imply an instrumental track.
        instrumental=instrumental or (not lyrics or str(lyrics).strip().lower() in ("[inst]", "[instrumental]")),
        vocal_language=vocal_language or "unknown",
        bpm=bpm,
        keyscale=key_scale,
        timesignature=time_signature,
        # -1.0 signals "no explicit duration" downstream.
        duration=float(audio_duration) if audio_duration and float(audio_duration) > 0 else -1.0,
        inference_steps=inference_steps,
        seed=int(payload.get("seed", -1)),
        guidance_scale=guidance_scale,
        use_adg=bool(payload.get("use_adg")),
        cfg_interval_start=float(payload.get("cfg_interval_start", 0.0)),
        cfg_interval_end=float(payload.get("cfg_interval_end", 1.0)),
        shift=float(payload.get("shift", 1.0)),
        infer_method=(payload.get("infer_method") or "ode").strip(),
        timesteps=_parse_timesteps(payload.get("timesteps")),
        repainting_start=float(payload.get("repainting_start", 0.0)),
        repainting_end=float(payload.get("repainting_end", -1)) if payload.get("repainting_end") is not None else -1,
        audio_cover_strength=float(payload.get("audio_cover_strength", 1.0)),
        thinking=thinking,
        lm_temperature=lm_temperature,
        lm_cfg_scale=lm_cfg_scale,
        lm_top_k=lm_top_k or 0,
        lm_top_p=lm_top_p if lm_top_p is not None and lm_top_p < 1.0 else 0.9,
        lm_negative_prompt=payload.get("lm_negative_prompt", "NO USER INPUT"),
        use_cot_metas=use_cot_metas,
        use_cot_caption=bool(payload.get("use_cot_caption", True)),
        use_cot_language=bool(payload.get("use_cot_language", True)),
        use_constrained_decoding=True,
    )

    config = GenerationConfig(
        batch_size=batch_size,
        allow_lm_batch=bool(payload.get("allow_lm_batch", False)),
        use_random_seed=bool(payload.get("use_random_seed", True)),
        seeds=payload.get("seeds"),
        lm_batch_chunk_size=max(1, int(payload.get("lm_batch_chunk_size", 8))),
        constrained_decoding_debug=bool(payload.get("constrained_decoding_debug")),
        audio_format=(payload.get("audio_format") or "flac").strip() or "flac",
    )

    # Generate into a scratch directory; only the first output is kept.
    save_dir = tempfile.mkdtemp(prefix="ace_step_")
    try:
        result = generate_music(
            dit_handler=dit_handler,
            # Only pass the LLM handler when it is actually initialised.
            llm_handler=llm_handler if (llm_handler and getattr(llm_handler, "llm_initialized", False)) else None,
            params=params,
            config=config,
            save_dir=save_dir,
            progress=None,
        )
        if not result.success:
            raise RuntimeError(result.error or result.status_message or "generate_music failed")

        audios = result.audios or []
        if not audios:
            raise RuntimeError("generate_music returned no audio")

        first_path = audios[0].get("path") or ""
        if not first_path or not os.path.isfile(first_path):
            raise RuntimeError("first generated audio path missing or not a file")

        shutil.copy2(first_path, dst_path)
    finally:
        # Best-effort cleanup of the scratch directory.
        try:
            shutil.rmtree(save_dir, ignore_errors=True)
        except Exception:
            pass
|
||||
|
||||
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
    """gRPC servicer bridging LocalAI to the ACE-Step music-generation stack.

    LoadModel creates a DiT (diffusion) handler and, optionally, a language
    model handler; SoundGeneration and TTS reuse them via
    ``_generate_audio_sync``.
    """

    def __init__(self):
        # All state is populated by LoadModel; empty/None until then.
        self.model_path = None
        self.model_dir = None
        self.checkpoint_dir = None
        self.project_root = None
        self.options = {}
        self.dit_handler = None
        self.llm_handler = None

    def Health(self, request, context):
        """Liveness probe: always reports OK."""
        return backend_pb2.Reply(message=b"OK")

    def LoadModel(self, request, context):
        """Initialize the DiT handler and (optionally) the LM handler.

        Checkpoints always live under the LocalAI models path
        (<model dir>/checkpoints), never inside the backend install dir.
        Returns a Result with success=False instead of raising.
        """
        try:
            self.options = _parse_options(list(getattr(request, "Options", []) or []))
            model_path = getattr(request, "ModelPath", None) or ""
            model_name = (request.Model or "").strip()
            model_file = (getattr(request, "ModelFile", None) or "").strip()

            # Model dir: where we store checkpoints (always under LocalAI
            # models path, never backend dir).
            if model_path and model_name:
                model_dir = os.path.join(model_path, model_name)
            elif model_file:
                model_dir = model_file
            else:
                model_dir = os.path.abspath(model_name or ".")
            self.model_dir = model_dir
            self.checkpoint_dir = os.path.join(model_dir, "checkpoints")
            self.project_root = model_dir
            self.model_path = os.path.join(
                self.checkpoint_dir,
                model_name or os.path.basename(model_dir.rstrip("/\\")),
            )

            config_path = model_name or os.path.basename(model_dir.rstrip("/\\"))
            os.makedirs(self.checkpoint_dir, exist_ok=True)

            self.dit_handler = AceStepHandler()
            # Patch handler so it uses our model dir instead of
            # site-packages/checkpoints.
            self.dit_handler._get_project_root = lambda: self.project_root
            device = self.options.get("device", "auto")
            use_flash = self.options.get("use_flash_attention", True)
            if isinstance(use_flash, str):
                use_flash = str(use_flash).lower() in ("1", "true", "yes")
            offload = self.options.get("offload_to_cpu", False)
            if isinstance(offload, str):
                offload = str(offload).lower() in ("1", "true", "yes")
            status_msg, ok = self.dit_handler.initialize_service(
                project_root=self.project_root,
                config_path=config_path,
                device=device,
                use_flash_attention=use_flash,
                compile_model=False,
                offload_to_cpu=offload,
                offload_dit_to_cpu=bool(self.options.get("offload_dit_to_cpu", False)),
            )
            if not ok:
                return backend_pb2.Result(success=False, message=f"DiT init failed: {status_msg}")

            self.llm_handler = None
            if self.options.get("init_lm", True):
                lm_model = self.options.get("lm_model_path", "acestep-5Hz-lm-0.6B")

                # Ensure LM model is downloaded before initializing; download
                # failures are non-fatal because the LM is optional.
                try:
                    from pathlib import Path
                    lm_success, lm_msg = ensure_lm_model(
                        model_name=lm_model,
                        checkpoints_dir=Path(self.checkpoint_dir),
                        prefer_source=None,  # Auto-detect HuggingFace vs ModelScope
                    )
                    if not lm_success:
                        print(f"[ace-step] Warning: LM model download failed: {lm_msg}", file=sys.stderr)
                        # Continue anyway - LLM initialization will fail gracefully
                    else:
                        print(f"[ace-step] LM model ready: {lm_msg}", file=sys.stderr)
                except Exception as e:
                    print(f"[ace-step] Warning: LM model download check failed: {e}", file=sys.stderr)
                    # Continue anyway - LLM initialization will fail gracefully

                self.llm_handler = LLMHandler()
                lm_backend = (self.options.get("lm_backend") or "vllm").strip().lower()
                if lm_backend not in ("vllm", "pt"):
                    lm_backend = "vllm"
                lm_status, lm_ok = self.llm_handler.initialize(
                    checkpoint_dir=self.checkpoint_dir,
                    lm_model_path=lm_model,
                    backend=lm_backend,
                    device=device,
                    offload_to_cpu=offload,
                    dtype=getattr(self.dit_handler, "dtype", None),
                )
                if not lm_ok:
                    self.llm_handler = None
                    print(f"[ace-step] LM init failed (optional): {lm_status}", file=sys.stderr)

            print(f"[ace-step] LoadModel: model={self.model_path}, options={list(self.options.keys())}", file=sys.stderr)
            return backend_pb2.Result(success=True, message="Model loaded successfully")
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"LoadModel error: {err}")

    def SoundGeneration(self, request, context):
        """Generate music to request.dst.

        Simple mode (request.text set) hands a free-text query to the sample
        pipeline; otherwise an explicit caption/lyrics payload is built.
        """
        if not request.dst:
            return backend_pb2.Result(success=False, message="request.dst is required")

        # Protobuf string fields default to "", so truthiness checks are safe.
        # NOTE: generated protobuf messages expose no GetLanguage()/GetCaption()
        # accessors; the previous `request.GetLanguage()` fallbacks raised
        # AttributeError whenever the field was empty, so plain defaults are
        # used instead.
        language = request.language or "en"
        use_simple = bool(request.text)
        if use_simple:
            payload = {
                "sample_query": request.text or "",
                "sample_mode": True,
                "thinking": True,
                "vocal_language": language,
                "instrumental": request.instrumental if request.HasField("instrumental") else False,
            }
        else:
            payload = {
                "prompt": request.caption or request.text,
                # was `request.lyrics or request.lyrics or ""` - same value twice
                "lyrics": request.lyrics or "",
                "thinking": request.think if request.HasField("think") else False,
                "vocal_language": language,
            }
        if request.HasField("bpm"):
            payload["bpm"] = request.bpm
        if request.HasField("keyscale") and request.keyscale:
            payload["key_scale"] = request.keyscale
        if request.HasField("timesignature") and request.timesignature:
            payload["time_signature"] = request.timesignature
        if request.HasField("duration") and request.duration:
            payload["audio_duration"] = int(request.duration) if request.duration else None
        if request.src:
            payload["src_audio_path"] = request.src

        _generate_audio_sync(self, payload, request.dst)
        return backend_pb2.Result(success=True, message="Sound generated successfully")

    def TTS(self, request, context):
        """TTS fallback: treats the text as a music sample query."""
        if not request.dst:
            return backend_pb2.Result(success=False, message="request.dst is required")
        payload = {
            "sample_query": request.text,
            "sample_mode": True,
            "thinking": False,
            "vocal_language": (request.language if request.language else "") or "en",
            "instrumental": False,
        }
        _generate_audio_sync(self, payload, request.dst)
        return backend_pb2.Result(success=True, message="TTS (music fallback) generated successfully")
|
||||
|
||||
|
||||
def serve(address):
    """Start the gRPC backend server on *address* and block forever.

    Installs SIGINT/SIGTERM handlers that stop the server, then sleeps in a
    loop (the classic grpc-python serving pattern).
    """
    import time  # hoisted: was re-imported on every loop iteration

    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
        options=[
            # Allow large request/response payloads (50 MiB).
            ("grpc.max_message_length", 50 * 1024 * 1024),
            ("grpc.max_send_message_length", 50 * 1024 * 1024),
            ("grpc.max_receive_message_length", 50 * 1024 * 1024),
        ],
    )
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print(f"[ace-step] Server listening on {address}", file=sys.stderr)

    def shutdown(sig, frame):
        # Stop immediately; in-flight RPCs are cancelled.
        server.stop(0)
        sys.exit(0)

    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGTERM, shutdown)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: parse the listen address and run the server.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--addr", default="localhost:50051", help="Listen address")
    cli_args = arg_parser.parse_args()
    serve(cli_args.addr)
|
||||
@@ -1,26 +0,0 @@
|
||||
#!/bin/bash
set -e

# Locate the shared backend helper library; the layout differs between the
# source tree and an installed backend bundle. Paths are quoted so the
# script survives directories containing spaces (the unquoted originals
# word-split).
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

PYTHON_VERSION="3.11"
PYTHON_PATCH="14"
PY_STANDALONE_TAG="20260203"

installRequirements

# Install ACE-Step from source on first run only; the clone directory doubles
# as the "already installed" marker.
if [ ! -d ACE-Step-1.5 ]; then
    git clone https://github.com/ace-step/ACE-Step-1.5
    cd ACE-Step-1.5/
    if [ "x${USE_PIP}" == "xtrue" ]; then
        pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --no-deps .
    else
        uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --no-deps .
    fi
fi
|
||||
|
||||
@@ -1,22 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
||||
torchaudio
|
||||
torchvision
|
||||
|
||||
# Core dependencies
|
||||
transformers>=4.51.0,<4.58.0
|
||||
diffusers
|
||||
gradio
|
||||
matplotlib>=3.7.5
|
||||
scipy>=1.10.1
|
||||
soundfile>=0.13.1
|
||||
loguru>=0.7.3
|
||||
einops>=0.8.1
|
||||
accelerate>=1.12.0
|
||||
fastapi>=0.110.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
numba>=0.63.1
|
||||
vector-quantize-pytorch>=1.27.15
|
||||
torchcodec>=0.9.1
|
||||
torchao
|
||||
modelscope
|
||||
@@ -1,22 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu128
|
||||
torch
|
||||
torchaudio
|
||||
torchvision
|
||||
|
||||
# Core dependencies
|
||||
transformers>=4.51.0,<4.58.0
|
||||
diffusers
|
||||
gradio>=6.5.1
|
||||
matplotlib>=3.7.5
|
||||
scipy>=1.10.1
|
||||
soundfile>=0.13.1
|
||||
loguru>=0.7.3
|
||||
einops>=0.8.1
|
||||
accelerate>=1.12.0
|
||||
fastapi>=0.110.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
numba>=0.63.1
|
||||
vector-quantize-pytorch>=1.27.15
|
||||
torchcodec>=0.9.1
|
||||
torchao
|
||||
modelscope
|
||||
@@ -1,22 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
torchvision
|
||||
|
||||
# Core dependencies
|
||||
transformers>=4.51.0,<4.58.0
|
||||
diffusers
|
||||
gradio>=6.5.1
|
||||
matplotlib>=3.7.5
|
||||
scipy>=1.10.1
|
||||
soundfile>=0.13.1
|
||||
loguru>=0.7.3
|
||||
einops>=0.8.1
|
||||
accelerate>=1.12.0
|
||||
fastapi>=0.110.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
numba>=0.63.1
|
||||
vector-quantize-pytorch>=1.27.15
|
||||
torchcodec>=0.9.1
|
||||
torchao
|
||||
modelscope
|
||||
@@ -1,22 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio
|
||||
torchvision
|
||||
|
||||
# Core dependencies
|
||||
transformers>=4.51.0,<4.58.0
|
||||
diffusers
|
||||
gradio>=6.5.1
|
||||
matplotlib>=3.7.5
|
||||
scipy>=1.10.1
|
||||
soundfile>=0.13.1
|
||||
loguru>=0.7.3
|
||||
einops>=0.8.1
|
||||
accelerate>=1.12.0
|
||||
fastapi>=0.110.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
numba>=0.63.1
|
||||
vector-quantize-pytorch>=1.27.15
|
||||
torchcodec>=0.9.1
|
||||
torchao
|
||||
modelscope
|
||||
@@ -1,26 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
torchvision
|
||||
|
||||
# Core dependencies
|
||||
transformers>=4.51.0,<4.58.0
|
||||
diffusers
|
||||
gradio
|
||||
matplotlib>=3.7.5
|
||||
scipy>=1.10.1
|
||||
soundfile>=0.13.1
|
||||
loguru>=0.7.3
|
||||
einops>=0.8.1
|
||||
accelerate>=1.12.0
|
||||
fastapi>=0.110.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
numba>=0.63.1
|
||||
vector-quantize-pytorch>=1.27.15
|
||||
torchcodec>=0.9.1
|
||||
torchao
|
||||
modelscope
|
||||
|
||||
# LoRA Training dependencies (optional)
|
||||
peft>=0.7.0
|
||||
lightning>=2.0.0
|
||||
@@ -1,21 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
torchvision
|
||||
# Core dependencies
|
||||
transformers>=4.51.0,<4.58.0
|
||||
diffusers
|
||||
gradio>=6.5.1
|
||||
matplotlib>=3.7.5
|
||||
scipy>=1.10.1
|
||||
soundfile>=0.13.1
|
||||
loguru>=0.7.3
|
||||
einops>=0.8.1
|
||||
accelerate>=1.12.0
|
||||
fastapi>=0.110.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
numba>=0.63.1
|
||||
vector-quantize-pytorch>=1.27.15
|
||||
torchcodec>=0.9.1
|
||||
torchao
|
||||
modelscope
|
||||
@@ -1,25 +0,0 @@
|
||||
torch
|
||||
torchaudio
|
||||
torchvision
|
||||
|
||||
# Core dependencies
|
||||
transformers>=4.51.0,<4.58.0
|
||||
diffusers
|
||||
gradio
|
||||
matplotlib>=3.7.5
|
||||
scipy>=1.10.1
|
||||
soundfile>=0.13.1
|
||||
loguru>=0.7.3
|
||||
einops>=0.8.1
|
||||
accelerate>=1.12.0
|
||||
fastapi>=0.110.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
numba>=0.63.1
|
||||
vector-quantize-pytorch>=1.27.15
|
||||
torchcodec>=0.9.1
|
||||
torchao
|
||||
modelscope
|
||||
|
||||
# LoRA Training dependencies (optional)
|
||||
peft>=0.7.0
|
||||
lightning>=2.0.0
|
||||
@@ -1,4 +0,0 @@
|
||||
setuptools
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash
# Launch the backend through the shared helper library.
# Variables are quoted (the originals were not), so paths with spaces work,
# and "$@" preserves argument boundaries instead of re-splitting them.
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

startBackend "$@"
|
||||
@@ -1,53 +0,0 @@
|
||||
"""
|
||||
Tests for the ACE-Step gRPC backend.
|
||||
"""
|
||||
import os
|
||||
import tempfile
|
||||
import unittest
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import grpc
|
||||
|
||||
|
||||
class TestACEStepBackend(unittest.TestCase):
    """Test Health, LoadModel, and SoundGeneration (minimal; no real model required)."""

    @classmethod
    def setUpClass(cls):
        # Connect to a backend already running locally; the port can be
        # overridden to avoid conflicts when tests run in parallel.
        grpc_port = os.environ.get("BACKEND_PORT", "50051")
        cls.channel = grpc.insecure_channel(f"localhost:{grpc_port}")
        cls.stub = backend_pb2_grpc.BackendStub(cls.channel)

    @classmethod
    def tearDownClass(cls):
        cls.channel.close()

    def test_health(self):
        reply = self.stub.Health(backend_pb2.HealthMessage())
        self.assertEqual(reply.message, b"OK")

    def test_load_model(self):
        result = self.stub.LoadModel(backend_pb2.ModelOptions(Model="ace-step-test"))
        self.assertTrue(result.success, result.message)

    def test_sound_generation_minimal(self):
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as handle:
            dst = handle.name
        try:
            request = backend_pb2.SoundGenerationRequest(
                text="upbeat pop song",
                model="ace-step-test",
                dst=dst,
            )
            result = self.stub.SoundGeneration(request)
            self.assertTrue(result.success, result.message)
            self.assertTrue(os.path.exists(dst), f"Output file not created: {dst}")
            self.assertGreater(os.path.getsize(dst), 0)
        finally:
            if os.path.exists(dst):
                os.unlink(dst)
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/bin/bash
set -e

# Quoted paths/variables throughout (the originals were unquoted and
# word-split on spaces).
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

# Start backend in background (use env to avoid port conflict in parallel tests)
export PYTHONUNBUFFERED=1
BACKEND_PORT=${BACKEND_PORT:-50051}
export BACKEND_PORT
python backend.py --addr "localhost:${BACKEND_PORT}" &
BACKEND_PID=$!
# Always reap the background server, even if the tests fail.
trap 'kill "$BACKEND_PID" 2>/dev/null || true' EXIT
sleep 3
runUnittests
|
||||
@@ -1,7 +0,0 @@
|
||||
torch
|
||||
torchaudio
|
||||
accelerate
|
||||
numpy>=1.24.0,<1.26.0
|
||||
transformers
|
||||
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
|
||||
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
|
||||
@@ -1,3 +1,3 @@
|
||||
grpcio==1.78.1
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
grpcio-tools
|
||||
@@ -1,4 +0,0 @@
|
||||
torch==2.7.1
|
||||
transformers==4.48.3
|
||||
accelerate
|
||||
coqui-tts
|
||||
@@ -1,4 +1,4 @@
|
||||
grpcio==1.78.1
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
@@ -115,7 +115,6 @@ Available pipelines: AnimateDiffPipeline, AnimateDiffVideoToVideoPipeline, ...
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `COMPEL` | `0` | Enable Compel for prompt weighting |
|
||||
| `SD_EMBED` | `0` | Enable sd_embed for prompt weighting |
|
||||
| `XPU` | `0` | Enable Intel XPU support |
|
||||
| `CLIPSKIP` | `1` | Enable CLIP skip support |
|
||||
| `SAFETENSORS` | `1` | Use safetensors format |
|
||||
|
||||
@@ -40,21 +40,6 @@ from compel import Compel, ReturnedEmbeddingsType
|
||||
from optimum.quanto import freeze, qfloat8, quantize
|
||||
from transformers import T5EncoderModel
|
||||
from safetensors.torch import load_file
|
||||
# Try to import sd_embed - it might not always be available
|
||||
try:
|
||||
from sd_embed.embedding_funcs import (
|
||||
get_weighted_text_embeddings_sd15,
|
||||
get_weighted_text_embeddings_sdxl,
|
||||
get_weighted_text_embeddings_sd3,
|
||||
get_weighted_text_embeddings_flux1,
|
||||
)
|
||||
SD_EMBED_AVAILABLE = True
|
||||
except ImportError:
|
||||
get_weighted_text_embeddings_sd15 = None
|
||||
get_weighted_text_embeddings_sdxl = None
|
||||
get_weighted_text_embeddings_sd3 = None
|
||||
get_weighted_text_embeddings_flux1 = None
|
||||
SD_EMBED_AVAILABLE = False
|
||||
|
||||
# Import LTX-2 specific utilities
|
||||
from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
|
||||
@@ -62,10 +47,6 @@ from diffusers import LTX2VideoTransformer3DModel, GGUFQuantizationConfig
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
COMPEL = os.environ.get("COMPEL", "0") == "1"
|
||||
SD_EMBED = os.environ.get("SD_EMBED", "0") == "1"
|
||||
# Warn if SD_EMBED is enabled but the module is not available
|
||||
if SD_EMBED and not SD_EMBED_AVAILABLE:
|
||||
print("WARNING: SD_EMBED is enabled but sd_embed module is not available. Falling back to standard prompt processing.", file=sys.stderr)
|
||||
XPU = os.environ.get("XPU", "0") == "1"
|
||||
CLIPSKIP = os.environ.get("CLIPSKIP", "1") == "1"
|
||||
SAFETENSORS = os.environ.get("SAFETENSORS", "1") == "1"
|
||||
@@ -196,7 +177,7 @@ def get_scheduler(name: str, config: dict = {}):
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
|
||||
def _load_pipeline(self, request, modelFile, fromSingleFile, torchType, variant, device_map=None):
|
||||
def _load_pipeline(self, request, modelFile, fromSingleFile, torchType, variant):
|
||||
"""
|
||||
Load a diffusers pipeline dynamically using the dynamic loader.
|
||||
|
||||
@@ -210,7 +191,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
fromSingleFile: Whether to use from_single_file() vs from_pretrained()
|
||||
torchType: The torch dtype to use
|
||||
variant: Model variant (e.g., "fp16")
|
||||
device_map: Device mapping strategy (e.g., "auto" for multi-GPU)
|
||||
|
||||
Returns:
|
||||
The loaded pipeline instance
|
||||
@@ -232,14 +212,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
dtype = torch.bfloat16
|
||||
bfl_repo = os.environ.get("BFL_REPO", "ChuckMcSneed/FLUX.1-dev")
|
||||
|
||||
transformer = FluxTransformer2DModel.from_single_file(modelFile, torch_dtype=dtype, device_map=device_map)
|
||||
transformer = FluxTransformer2DModel.from_single_file(modelFile, torch_dtype=dtype)
|
||||
quantize(transformer, weights=qfloat8)
|
||||
freeze(transformer)
|
||||
text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype, device_map=device_map)
|
||||
text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
|
||||
quantize(text_encoder_2, weights=qfloat8)
|
||||
freeze(text_encoder_2)
|
||||
|
||||
pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, text_encoder_2=None, torch_dtype=dtype, device_map=device_map)
|
||||
pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, text_encoder_2=None, torch_dtype=dtype)
|
||||
pipe.transformer = transformer
|
||||
pipe.text_encoder_2 = text_encoder_2
|
||||
|
||||
@@ -252,15 +232,13 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
vae = AutoencoderKLWan.from_pretrained(
|
||||
request.Model,
|
||||
subfolder="vae",
|
||||
torch_dtype=torch.float32,
|
||||
device_map=device_map
|
||||
torch_dtype=torch.float32
|
||||
)
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="WanPipeline",
|
||||
model_id=request.Model,
|
||||
vae=vae,
|
||||
torch_dtype=torchType,
|
||||
device_map=device_map
|
||||
torch_dtype=torchType
|
||||
)
|
||||
self.txt2vid = True
|
||||
return pipe
|
||||
@@ -270,15 +248,13 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
vae = AutoencoderKLWan.from_pretrained(
|
||||
request.Model,
|
||||
subfolder="vae",
|
||||
torch_dtype=torch.float32,
|
||||
device_map=device_map
|
||||
torch_dtype=torch.float32
|
||||
)
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="WanImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
vae=vae,
|
||||
torch_dtype=torchType,
|
||||
device_map=device_map
|
||||
torch_dtype=torchType
|
||||
)
|
||||
self.img2vid = True
|
||||
return pipe
|
||||
@@ -289,8 +265,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
class_name="SanaPipeline",
|
||||
model_id=request.Model,
|
||||
variant="bf16",
|
||||
torch_dtype=torch.bfloat16,
|
||||
device_map=device_map
|
||||
torch_dtype=torch.bfloat16
|
||||
)
|
||||
pipe.vae.to(torch.bfloat16)
|
||||
pipe.text_encoder.to(torch.bfloat16)
|
||||
@@ -302,8 +277,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="DiffusionPipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
device_map=device_map
|
||||
torch_dtype=torchType
|
||||
)
|
||||
return pipe
|
||||
|
||||
@@ -314,8 +288,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
class_name="StableVideoDiffusionPipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant,
|
||||
device_map=device_map
|
||||
variant=variant
|
||||
)
|
||||
if not DISABLE_CPU_OFFLOAD:
|
||||
pipe.enable_model_cpu_offload()
|
||||
@@ -339,7 +312,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
modelFile,
|
||||
config=request.Model, # Use request.Model as the config/model_id
|
||||
subfolder="transformer",
|
||||
device_map=device_map,
|
||||
**transformer_kwargs,
|
||||
)
|
||||
|
||||
@@ -349,7 +321,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
model_id=request.Model,
|
||||
transformer=transformer,
|
||||
torch_dtype=torchType,
|
||||
device_map=device_map,
|
||||
)
|
||||
else:
|
||||
# Single file but not GGUF - use standard single file loading
|
||||
@@ -358,7 +329,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
model_id=modelFile,
|
||||
from_single_file=True,
|
||||
torch_dtype=torchType,
|
||||
device_map=device_map,
|
||||
)
|
||||
else:
|
||||
# Standard loading from pretrained
|
||||
@@ -366,8 +336,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant,
|
||||
device_map=device_map
|
||||
variant=variant
|
||||
)
|
||||
|
||||
if not DISABLE_CPU_OFFLOAD:
|
||||
@@ -392,7 +361,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
modelFile,
|
||||
config=request.Model, # Use request.Model as the config/model_id
|
||||
subfolder="transformer",
|
||||
device_map=device_map,
|
||||
**transformer_kwargs,
|
||||
)
|
||||
|
||||
@@ -402,7 +370,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
model_id=request.Model,
|
||||
transformer=transformer,
|
||||
torch_dtype=torchType,
|
||||
device_map=device_map,
|
||||
)
|
||||
else:
|
||||
# Single file but not GGUF - use standard single file loading
|
||||
@@ -411,7 +378,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
model_id=modelFile,
|
||||
from_single_file=True,
|
||||
torch_dtype=torchType,
|
||||
device_map=device_map,
|
||||
)
|
||||
else:
|
||||
# Standard loading from pretrained
|
||||
@@ -419,8 +385,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
class_name="LTX2Pipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant,
|
||||
device_map=device_map
|
||||
variant=variant
|
||||
)
|
||||
|
||||
if not DISABLE_CPU_OFFLOAD:
|
||||
@@ -443,10 +408,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
if not fromSingleFile:
|
||||
load_kwargs["use_safetensors"] = SAFETENSORS
|
||||
|
||||
# Add device_map for multi-GPU support (when TensorParallelSize > 1)
|
||||
if device_map:
|
||||
load_kwargs["device_map"] = device_map
|
||||
|
||||
# Determine pipeline class name - default to AutoPipelineForText2Image
|
||||
effective_pipeline_type = pipeline_type if pipeline_type else "AutoPipelineForText2Image"
|
||||
|
||||
@@ -549,13 +510,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
|
||||
print(f"LoadModel: PipelineType from request: {request.PipelineType}", file=sys.stderr)
|
||||
|
||||
# Determine device_map for multi-GPU support based on TensorParallelSize
|
||||
# When TensorParallelSize > 1, use device_map='auto' to distribute model across GPUs
|
||||
device_map = None
|
||||
if hasattr(request, 'TensorParallelSize') and request.TensorParallelSize > 1:
|
||||
device_map = "auto"
|
||||
print(f"LoadModel: Multi-GPU mode enabled with TensorParallelSize={request.TensorParallelSize}, using device_map='auto'", file=sys.stderr)
|
||||
|
||||
# Load pipeline using dynamic loader
|
||||
# Special cases that require custom initialization are handled first
|
||||
self.pipe = self._load_pipeline(
|
||||
@@ -563,8 +517,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
modelFile=modelFile,
|
||||
fromSingleFile=fromSingleFile,
|
||||
torchType=torchType,
|
||||
variant=variant,
|
||||
device_map=device_map
|
||||
variant=variant
|
||||
)
|
||||
|
||||
print(f"LoadModel: After loading - ltx2_pipeline: {self.ltx2_pipeline}, img2vid: {self.img2vid}, txt2vid: {self.txt2vid}, PipelineType: {self.PipelineType}", file=sys.stderr)
|
||||
@@ -589,7 +542,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
|
||||
if request.ControlNet:
|
||||
self.controlnet = ControlNetModel.from_pretrained(
|
||||
request.ControlNet, torch_dtype=torchType, variant=variant, device_map=device_map
|
||||
request.ControlNet, torch_dtype=torchType, variant=variant
|
||||
)
|
||||
self.pipe.controlnet = self.controlnet
|
||||
else:
|
||||
@@ -628,9 +581,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
|
||||
self.pipe.set_adapters(adapters_name, adapter_weights=adapters_weights)
|
||||
|
||||
# Only move pipeline to device if NOT using device_map
|
||||
# device_map handles device placement automatically
|
||||
if device_map is None and device != "cpu":
|
||||
if device != "cpu":
|
||||
self.pipe.to(device)
|
||||
if self.controlnet:
|
||||
self.controlnet.to(device)
|
||||
@@ -786,51 +737,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
kwargs["prompt_embeds"] = conditioning
|
||||
kwargs["pooled_prompt_embeds"] = pooled
|
||||
# pass the kwargs dictionary to the self.pipe method
|
||||
image = self.pipe(
|
||||
guidance_scale=self.cfg_scale,
|
||||
**kwargs
|
||||
).images[0]
|
||||
elif SD_EMBED and SD_EMBED_AVAILABLE:
|
||||
if self.PipelineType == "StableDiffusionPipeline":
|
||||
(
|
||||
kwargs["prompt_embeds"],
|
||||
kwargs["negative_prompt_embeds"],
|
||||
) = get_weighted_text_embeddings_sd15(
|
||||
pipe = self.pipe,
|
||||
prompt = prompt,
|
||||
neg_prompt = request.negative_prompt if hasattr(request, 'negative_prompt') else None,
|
||||
)
|
||||
if self.PipelineType == "StableDiffusionXLPipeline":
|
||||
(
|
||||
kwargs["prompt_embeds"],
|
||||
kwargs["negative_prompt_embeds"],
|
||||
kwargs["pooled_prompt_embeds"],
|
||||
kwargs["negative_pooled_prompt_embeds"],
|
||||
) = get_weighted_text_embeddings_sdxl(
|
||||
pipe = self.pipe,
|
||||
prompt = prompt,
|
||||
neg_prompt = request.negative_prompt if hasattr(request, 'negative_prompt') else None
|
||||
)
|
||||
if self.PipelineType == "StableDiffusion3Pipeline":
|
||||
(
|
||||
kwargs["prompt_embeds"],
|
||||
kwargs["negative_prompt_embeds"],
|
||||
kwargs["pooled_prompt_embeds"],
|
||||
kwargs["negative_pooled_prompt_embeds"],
|
||||
) = get_weighted_text_embeddings_sd3(
|
||||
pipe = self.pipe,
|
||||
prompt = prompt,
|
||||
neg_prompt = request.negative_prompt if hasattr(request, 'negative_prompt') else None
|
||||
)
|
||||
if self.PipelineType == "FluxTransformer2DModel":
|
||||
(
|
||||
kwargs["prompt_embeds"],
|
||||
kwargs["pooled_prompt_embeds"],
|
||||
) = get_weighted_text_embeddings_flux1(
|
||||
pipe = self.pipe,
|
||||
prompt = prompt,
|
||||
)
|
||||
|
||||
image = self.pipe(
|
||||
guidance_scale=self.cfg_scale,
|
||||
**kwargs
|
||||
|
||||
@@ -5,7 +5,6 @@ transformers
|
||||
torchvision==0.22.1
|
||||
accelerate
|
||||
compel
|
||||
git+https://github.com/xhinker/sd_embed
|
||||
peft
|
||||
sentencepiece
|
||||
torch==2.7.1
|
||||
|
||||
@@ -5,7 +5,6 @@ transformers
|
||||
torchvision
|
||||
accelerate
|
||||
compel
|
||||
git+https://github.com/xhinker/sd_embed
|
||||
peft
|
||||
sentencepiece
|
||||
torch
|
||||
|
||||
@@ -5,7 +5,6 @@ transformers
|
||||
torchvision
|
||||
accelerate
|
||||
compel
|
||||
git+https://github.com/xhinker/sd_embed
|
||||
peft
|
||||
sentencepiece
|
||||
torch
|
||||
|
||||
@@ -8,7 +8,6 @@ opencv-python
|
||||
transformers
|
||||
accelerate
|
||||
compel
|
||||
git+https://github.com/xhinker/sd_embed
|
||||
peft
|
||||
sentencepiece
|
||||
optimum-quanto
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
# Build, run, test, and cleanup targets for the faster-qwen3-tts backend.
# Recipes are unchanged; all targets are phony (no file outputs).

.PHONY: faster-qwen3-tts
faster-qwen3-tts:
	bash install.sh

.PHONY: run
run: faster-qwen3-tts
	@echo "Running faster-qwen3-tts..."
	bash run.sh
	@echo "faster-qwen3-tts run."

.PHONY: test
test: faster-qwen3-tts
	@echo "Testing faster-qwen3-tts..."
	bash test.sh
	@echo "faster-qwen3-tts tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
|
||||
@@ -1,193 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
gRPC server of LocalAI for Faster Qwen3-TTS (CUDA graph capture, voice clone only).
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import torch
|
||||
import soundfile as sf
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
def is_float(s):
    """Return True if *s* can be converted to a float, else False.

    Also catches TypeError so non-string, non-numeric inputs (e.g. None)
    return False instead of crashing; the original only caught ValueError.
    """
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False
|
||||
|
||||
|
||||
def is_int(s):
    """Return True if *s* can be converted to an int, else False.

    Also catches TypeError so non-string, non-numeric inputs (e.g. None)
    return False instead of crashing; the original only caught ValueError.
    """
    try:
        int(s)
        return True
    except (TypeError, ValueError):
        return False
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
    """gRPC servicer for faster-qwen3-tts (CUDA-only voice cloning).

    LoadModel must be called before TTS: it loads the FasterQwen3TTS model
    and records the reference-audio configuration on the servicer instance.
    """

    def Health(self, request, context):
        """Liveness probe: always replies with b"OK"."""
        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

    def LoadModel(self, request, context):
        """Load a FasterQwen3TTS model and parse backend options.

        Options arrive as "key:value" strings; values are coerced to int,
        float or bool where possible, otherwise kept as strings. Fails fast
        when CUDA is unavailable since the model requires an NVIDIA GPU.
        """
        if not torch.cuda.is_available():
            return backend_pb2.Result(
                success=False,
                message="faster-qwen3-tts requires NVIDIA GPU with CUDA"
            )

        self.options = {}
        for opt in request.Options:
            if ":" not in opt:
                continue
            key, value = opt.split(":", 1)
            # BUGFIX: try int before float. Every int string also parses as
            # a float, so the previous float-first ordering made the int
            # branch unreachable and silently turned integer-valued options
            # into floats.
            if is_int(value):
                value = int(value)
            elif is_float(value):
                value = float(value)
            elif value.lower() in ["true", "false"]:
                value = value.lower() == "true"
            self.options[key] = value

        model_path = request.Model or "Qwen/Qwen3-TTS-12Hz-0.6B-Base"
        # Reference-audio configuration, resolved later by _get_ref_audio_path.
        self.audio_path = request.AudioPath if hasattr(request, 'AudioPath') and request.AudioPath else None
        self.model_file = request.ModelFile if hasattr(request, 'ModelFile') and request.ModelFile else None
        self.model_path = request.ModelPath if hasattr(request, 'ModelPath') and request.ModelPath else None

        # Imported lazily so the server can start (and Health can answer)
        # even before the heavy TTS dependency is needed.
        from faster_qwen3_tts import FasterQwen3TTS
        print(f"Loading model from: {model_path}", file=sys.stderr)
        try:
            self.model = FasterQwen3TTS.from_pretrained(model_path)
        except Exception as e:
            print(f"[ERROR] Loading model: {type(e).__name__}: {e}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
            return backend_pb2.Result(success=False, message=str(e))

        print(f"Model loaded successfully: {model_path}", file=sys.stderr)
        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def _get_ref_audio_path(self, request):
        """Resolve the reference-audio path configured at LoadModel time.

        Absolute paths are returned as-is; relative paths are tried against
        the model file's directory, then the model path, before falling
        back to the raw configured value. Returns None when no reference
        audio was configured.
        """
        if not self.audio_path:
            return None
        if os.path.isabs(self.audio_path):
            return self.audio_path
        if self.model_file:
            model_file_base = os.path.dirname(self.model_file)
            ref_path = os.path.join(model_file_base, self.audio_path)
            if os.path.exists(ref_path):
                return ref_path
        if self.model_path:
            ref_path = os.path.join(self.model_path, self.audio_path)
            if os.path.exists(ref_path):
                return ref_path
        return self.audio_path

    def TTS(self, request, context):
        """Synthesize request.text as a voice clone into request.dst (wav).

        Requires a reference audio (AudioPath from LoadModel) and its
        transcript (the "ref_text" option, falling back to the request
        field). Any failure is reported as a Result with success=False and
        a diagnostic message rather than a raised exception.
        """
        try:
            if not request.dst:
                return backend_pb2.Result(
                    success=False,
                    message="dst (output path) is required"
                )
            text = request.text.strip()
            if not text:
                return backend_pb2.Result(
                    success=False,
                    message="Text is empty"
                )

            language = request.language if hasattr(request, 'language') and request.language else None
            if not language or language == "":
                language = "English"

            ref_audio = self._get_ref_audio_path(request)
            if not ref_audio:
                return backend_pb2.Result(
                    success=False,
                    message="AudioPath is required for voice clone (set in LoadModel)"
                )
            # Transcript of the reference audio: LoadModel option wins,
            # the per-request field is the fallback.
            ref_text = self.options.get("ref_text")
            if not ref_text and hasattr(request, 'ref_text') and request.ref_text:
                ref_text = request.ref_text
            if not ref_text:
                return backend_pb2.Result(
                    success=False,
                    message="ref_text is required for voice clone (set via LoadModel Options, e.g. ref_text:Your reference transcript)"
                )

            generation_kwargs = {}
            chunk_size = self.options.get("chunk_size")
            if chunk_size is not None:
                generation_kwargs["chunk_size"] = int(chunk_size)

            audio_list, sr = self.model.generate_voice_clone(
                text=text,
                language=language,
                ref_audio=ref_audio,
                ref_text=ref_text,
                **generation_kwargs
            )

            if audio_list is None or (isinstance(audio_list, list) and len(audio_list) == 0):
                return backend_pb2.Result(
                    success=False,
                    message="No audio output generated"
                )
            # generate_voice_clone may return a list of arrays or a single
            # array; only the first clip is written out.
            audio_data = audio_list[0] if isinstance(audio_list, list) else audio_list
            sf.write(request.dst, audio_data, sr)
            print(f"Saved output to {request.dst}", file=sys.stderr)

        except Exception as err:
            print(f"Error in TTS: {err}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")

        return backend_pb2.Result(success=True)
|
||||
|
||||
|
||||
def serve(address):
    """Start the gRPC server on *address* and block until terminated.

    Installs SIGINT/SIGTERM handlers that stop the server and exit, then
    parks the main thread in a long sleep loop (gRPC serves requests from
    its worker-thread pool, not from this thread).
    """
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
        options=[
            # Raise message-size limits to 50 MiB to allow large payloads.
            ('grpc.max_message_length', 50 * 1024 * 1024),
            ('grpc.max_send_message_length', 50 * 1024 * 1024),
            ('grpc.max_receive_message_length', 50 * 1024 * 1024),
        ]
    )
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)

    def signal_handler(sig, frame):
        # CONSISTENCY FIX: log to stderr like every other diagnostic in
        # this backend (previously this one message went to stdout).
        print("Received termination signal. Shutting down...", file=sys.stderr)
        server.stop(0)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Parse the bind address from the CLI and run the server.
    arg_parser = argparse.ArgumentParser(description="Run the gRPC server.")
    arg_parser.add_argument(
        "--addr",
        default="localhost:50051",
        help="The address to bind the server to.",
    )
    cli_args = arg_parser.parse_args()
    serve(cli_args.addr)
|
||||
@@ -1,13 +0,0 @@
|
||||
#!/bin/bash
# Install the Python dependencies for the faster-qwen3-tts backend.
set -e

# NOTE(review): build isolation is disabled for pip installs — presumably
# because some requirement builds against the pre-installed torch; confirm.
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"

# Locate the shared backend helper library relative to this script
# (layout differs between in-tree and packaged installs).
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

# Provided by libbackend.sh.
installRequirements
|
||||
@@ -1,4 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu121
|
||||
torch
|
||||
torchaudio
|
||||
faster-qwen3-tts
|
||||
@@ -1,4 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
faster-qwen3-tts
|
||||
@@ -1,4 +0,0 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
|
||||
torch
|
||||
torchaudio
|
||||
faster-qwen3-tts
|
||||
@@ -1,4 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
faster-qwen3-tts
|
||||
@@ -1,8 +0,0 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
soundfile
|
||||
setuptools
|
||||
six
|
||||
sox
|
||||
@@ -1,9 +0,0 @@
|
||||
#!/bin/bash
# Start the faster-qwen3-tts gRPC backend, forwarding all CLI arguments.

# Locate the shared backend helper library relative to this script
# (layout differs between in-tree and packaged installs).
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

# Provided by libbackend.sh.
startBackend $@
|
||||
@@ -1,104 +0,0 @@
|
||||
"""
|
||||
Tests for the faster-qwen3-tts gRPC backend.
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import sys
|
||||
import tempfile
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
    """Integration tests: spawn the backend server as a subprocess and
    exercise its gRPC API on localhost:50052."""

    def setUp(self):
        # Launch the server under test; the generous sleep gives it time to
        # import its heavy dependencies and bind the socket before tests run.
        self.service = subprocess.Popen(
            ["python3", "backend.py", "--addr", "localhost:50052"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            cwd=os.path.dirname(os.path.abspath(__file__)),
        )
        time.sleep(15)

    def tearDown(self):
        # Terminate gracefully; escalate to kill if it does not exit in time.
        self.service.terminate()
        try:
            self.service.communicate(timeout=5)
        except subprocess.TimeoutExpired:
            self.service.kill()
            self.service.communicate()

    def test_health(self):
        """Health must reply with b"OK"."""
        with grpc.insecure_channel("localhost:50052") as channel:
            stub = backend_pb2_grpc.BackendStub(channel)
            reply = stub.Health(backend_pb2.HealthMessage(), timeout=5.0)
            self.assertEqual(reply.message, b"OK")

    def test_load_model_requires_cuda(self):
        """LoadModel must report failure when CUDA is unavailable.

        NOTE(review): on a CUDA-capable host this would attempt a real model
        load and might succeed — this test assumes a CPU-only environment;
        confirm against CI hardware.
        """
        with grpc.insecure_channel("localhost:50052") as channel:
            stub = backend_pb2_grpc.BackendStub(channel)
            response = stub.LoadModel(
                backend_pb2.ModelOptions(
                    Model="Qwen/Qwen3-TTS-12Hz-0.6B-Base",
                    CUDA=True,
                ),
                timeout=10.0,
            )
            self.assertFalse(response.success)

    @unittest.skipUnless(
        __import__("torch").cuda.is_available(),
        "faster-qwen3-tts TTS requires CUDA",
    )
    def test_tts(self):
        """End-to-end: load the model with a silent reference clip, run TTS,
        and check a non-empty wav file is produced."""
        import soundfile as sf
        try:
            with grpc.insecure_channel("localhost:50052") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                # Create a 1-second silent wav to serve as reference audio.
                ref_audio = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
                ref_audio.close()
                try:
                    sr = 22050
                    duration = 1.0
                    samples = int(sr * duration)
                    sf.write(ref_audio.name, [0.0] * samples, sr)

                    response = stub.LoadModel(
                        backend_pb2.ModelOptions(
                            Model="Qwen/Qwen3-TTS-12Hz-0.6B-Base",
                            AudioPath=ref_audio.name,
                            Options=["ref_text:Hello world"],
                        ),
                        timeout=600.0,
                    )
                    self.assertTrue(response.success, response.message)

                    # Reserve an output path; the backend writes the wav.
                    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as out:
                        output_path = out.name
                    try:
                        tts_response = stub.TTS(
                            backend_pb2.TTSRequest(
                                text="Test output.",
                                dst=output_path,
                                language="English",
                            ),
                            timeout=120.0,
                        )
                        self.assertTrue(tts_response.success, tts_response.message)
                        self.assertTrue(os.path.exists(output_path))
                        self.assertGreater(os.path.getsize(output_path), 0)
                    finally:
                        if os.path.exists(output_path):
                            os.unlink(output_path)
                finally:
                    if os.path.exists(ref_audio.name):
                        os.unlink(ref_audio.name)
        except Exception as err:
            self.fail(f"TTS test failed: {err}")
|
||||
|
||||
|
||||
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
||||
@@ -1,11 +0,0 @@
|
||||
#!/bin/bash
# Run the unit tests for the faster-qwen3-tts backend.
set -e

# Locate the shared backend helper library relative to this script
# (layout differs between in-tree and packaged installs).
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

# Provided by libbackend.sh.
runUnittests
|
||||
@@ -1,8 +0,0 @@
|
||||
torch==2.7.1
|
||||
faster-whisper
|
||||
opencv-python
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
optimum-quanto
|
||||
@@ -1,5 +0,0 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl
|
||||
@@ -1,5 +0,0 @@
|
||||
torch==2.7.1
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-audio
|
||||
mlx[cpu]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-audio
|
||||
mlx[cuda12]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-audio
|
||||
mlx[cuda13]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-audio
|
||||
mlx[cuda12]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-audio
|
||||
mlx[cuda13]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-vlm
|
||||
mlx[cpu]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-vlm
|
||||
mlx[cuda12]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-vlm
|
||||
mlx[cuda13]
|
||||
@@ -1,2 +0,0 @@
|
||||
git+https://github.com/Blaizzy/mlx-vlm
|
||||
mlx[cuda12]
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user