Compare commits


1 Commit

Author SHA1 Message Date
Ettore Di Giacinto
856d90400a chore(uv): add --index-strategy=unsafe-first-match to l4t
This is because the main index might not contain all the dependencies
for torch

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2026-01-08 21:47:18 +00:00
288 changed files with 4420 additions and 27405 deletions
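The commit message above concerns uv's index resolution: with the default `first-index` strategy, each package is restricted to the first index that provides it, so a custom l4t/Jetson index that carries torch but not all of its dependencies can break resolution. A minimal sketch of the kind of invocation the flag applies to, assuming a hypothetical extra index URL rather than the project's actual l4t install step:

```bash
# Sketch only: the extra index URL below is hypothetical, not the one used by the project.
# With --index-strategy=unsafe-first-match, uv may fall back to other configured indexes
# (e.g. PyPI) for dependencies that the preferred index does not carry.
uv pip install \
  --index-strategy=unsafe-first-match \
  --extra-index-url "https://example.com/l4t/wheels" \
  torch torchaudio
```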


@@ -91,23 +91,10 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'true'
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
# CUDA 12 builds
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vibevoice'
@@ -118,58 +105,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-qwen-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-pocket-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "pocket-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -185,11 +120,11 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-llama-cpp'
runs-on: 'bigger-runner'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "llama-cpp"
@@ -198,7 +133,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm'
@@ -211,20 +146,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm-omni'
runs-on: 'arc-runner-set'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "vllm-omni"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-transformers'
@@ -237,7 +159,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-diffusers'
@@ -250,7 +172,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-kokoro'
@@ -263,7 +185,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-faster-whisper'
@@ -274,19 +196,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
@@ -302,7 +211,20 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-bark'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-chatterbox'
@@ -315,7 +237,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-moonshine'
@@ -328,7 +250,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-stablediffusion-ggml'
@@ -341,7 +263,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-whisper'
@@ -354,7 +276,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
@@ -367,7 +289,20 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-neutts'
@@ -405,58 +340,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-qwen-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-pocket-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "pocket-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -522,45 +405,6 @@ jobs:
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-cuda-13-arm64-qwen-asr'
runs-on: 'ubuntu-24.04-arm'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
ubuntu-version: '2404'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-cuda-13-arm64-qwen-tts'
runs-on: 'ubuntu-24.04-arm'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
ubuntu-version: '2404'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-cuda-13-arm64-pocket-tts'
runs-on: 'ubuntu-24.04-arm'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
ubuntu-version: '2404'
backend: "pocket-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -605,11 +449,11 @@ jobs:
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-whisperx'
tag-suffix: '-gpu-nvidia-cuda-13-bark'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "whisperx"
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -744,19 +588,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vllm-omni'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "vllm-omni"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
@@ -810,65 +641,13 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-qwen-asr'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-qwen-tts'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-voxcpm'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-pocket-tts'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "pocket-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-faster-whisper'
runs-on: 'bigger-runner'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "faster-whisper"
@@ -880,11 +659,11 @@ jobs:
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-whisperx'
runs-on: 'bigger-runner'
tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "whisperx"
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -893,11 +672,11 @@ jobs:
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'bigger-runner'
tag-suffix: '-gpu-rocm-hipblas-bark'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "coqui"
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -993,45 +772,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-qwen-asr'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-qwen-tts'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-pocket-tts'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "pocket-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -1085,58 +825,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-qwen-asr'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-qwen-tts'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-voxcpm'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-pocket-tts'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "pocket-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
@@ -1150,6 +838,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-bark'
runs-on: 'ubuntu-latest'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
# piper
- build-type: ''
cuda-major-version: ""
@@ -1164,13 +865,27 @@ jobs:
dockerfile: "./backend/Dockerfile.golang"
context: "./"
ubuntu-version: '2404'
# bark-cpp
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-bark-cpp'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark-cpp"
dockerfile: "./backend/Dockerfile.golang"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-llama-cpp'
runs-on: 'bigger-runner'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "llama-cpp"
@@ -1196,7 +911,7 @@ jobs:
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-gpu-vulkan-llama-cpp'
runs-on: 'bigger-runner'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "llama-cpp"
@@ -1430,6 +1145,46 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
# exllama2
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-exllama2'
runs-on: 'ubuntu-latest'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-exllama2'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
runs-on: 'ubuntu-latest'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -1497,6 +1252,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-neutts'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
@@ -1510,58 +1278,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-qwen-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-pocket-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "pocket-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
backend-jobs-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
strategy:


@@ -37,7 +37,7 @@
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12'


@@ -88,7 +88,7 @@
ubuntu-codename: 'noble'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'


@@ -238,7 +238,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch espeak espeak-ng python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -257,7 +257,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -265,80 +265,4 @@ jobs:
- name: Test moonshine
run: |
make --jobs=5 --output-sync=target -C backend/python/moonshine
make --jobs=5 --output-sync=target -C backend/python/moonshine test
tests-pocket-tts:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test pocket-tts
run: |
make --jobs=5 --output-sync=target -C backend/python/pocket-tts
make --jobs=5 --output-sync=target -C backend/python/pocket-tts test
tests-qwen-tts:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test qwen-tts
run: |
make --jobs=5 --output-sync=target -C backend/python/qwen-tts
make --jobs=5 --output-sync=target -C backend/python/qwen-tts test
tests-qwen-asr:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg sox
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test qwen-asr
run: |
make --jobs=5 --output-sync=target -C backend/python/qwen-asr
make --jobs=5 --output-sync=target -C backend/python/qwen-asr test
tests-voxcpm:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test voxcpm
run: |
make --jobs=5 --output-sync=target -C backend/python/voxcpm
make --jobs=5 --output-sync=target -C backend/python/voxcpm test
make --jobs=5 --output-sync=target -C backend/python/moonshine test


@@ -1,56 +0,0 @@
---
name: 'E2E Backend Tests'
on:
pull_request:
push:
branches:
- master
tags:
- '*'
concurrency:
group: ci-tests-e2e-backend-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
tests-e2e-backend:
runs-on: ubuntu-latest
strategy:
matrix:
go-version: ['1.25.x']
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
- name: Display Go version
run: go version
- name: Proto Dependencies
run: |
# Install protoc
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
PATH="$PATH:$HOME/go/bin" make protogen-go
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential
- name: Test Backend E2E
run: |
PATH="$PATH:$HOME/go/bin" make build-mock-backend test-e2e
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true

.gitignore (vendored)

@@ -36,8 +36,6 @@ LocalAI
models/*
test-models/
test-dir/
tests/e2e-aio/backends
tests/e2e-aio/models
release/


@@ -4,13 +4,13 @@ Building and testing the project depends on the components involved and the plat
## Building a specified backend
Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build coqui for ROCM/hipblas
Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build bark for ROCM/hipblas
- The Makefile has targets like `docker-build-coqui` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- The Makefile has targets like `docker-build-bark` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- At a minimum we need to set the BUILD_TYPE, BASE_IMAGE build-args
- Use .github/workflows/backend.yml as a reference it lists the needed args in the `include` job strategy matrix
- l4t and cublas also require the CUDA major and minor version (see the sketch after this list)
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-coqui`
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-bark`
- Unless the user specifies that they want you to run the command, then just print it because not all agent frontends handle long running jobs well and the output may overflow your context
- The user may say they want to build AMD or ROCM instead of hipblas, or Intel instead of SYCL, or NVIDIA instead of l4t or cublas. Ask for confirmation if there is ambiguity.
- Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly.
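Putting the list above together, a hedged example of what a cublas build command could look like. The backend, base image, and CUDA versions here are illustrative (taken from matrix entries and Makefile defaults appearing elsewhere in this diff), not a prescribed configuration:

```bash
# Sketch based on the pattern described above; print it for the user rather than
# running it unless asked. BUILD_TYPE/BASE_IMAGE/CUDA_* values mirror entries in
# .github/workflows/backend.yml but are only an example here.
DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) \
BUILD_TYPE=cublas \
CUDA_MAJOR_VERSION=12 CUDA_MINOR_VERSION=9 \
BASE_IMAGE=ubuntu:24.04 \
make docker-build-bark
```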
@@ -95,7 +95,7 @@ test-extra: prepare-test-extra
Add a backend definition variable in the backend definitions section (around line 428-457). The format depends on the backend type:
**For Python backends with root context** (like `faster-whisper`, `coqui`):
**For Python backends with root context** (like `faster-whisper`, `bark`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|.|false|true
```
@@ -280,11 +280,3 @@ Always check `llama.cpp` for new model configuration options that should be supp
- `llama.cpp/common/chat-parser.cpp` - Format presets and model-specific handlers
- `llama.cpp/common/chat.h` - Format enums and parameter structures
- `llama.cpp/tools/server/server-context.cpp` - Server configuration options
# Documentation
The project documentation is located in `docs/content`. When adding new features or changing existing functionality, it is crucial to update the documentation to reflect these changes. This helps users understand how to use the new capabilities and ensures the documentation stays relevant.
- **Feature Documentation**: If you add a new feature (like a new backend or API endpoint), create a new markdown file in `docs/content/features/` explaining what it is, how to configure it, and how to use it.
- **Configuration**: If you modify configuration options, update the relevant sections in `docs/content/`.
- **Examples**: providing concrete examples (like YAML configuration blocks) is highly encouraged to help users get started quickly.


@@ -10,7 +10,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas0 libopenblas-dev sox && \
ffmpeg libopenblas0 libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
@@ -42,22 +42,22 @@ RUN <<EOT bash
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils mesa-vulkan-drivers
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.335.0/linux/vulkansdk-linux-x86_64-1.4.335.0.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.335.0 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.335.0 && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/share/* /usr/share/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
@@ -106,7 +106,7 @@ RUN <<EOT bash
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
if [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi


@@ -1,5 +1,5 @@
# Disable parallel execution for backend builds
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/vllm-omni backends/moonshine backends/pocket-tts backends/qwen-tts backends/qwen-asr backends/voxcpm backends/whisperx
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine
GOCMD=go
GOTEST=$(GOCMD) test
@@ -7,14 +7,16 @@ GOVET=$(GOCMD) vet
BINARY_NAME=local-ai
LAUNCHER_BINARY_NAME=local-ai-launcher
UBUNTU_VERSION?=2404
CUDA_MAJOR_VERSION?=13
CUDA_MINOR_VERSION?=0
UBUNTU_VERSION?=2204
UBUNTU_CODENAME?=noble
GORELEASER?=
export BUILD_TYPE?=
export CUDA_MAJOR_VERSION?=13
export CUDA_MINOR_VERSION?=0
export CUDA_MAJOR_VERSION?=12
export CUDA_MINOR_VERSION?=9
GO_TAGS?=
BUILD_ID?=
@@ -189,6 +191,9 @@ run-e2e-aio: protogen-go
########################################################
prepare-e2e:
mkdir -p $(TEST_DIR)
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
docker build \
--build-arg IMAGE_TYPE=core \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
@@ -202,16 +207,14 @@ prepare-e2e:
-t localai-tests .
run-e2e-image:
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --name e2e-tests-$(RANDOM) localai-tests
ls -liah $(abspath ./tests/e2e-fixtures)
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests
test-e2e: build-mock-backend prepare-e2e run-e2e-image
test-e2e:
@echo 'Running e2e tests'
BUILD_TYPE=$(BUILD_TYPE) \
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390 \
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e
$(MAKE) clean-mock-backend
$(MAKE) teardown-e2e
docker rmi localai-tests
teardown-e2e:
rm -rf $(TEST_DIR) || true
@@ -311,28 +314,16 @@ prepare-test-extra: protogen-python
$(MAKE) -C backend/python/diffusers
$(MAKE) -C backend/python/chatterbox
$(MAKE) -C backend/python/vllm
$(MAKE) -C backend/python/vllm-omni
$(MAKE) -C backend/python/vibevoice
$(MAKE) -C backend/python/moonshine
$(MAKE) -C backend/python/pocket-tts
$(MAKE) -C backend/python/qwen-tts
$(MAKE) -C backend/python/qwen-asr
$(MAKE) -C backend/python/voxcpm
$(MAKE) -C backend/python/whisperx
test-extra: prepare-test-extra
$(MAKE) -C backend/python/transformers test
$(MAKE) -C backend/python/diffusers test
$(MAKE) -C backend/python/chatterbox test
$(MAKE) -C backend/python/vllm test
$(MAKE) -C backend/python/vllm-omni test
$(MAKE) -C backend/python/vibevoice test
$(MAKE) -C backend/python/moonshine test
$(MAKE) -C backend/python/pocket-tts test
$(MAKE) -C backend/python/qwen-tts test
$(MAKE) -C backend/python/qwen-asr test
$(MAKE) -C backend/python/voxcpm test
$(MAKE) -C backend/python/whisperx test
DOCKER_IMAGE?=local-ai
DOCKER_AIO_IMAGE?=local-ai-aio
@@ -441,6 +432,7 @@ backend-images:
BACKEND_LLAMA_CPP = llama-cpp|llama-cpp|.|false|false
# Golang backends
BACKEND_BARK_CPP = bark-cpp|golang|.|false|true
BACKEND_PIPER = piper|golang|.|false|true
BACKEND_LOCAL_STORE = local-store|golang|.|false|true
BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
@@ -453,21 +445,19 @@ BACKEND_RERANKERS = rerankers|python|.|false|true
BACKEND_TRANSFORMERS = transformers|python|.|false|true
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
BACKEND_COQUI = coqui|python|.|false|true
BACKEND_RFDETR = rfdetr|python|.|false|true
BACKEND_KITTEN_TTS = kitten-tts|python|.|false|true
BACKEND_NEUTTS = neutts|python|.|false|true
BACKEND_KOKORO = kokoro|python|.|false|true
BACKEND_VLLM = vllm|python|.|false|true
BACKEND_VLLM_OMNI = vllm-omni|python|.|false|true
BACKEND_DIFFUSERS = diffusers|python|.|--progress=plain|true
BACKEND_CHATTERBOX = chatterbox|python|.|false|true
BACKEND_VIBEVOICE = vibevoice|python|.|--progress=plain|true
BACKEND_MOONSHINE = moonshine|python|.|false|true
BACKEND_POCKET_TTS = pocket-tts|python|.|false|true
BACKEND_QWEN_TTS = qwen-tts|python|.|false|true
BACKEND_QWEN_ASR = qwen-asr|python|.|false|true
BACKEND_VOXCPM = voxcpm|python|.|false|true
BACKEND_WHISPERX = whisperx|python|.|false|true
BACKEND_BARK = bark|python|.|false|true
BACKEND_EXLLAMA2 = exllama2|python|.|false|true
# Python backends with ./backend context
BACKEND_RFDETR = rfdetr|python|./backend|false|true
BACKEND_KITTEN_TTS = kitten-tts|python|./backend|false|true
BACKEND_NEUTTS = neutts|python|./backend|false|true
BACKEND_KOKORO = kokoro|python|./backend|false|true
BACKEND_VLLM = vllm|python|./backend|false|true
BACKEND_DIFFUSERS = diffusers|python|./backend|--progress=plain|true
BACKEND_CHATTERBOX = chatterbox|python|./backend|false|true
BACKEND_VIBEVOICE = vibevoice|python|./backend|--progress=plain|true
BACKEND_MOONSHINE = moonshine|python|./backend|false|true
# Helper function to build docker image for a backend
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
@@ -491,6 +481,7 @@ endef
# Generate all docker-build targets
$(eval $(call generate-docker-build-target,$(BACKEND_LLAMA_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_PIPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_LOCAL_STORE)))
$(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
@@ -501,37 +492,23 @@ $(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK)))
$(eval $(call generate-docker-build-target,$(BACKEND_EXLLAMA2)))
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
$(eval $(call generate-docker-build-target,$(BACKEND_KITTEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_NEUTTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_KOKORO)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM_OMNI)))
$(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
$(eval $(call generate-docker-build-target,$(BACKEND_POCKET_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_ASR)))
$(eval $(call generate-docker-build-target,$(BACKEND_VOXCPM)))
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPERX)))
# Pattern rule for docker-save targets
docker-save-%: backend-images
docker save local-ai-backend:$* -o backend-images/$*.tar
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-vllm-omni docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-chatterbox docker-build-vibevoice docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts docker-build-qwen-asr docker-build-voxcpm docker-build-whisperx
########################################################
### Mock Backend for E2E Tests
########################################################
build-mock-backend: protogen-go
$(GOCMD) build -o tests/e2e/mock-backend/mock-backend ./tests/e2e/mock-backend
clean-mock-backend:
rm -f tests/e2e/mock-backend/mock-backend
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine
########################################################
### END Backends


@@ -51,16 +51,34 @@
**LocalAI** is the free, Open Source OpenAI alternative. LocalAI acts as a drop-in replacement REST API that's compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).
## Local Stack Family
## 📚🆕 Local Stack Family
Liking LocalAI? LocalAI is part of an integrated suite of AI infrastructure tools, you might also like:
- **[LocalAGI](https://github.com/mudler/LocalAGI)** - AI agent orchestration platform with OpenAI Responses API compatibility and advanced agentic capabilities
- **[LocalRecall](https://github.com/mudler/LocalRecall)** - MCP/REST API knowledge base system providing persistent memory and storage for AI agents
- 🆕 **[Cogito](https://github.com/mudler/cogito)** - Go library for building intelligent, co-operative agentic software and LLM-powered workflows, focusing on improving results for small, open source language models that scales to any LLM. Powers LocalAGI and LocalAI MCP/Agentic capabilities
- 🆕 **[Wiz](https://github.com/mudler/wiz)** - Terminal-based AI agent accessible via Ctrl+Space keybinding. Portable, local-LLM friendly shell assistant with TUI/CLI modes, tool execution with approval, MCP protocol support, and multi-shell compatibility (zsh, bash, fish)
- 🆕 **[SkillServer](https://github.com/mudler/skillserver)** - Simple, centralized skills database for AI agents via MCP. Manages skills as Markdown files with MCP server integration, web UI for editing, Git synchronization, and full-text search capabilities
🆕 LocalAI is now part of a comprehensive suite of AI tools designed to work together:
<table>
<tr>
<td width="50%" valign="top">
<a href="https://github.com/mudler/LocalAGI">
<img src="https://raw.githubusercontent.com/mudler/LocalAGI/refs/heads/main/webui/react-ui/public/logo_2.png" width="300" alt="LocalAGI Logo">
</a>
</td>
<td width="50%" valign="top">
<h3><a href="https://github.com/mudler/LocalAGI">LocalAGI</a></h3>
<p>A powerful Local AI agent management platform that serves as a drop-in replacement for OpenAI's Responses API, enhanced with advanced agentic capabilities.</p>
</td>
</tr>
<tr>
<td width="50%" valign="top">
<a href="https://github.com/mudler/LocalRecall">
<img src="https://raw.githubusercontent.com/mudler/LocalRecall/refs/heads/main/static/localrecall_horizontal.png" width="300" alt="LocalRecall Logo">
</a>
</td>
<td width="50%" valign="top">
<h3><a href="https://github.com/mudler/LocalRecall">LocalRecall</a></h3>
<p>A REST-ful API and knowledge base management system that provides persistent memory and storage capabilities for AI agents.</p>
</td>
</tr>
</table>
## Screenshots / Video
@@ -93,8 +111,6 @@ Liking LocalAI? LocalAI is part of an integrated suite of AI infrastructure tool
## 💻 Quickstart
> ⚠️ **Note:** The `install.sh` script is currently experiencing issues due to the heavy changes LocalAI is currently undergoing and may produce broken or misconfigured installations. Please use Docker installation (see below) or manual binary installation until [issue #8032](https://github.com/mudler/LocalAI/issues/8032) is resolved.
Run the installer script:
```bash
@@ -112,7 +128,7 @@ For more installation options, see [Installer Options](https://localai.io/instal
> Note: the DMGs are not signed by Apple as quarantined. See https://github.com/mudler/LocalAI/issues/6268 for a workaround, fix is tracked here: https://github.com/mudler/LocalAI/issues/6244
### Containers (Docker, podman, ...)
Or run with docker:
> **💡 Docker Run vs Docker Start**
>
@@ -121,13 +137,13 @@ For more installation options, see [Installer Options](https://localai.io/instal
>
> If you've already run LocalAI before and want to start it again, use: `docker start -i local-ai`
#### CPU only image:
### CPU only image:
```bash
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
```
#### NVIDIA GPU Images:
### NVIDIA GPU Images:
```bash
# CUDA 13.0
@@ -144,25 +160,25 @@ docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nv
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64-cuda-13
```
#### AMD GPU Images (ROCm):
### AMD GPU Images (ROCm):
```bash
docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-gpu-hipblas
```
#### Intel GPU Images (oneAPI):
### Intel GPU Images (oneAPI):
```bash
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel
```
#### Vulkan GPU Images:
### Vulkan GPU Images:
```bash
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-gpu-vulkan
```
#### AIO Images (pre-downloaded models):
### AIO Images (pre-downloaded models):
```bash
# CPU version
@@ -239,7 +255,6 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- 🔈 [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
- 🎨 [Image generation](https://localai.io/features/image-generation)
- 🔥 [OpenAI-alike tools API](https://localai.io/features/openai-functions/)
- ⚡ [Realtime API](https://localai.io/features/openai-realtime/) (Speech-to-speech)
- 🧠 [Embeddings generation for vector databases](https://localai.io/features/embeddings/)
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
@@ -261,6 +276,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
| **llama.cpp** | LLM inference in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12/13, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 12/13, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12/13 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
@@ -269,6 +285,8 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12/13, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
@@ -277,8 +295,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
| **silero-vad** | Voice Activity Detection | CPU |
| **neutts** | Text-to-speech with voice cloning | CUDA 12/13, ROCm, CPU |
| **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
| **pocket-tts** | Lightweight CPU-based TTS | CUDA 12/13, ROCm, Intel, CPU |
| **qwen-tts** | High-quality TTS with custom voice, voice design, and voice cloning | CUDA 12/13, ROCm, Intel, CPU |
### Image & Video Generation
| Backend | Description | Acceleration Support |
@@ -300,9 +316,9 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, neutts, vibevoice, pocket-tts, qwen-tts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, coqui, kokoro, vibevoice, pocket-tts, qwen-tts | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM | Apple M1/M2/M3+ |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
| **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
@@ -324,10 +340,6 @@ Agentic Libraries:
MCPs:
- https://github.com/mudler/MCPs
OS Assistant:
- https://github.com/mudler/Keygeist - Keygeist is an AI-powered keyboard operator that listens for key combinations and responds with AI-generated text typed directly into your Linux box.
Model galleries
- https://github.com/go-skynet/model-gallery


@@ -47,22 +47,22 @@ RUN <<EOT bash
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.335.0/linux/vulkansdk-linux-x86_64-1.4.335.0.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.335.0 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.335.0 && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/share/* /usr/share/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
@@ -94,11 +94,7 @@ RUN <<EOT bash
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -110,7 +106,7 @@ RUN <<EOT bash
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
if [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi


@@ -104,22 +104,22 @@ RUN <<EOT bash
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.335.0/linux/vulkansdk-linux-x86_64-1.4.335.0.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.335.0 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.335.0 && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/share/* /usr/share/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
@@ -148,14 +148,11 @@ RUN <<EOT bash
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
echo https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -167,7 +164,7 @@ RUN <<EOT bash
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
if [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi


@@ -61,22 +61,22 @@ RUN <<EOT bash
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.335.0/linux/vulkansdk-linux-x86_64-1.4.335.0.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.335.0.tar.xz && \
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.335.0 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.335.0 && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.335.0/x86_64/share/* /usr/share/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
@@ -108,11 +108,7 @@ RUN <<EOT bash
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -124,7 +120,7 @@ RUN <<EOT bash
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
if [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi

View File

@@ -46,7 +46,7 @@ The backend system provides language-specific Dockerfiles that handle the build
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: coqui, faster-whisper, kitten-tts
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro
@@ -55,6 +55,7 @@ The backend system provides language-specific Dockerfiles that handle the build
- **stablediffusion-ggml**: Stable Diffusion in Go with a GGML C++ backend
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Go with C bindings to rhasspy/piper
- **bark-cpp**: Bark TTS models in Go with C++ bindings
- **local-store**: Vector storage backend
#### C++ Backends (`cpp/`)

View File

@@ -17,7 +17,6 @@ service Backend {
rpc GenerateVideo(GenerateVideoRequest) returns (Result) {}
rpc AudioTranscription(TranscriptRequest) returns (TranscriptResult) {}
rpc TTS(TTSRequest) returns (Result) {}
rpc TTSStream(TTSRequest) returns (stream Reply) {}
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {}
@@ -33,8 +32,6 @@ service Backend {
rpc GetMetrics(MetricsRequest) returns (MetricsResponse);
rpc VAD(VADRequest) returns (VADResponse) {}
rpc ModelMetadata(ModelOptions) returns (ModelMetadataResponse) {}
}
// Define the empty request
@@ -299,7 +296,6 @@ message TranscriptSegment {
int64 end = 3;
string text = 4;
repeated int32 tokens = 5;
string speaker = 6;
}
message GenerateImageRequest {
@@ -414,8 +410,3 @@ message Detection {
message DetectResponse {
repeated Detection Detections = 1;
}
message ModelMetadataResponse {
bool supports_thinking = 1;
string rendered_template = 2; // The rendered chat template with enable_thinking=true (empty if not applicable)
}
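For orientation, here is a minimal Go sketch of driving the `Backend` service above over gRPC. The import path matches the `pb` package used by the Go backends later in this diff; the `NewBackendClient` constructor, the `GetSuccess`/`GetMessage` getters and the exact field names are assumptions based on standard protoc-generated bindings, not taken from the repository.

```go
package main

import (
	"context"
	"log"
	"time"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto" // generated from backend.proto (path assumed)
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Backends listen on the address passed via --addr (localhost:50051 by default).
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn) // assumed protoc-generated constructor for `service Backend`
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Load a model, then synthesize speech through the TTS RPC kept in this revision.
	if _, err := client.LoadModel(ctx, &pb.ModelOptions{ModelFile: "/models/tts-model.bin"}); err != nil {
		log.Fatalf("LoadModel: %v", err)
	}
	res, err := client.TTS(ctx, &pb.TTSRequest{Text: "hello from LocalAI", Dst: "/tmp/out.wav"})
	if err != nil {
		log.Fatalf("TTS: %v", err)
	}
	log.Printf("success=%v message=%q", res.GetSuccess(), res.GetMessage())
}
```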

View File

@@ -1,5 +1,5 @@
LLAMA_VERSION?=2634ed207a17db1a54bd8df0555bd8499a6ab691
LLAMA_VERSION?=ae9f8df77882716b1702df2bed8919499e64cc28
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?=

View File

@@ -23,7 +23,6 @@
#include <grpcpp/health_check_service_interface.h>
#include <regex>
#include <atomic>
#include <mutex>
#include <signal.h>
#include <thread>
@@ -83,8 +82,8 @@ static void start_llama_server(server_context& ctx_server) {
// print sample chat example to make it clear which template is used
// LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
// common_chat_templates_source(ctx_server.impl->chat_params.tmpls.get()),
// common_chat_format_example(ctx_server.impl->chat_params.tmpls.get(), ctx_server.impl->params_base.use_jinja).c_str(), ctx_server.impl->params_base.default_template_kwargs);
// common_chat_templates_source(ctx_server.impl->chat_templates.get()),
// common_chat_format_example(ctx_server.impl->chat_templates.get(), ctx_server.impl->params_base.use_jinja).c_str(), ctx_server.impl->params_base.default_template_kwargs);
// Keep the chat templates initialized in load_model() so they can be used when UseTokenizerTemplate is enabled
// Templates will only be used conditionally in Predict/PredictStream when UseTokenizerTemplate is true and Messages are provided
@@ -391,9 +390,8 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
// Initialize fit_params options (can be overridden by options)
// fit_params: whether to auto-adjust params to fit device memory (default: true as in llama.cpp)
params.fit_params = true;
// fit_params_target: target margin per device in bytes (default: 1GB per device)
// Initialize as vector with default value for all devices
params.fit_params_target = std::vector<size_t>(llama_max_devices(), 1024 * 1024 * 1024);
// fit_params_target: target margin per device in bytes (default: 1GB)
params.fit_params_target = 1024 * 1024 * 1024;
// fit_params_min_ctx: minimum context size for fit (default: 4096)
params.fit_params_min_ctx = 4096;
@@ -470,28 +468,10 @@ static void params_parse(server_context& /*ctx_server*/, const backend::ModelOpt
} else if (!strcmp(optname, "fit_params_target") || !strcmp(optname, "fit_target")) {
if (optval != NULL) {
try {
// Value is in MiB, can be comma-separated list for multiple devices
// Single value is broadcast across all devices
std::string arg_next = optval_str;
const std::regex regex{ R"([,/]+)" };
std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
std::vector<std::string> split_arg{ it, {} };
if (split_arg.size() >= llama_max_devices()) {
// Too many values provided
continue;
}
if (split_arg.size() == 1) {
// Single value: broadcast to all devices
size_t value_mib = std::stoul(split_arg[0]);
std::fill(params.fit_params_target.begin(), params.fit_params_target.end(), value_mib * 1024 * 1024);
} else {
// Multiple values: set per device
for (size_t i = 0; i < split_arg.size() && i < params.fit_params_target.size(); i++) {
params.fit_params_target[i] = std::stoul(split_arg[i]) * 1024 * 1024;
}
}
// Value is in MiB, convert to bytes
params.fit_params_target = static_cast<size_t>(std::stoi(optval_str)) * 1024 * 1024;
} catch (const std::exception& e) {
// If conversion fails, keep default value (1GB per device)
// If conversion fails, keep default value (1GB)
}
}
} else if (!strcmp(optname, "fit_params_min_ctx") || !strcmp(optname, "fit_ctx")) {
@@ -706,13 +686,13 @@ private:
public:
BackendServiceImpl(server_context& ctx) : ctx_server(ctx) {}
grpc::Status Health(ServerContext* /*context*/, const backend::HealthMessage* /*request*/, backend::Reply* reply) override {
grpc::Status Health(ServerContext* /*context*/, const backend::HealthMessage* /*request*/, backend::Reply* reply) {
// Implement Health RPC
reply->set_message("OK");
return Status::OK;
}
grpc::Status LoadModel(ServerContext* /*context*/, const backend::ModelOptions* request, backend::Result* result) override {
grpc::Status LoadModel(ServerContext* /*context*/, const backend::ModelOptions* request, backend::Result* result) {
// Implement LoadModel RPC
common_params params;
params_parse(ctx_server, request, params);
@@ -729,72 +709,11 @@ public:
LOG_INF("\n");
LOG_INF("%s\n", common_params_get_system_info(params).c_str());
LOG_INF("\n");
// Capture error messages during model loading
struct error_capture {
std::string captured_error;
std::mutex error_mutex;
ggml_log_callback original_callback;
void* original_user_data;
} error_capture_data;
// Get original log callback
llama_log_get(&error_capture_data.original_callback, &error_capture_data.original_user_data);
// Set custom callback to capture errors
llama_log_set([](ggml_log_level level, const char * text, void * user_data) {
auto* capture = static_cast<error_capture*>(user_data);
// Capture error messages
if (level == GGML_LOG_LEVEL_ERROR) {
std::lock_guard<std::mutex> lock(capture->error_mutex);
// Append error message, removing trailing newlines
std::string msg(text);
while (!msg.empty() && (msg.back() == '\n' || msg.back() == '\r')) {
msg.pop_back();
}
if (!msg.empty()) {
if (!capture->captured_error.empty()) {
capture->captured_error.append("; ");
}
capture->captured_error.append(msg);
}
}
// Also call original callback to preserve logging
if (capture->original_callback) {
capture->original_callback(level, text, capture->original_user_data);
}
}, &error_capture_data);
// load the model
bool load_success = ctx_server.load_model(params);
// Restore original log callback
llama_log_set(error_capture_data.original_callback, error_capture_data.original_user_data);
if (!load_success) {
std::string error_msg = "Failed to load model: " + params.model.path;
if (!params.mmproj.path.empty()) {
error_msg += " (with mmproj: " + params.mmproj.path + ")";
}
if (params.speculative.has_dft() && !params.speculative.mparams_dft.path.empty()) {
error_msg += " (with draft model: " + params.speculative.mparams_dft.path + ")";
}
// Add captured error details if available
{
std::lock_guard<std::mutex> lock(error_capture_data.error_mutex);
if (!error_capture_data.captured_error.empty()) {
error_msg += ". Error: " + error_capture_data.captured_error;
} else {
error_msg += ". Model file may not exist or be invalid.";
}
}
result->set_message(error_msg);
if (!ctx_server.load_model(params)) {
result->set_message("Failed loading model");
result->set_success(false);
return grpc::Status(grpc::StatusCode::INTERNAL, error_msg);
return Status::CANCELLED;
}
// Process grammar triggers now that vocab is available
@@ -882,7 +801,7 @@ public:
std::string prompt_str;
std::vector<raw_buffer> files; // Declare files early so it's accessible in both branches
// Handle chat templates when UseTokenizerTemplate is enabled and Messages are provided
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_params.tmpls != nullptr) {
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_templates != nullptr) {
// Convert proto Messages to JSON format compatible with oaicompat_chat_params_parse
json body_json;
json messages_json = json::array();
@@ -1261,7 +1180,12 @@ public:
// Use the same approach as server.cpp: call oaicompat_chat_params_parse
// This handles all template application, grammar merging, etc. automatically
// Files extracted from multimodal content in messages will be added to the files vector
// chat_params already contains tmpls, allow_image, and allow_audio set during model loading
// Create parser options with current chat_templates to ensure tmpls is not null
oaicompat_parser_options parser_opt = ctx_server.impl->oai_parser_opt;
parser_opt.tmpls = ctx_server.impl->chat_templates.get(); // Ensure tmpls is set to current chat_templates
// Update allow_image and allow_audio based on current mctx state
parser_opt.allow_image = ctx_server.impl->mctx ? mtmd_support_vision(ctx_server.impl->mctx) : false;
parser_opt.allow_audio = ctx_server.impl->mctx ? mtmd_support_audio(ctx_server.impl->mctx) : false;
// Debug: Log tools before template processing
if (body_json.contains("tools")) {
@@ -1307,7 +1231,7 @@ public:
}
}
json parsed_data = oaicompat_chat_params_parse(body_json, ctx_server.impl->chat_params, files);
json parsed_data = oaicompat_chat_params_parse(body_json, parser_opt, files);
// Debug: Log tools after template processing
if (parsed_data.contains("tools")) {
@@ -1360,7 +1284,7 @@ public:
// If not using chat templates, extract files from image_data/audio_data fields
// (If using chat templates, files were already extracted by oaicompat_chat_params_parse)
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_params.tmpls == nullptr) {
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_templates == nullptr) {
const auto &images_data = data.find("image_data");
if (images_data != data.end() && images_data->is_array())
{
@@ -1568,7 +1492,7 @@ public:
return grpc::Status::OK;
}
grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) override {
grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) {
if (params_base.model.path.empty()) {
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
}
@@ -1588,7 +1512,7 @@ public:
std::string prompt_str;
std::vector<raw_buffer> files; // Declare files early so it's accessible in both branches
// Handle chat templates when UseTokenizerTemplate is enabled and Messages are provided
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_params.tmpls != nullptr) {
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_templates != nullptr) {
// Convert proto Messages to JSON format compatible with oaicompat_chat_params_parse
json body_json;
json messages_json = json::array();
@@ -1992,7 +1916,12 @@ public:
// Use the same approach as server.cpp: call oaicompat_chat_params_parse
// This handles all template application, grammar merging, etc. automatically
// Files extracted from multimodal content in messages will be added to the files vector
// chat_params already contains tmpls, allow_image, and allow_audio set during model loading
// Create parser options with current chat_templates to ensure tmpls is not null
oaicompat_parser_options parser_opt = ctx_server.impl->oai_parser_opt;
parser_opt.tmpls = ctx_server.impl->chat_templates.get(); // Ensure tmpls is set to current chat_templates
// Update allow_image and allow_audio based on current mctx state
parser_opt.allow_image = ctx_server.impl->mctx ? mtmd_support_vision(ctx_server.impl->mctx) : false;
parser_opt.allow_audio = ctx_server.impl->mctx ? mtmd_support_audio(ctx_server.impl->mctx) : false;
// Debug: Log tools before template processing
if (body_json.contains("tools")) {
@@ -2038,7 +1967,7 @@ public:
}
}
json parsed_data = oaicompat_chat_params_parse(body_json, ctx_server.impl->chat_params, files);
json parsed_data = oaicompat_chat_params_parse(body_json, parser_opt, files);
// Debug: Log tools after template processing
if (parsed_data.contains("tools")) {
@@ -2091,7 +2020,7 @@ public:
// If not using chat templates, extract files from image_data/audio_data fields
// (If using chat templates, files were already extracted by oaicompat_chat_params_parse)
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_params.tmpls == nullptr) {
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_templates == nullptr) {
const auto &images_data = data.find("image_data");
if (images_data != data.end() && images_data->is_array())
{
@@ -2234,7 +2163,7 @@ public:
return grpc::Status::OK;
}
grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) override {
grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) {
if (params_base.model.path.empty()) {
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
}
@@ -2329,7 +2258,7 @@ public:
return grpc::Status::OK;
}
grpc::Status Rerank(ServerContext* context, const backend::RerankRequest* request, backend::RerankResult* rerankResult) override {
grpc::Status Rerank(ServerContext* context, const backend::RerankRequest* request, backend::RerankResult* rerankResult) {
if (!params_base.embedding || params_base.pooling_type != LLAMA_POOLING_TYPE_RANK) {
return grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "This server does not support reranking. Start it with `--reranking` and without `--embedding`");
}
@@ -2415,7 +2344,7 @@ public:
return grpc::Status::OK;
}
grpc::Status TokenizeString(ServerContext* /*context*/, const backend::PredictOptions* request, backend::TokenizationResponse* response) override {
grpc::Status TokenizeString(ServerContext* /*context*/, const backend::PredictOptions* request, backend::TokenizationResponse* response) {
if (params_base.model.path.empty()) {
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
}
@@ -2438,7 +2367,7 @@ public:
return grpc::Status::OK;
}
grpc::Status GetMetrics(ServerContext* /*context*/, const backend::MetricsRequest* /*request*/, backend::MetricsResponse* response) override {
grpc::Status GetMetrics(ServerContext* /*context*/, const backend::MetricsRequest* /*request*/, backend::MetricsResponse* response) {
// request slots data using task queue
auto rd = ctx_server.get_response_reader();
@@ -2476,47 +2405,6 @@ public:
response->set_prompt_tokens_processed(res_metrics->n_prompt_tokens_processed_total);
return grpc::Status::OK;
}
grpc::Status ModelMetadata(ServerContext* /*context*/, const backend::ModelOptions* /*request*/, backend::ModelMetadataResponse* response) override {
// Check if model is loaded
if (params_base.model.path.empty()) {
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
}
// Check if chat templates are initialized
if (ctx_server.impl->chat_params.tmpls == nullptr) {
// If templates are not initialized, we can't detect thinking support
// Return false as default
response->set_supports_thinking(false);
response->set_rendered_template("");
return grpc::Status::OK;
}
// Detect thinking support using llama.cpp's function
bool supports_thinking = common_chat_templates_support_enable_thinking(ctx_server.impl->chat_params.tmpls.get());
response->set_supports_thinking(supports_thinking);
// Render the template with enable_thinking=true so Go code can detect thinking tokens
// This allows reusing existing detection functions in Go
std::string rendered_template = "";
if (params_base.use_jinja) {
// Render the template with enable_thinking=true to see what the actual prompt looks like
common_chat_templates_inputs dummy_inputs;
common_chat_msg msg;
msg.role = "user";
msg.content = "test";
dummy_inputs.messages = {msg};
dummy_inputs.enable_thinking = true;
dummy_inputs.use_jinja = params_base.use_jinja;
const auto rendered = common_chat_templates_apply(ctx_server.impl->chat_params.tmpls.get(), dummy_inputs);
rendered_template = rendered.prompt;
}
response->set_rendered_template(rendered_template);
return grpc::Status::OK;
}
};
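For reference, a hedged Go sketch of how a caller could consume the two fields the removed ModelMetadata handler populated (supports_thinking and rendered_template). The package name and getter names are assumptions based on the usual protoc-generated Go bindings.

```go
// Sketch only: assumes the generated LocalAI proto package used elsewhere in this diff.
package metadataprobe

import (
	"context"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

// ThinkingSupport returns whether the loaded chat template honours
// enable_thinking, plus the template rendered with it enabled, mirroring
// the supports_thinking and rendered_template fields set by the handler.
func ThinkingSupport(ctx context.Context, client pb.BackendClient) (bool, string, error) {
	md, err := client.ModelMetadata(ctx, &pb.ModelOptions{})
	if err != nil {
		return false, "", err
	}
	return md.GetSupportsThinking(), md.GetRenderedTemplate(), nil
}
```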

View File

@@ -0,0 +1,51 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
CMAKE_ARGS?=-DGGML_NATIVE=OFF
BUILD_TYPE?=
GOCMD=go
# C++17 is required by the sources below (see CXXFLAGS)
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/bark.cpp/examples -I$(INCLUDE_PATH)/sources/bark.cpp/encodec.cpp/ggml/include -I$(INCLUDE_PATH)/sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/sources/bark.cpp/build/examples -lbark -lstdc++ -lm
# bark.cpp
BARKCPP_REPO?=https://github.com/PABannier/bark.cpp.git
BARKCPP_VERSION?=5d5be84f089ab9ea53b7a793f088d3fbf7247495
# warnings
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
## bark.cpp
sources/bark.cpp:
git clone --recursive $(BARKCPP_REPO) sources/bark.cpp && \
cd sources/bark.cpp && \
git checkout $(BARKCPP_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
sources/bark.cpp/build/libbark.a: sources/bark.cpp
cd sources/bark.cpp && \
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) .. && \
cmake --build . --config Release
gobark.o:
$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)
libbark.a: sources/bark.cpp/build/libbark.a gobark.o
cp $(INCLUDE_PATH)/sources/bark.cpp/build/libbark.a ./
$(AR) rcs libbark.a gobark.o
bark-cpp: libbark.a
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH="$(CURDIR)" LIBRARY_PATH=$(CURDIR) \
$(GOCMD) build -v -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o bark-cpp ./
package:
bash package.sh
build: bark-cpp package
clean:
rm -f gobark.o libbark.a

View File

@@ -0,0 +1,85 @@
#include <iostream>
#include <tuple>
#include "bark.h"
#include "gobark.h"
#include "common.h"
#include "ggml.h"
struct bark_context *c;
void bark_print_progress_callback(struct bark_context *bctx, enum bark_encoding_step step, int progress, void *user_data) {
if (step == bark_encoding_step::SEMANTIC) {
printf("\rGenerating semantic tokens... %d%%", progress);
} else if (step == bark_encoding_step::COARSE) {
printf("\rGenerating coarse tokens... %d%%", progress);
} else if (step == bark_encoding_step::FINE) {
printf("\rGenerating fine tokens... %d%%", progress);
}
fflush(stdout);
}
int load_model(char *model) {
// initialize bark context
struct bark_context_params ctx_params = bark_context_default_params();
bark_params params;
params.model_path = model;
// ctx_params.verbosity = verbosity;
ctx_params.progress_callback = bark_print_progress_callback;
ctx_params.progress_callback_user_data = nullptr;
struct bark_context *bctx = bark_load_model(params.model_path.c_str(), ctx_params, params.seed);
if (!bctx) {
fprintf(stderr, "%s: Could not load model\n", __func__);
return 1;
}
c = bctx;
return 0;
}
int tts(char *text,int threads, char *dst ) {
ggml_time_init();
const int64_t t_main_start_us = ggml_time_us();
// generate audio
if (!bark_generate_audio(c, text, threads)) {
fprintf(stderr, "%s: An error occurred. If the problem persists, feel free to open an issue to report it.\n", __func__);
return 1;
}
const float *audio_data = bark_get_audio_data(c);
if (audio_data == NULL) {
fprintf(stderr, "%s: Could not get audio data\n", __func__);
return 1;
}
const int audio_arr_size = bark_get_audio_data_size(c);
std::vector<float> audio_arr(audio_data, audio_data + audio_arr_size);
write_wav_on_disk(audio_arr, dst);
// report timing
{
const int64_t t_main_end_us = ggml_time_us();
const int64_t t_load_us = bark_get_load_time(c);
const int64_t t_eval_us = bark_get_eval_time(c);
printf("\n\n");
printf("%s: load time = %8.2f ms\n", __func__, t_load_us / 1000.0f);
printf("%s: eval time = %8.2f ms\n", __func__, t_eval_us / 1000.0f);
printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f);
}
return 0;
}
int unload() {
    bark_free(c);
    // return a status code; the function is declared to return int
    return 0;
}

View File

@@ -0,0 +1,52 @@
package main
// #cgo CXXFLAGS: -I${SRCDIR}/sources/bark.cpp/ -I${SRCDIR}/sources/bark.cpp/encodec.cpp -I${SRCDIR}/sources/bark.cpp/encodec.cpp/ggml/include -I${SRCDIR}/sources/bark.cpp/examples -I${SRCDIR}/sources/bark.cpp/spm-headers
// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/sources/bark.cpp/build/examples -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ggml/src/ -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon -lggml -lgomp
// #include <gobark.h>
// #include <stdlib.h>
import "C"
import (
"fmt"
"unsafe"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)
type Bark struct {
base.SingleThread
threads int
}
func (sd *Bark) Load(opts *pb.ModelOptions) error {
sd.threads = int(opts.Threads)
modelFile := C.CString(opts.ModelFile)
defer C.free(unsafe.Pointer(modelFile))
ret := C.load_model(modelFile)
if ret != 0 {
return fmt.Errorf("inference failed")
}
return nil
}
func (sd *Bark) TTS(opts *pb.TTSRequest) error {
t := C.CString(opts.Text)
defer C.free(unsafe.Pointer(t))
dst := C.CString(opts.Dst)
defer C.free(unsafe.Pointer(dst))
threads := C.int(sd.threads)
ret := C.tts(t, threads, dst)
if ret != 0 {
return fmt.Errorf("inference failed")
}
return nil
}
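A short, hypothetical in-process usage of the binding above. It assumes the file sits next to `gobark.go` (same `main` package) so the `Bark` type is in scope; the model and output paths are illustrative, while `ModelFile`, `Threads`, `Text` and `Dst` are exactly the fields the binding reads.

```go
package main

import (
	"log"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

// demoBark is a sketch only: load a bark.cpp GGML model, then render one
// utterance to a WAV file via the cgo binding above.
func demoBark() {
	bark := &Bark{}
	if err := bark.Load(&pb.ModelOptions{ModelFile: "/models/bark-small.bin", Threads: 4}); err != nil {
		log.Fatalf("load: %v", err)
	}
	if err := bark.TTS(&pb.TTSRequest{Text: "hello from bark.cpp", Dst: "/tmp/bark.wav"}); err != nil {
		log.Fatalf("tts: %v", err)
	}
	log.Println("wrote /tmp/bark.wav")
}
```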

View File

@@ -0,0 +1,8 @@
#ifdef __cplusplus
extern "C" {
#endif
int load_model(char *model);
int tts(char *text,int threads, char *dst );
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,20 @@
package main
// Note: this is started internally by LocalAI and a server is allocated for each model
import (
"flag"
grpc "github.com/mudler/LocalAI/pkg/grpc"
)
var (
addr = flag.String("addr", "localhost:50051", "the address to connect to")
)
func main() {
flag.Parse()
if err := grpc.StartServer(*addr, &Bark{}); err != nil {
panic(err)
}
}

backend/go/bark-cpp/package.sh Executable file
View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile
set -e
CURDIR=$(dirname "$(realpath $0)")
# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avrf $CURDIR/bark-cpp $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
# x86_64 architecture
echo "Detected x86_64 architecture, copying x86_64 libraries..."
cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
# ARM64 architecture
echo "Detected ARM64 architecture, copying ARM64 libraries..."
cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
else
echo "Error: Could not detect architecture"
exit 1
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

backend/go/bark-cpp/run.sh Executable file
View File

@@ -0,0 +1,13 @@
#!/bin/bash
set -ex
CURDIR=$(dirname "$(realpath $0)")
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
# If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then
echo "Using lib/ld.so"
exec $CURDIR/lib/ld.so $CURDIR/bark-cpp "$@"
fi
exec $CURDIR/bark-cpp "$@"

View File

@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
# stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=e411520407663e1ddf8ff2e5ed4ff3a116fbbc97
STABLEDIFFUSION_GGML_VERSION?=9be0b91927dfa4007d053df72dea7302990226bb
CMAKE_ARGS+=-DGGML_MAX_NAME=128

View File

@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
# whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=aa1bc0d1a6dfd70dbb9f60c11df12441e03a9075
WHISPER_CPP_VERSION?=679bdb53dbcbfb3e42685f50c7ff367949fd4d48
SO_TARGET?=libgowhisper.so
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF

View File

@@ -130,9 +130,8 @@ func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptR
segments := []*pb.TranscriptSegment{}
text := ""
for i := range int(segsLen) {
// segment start/end conversion factor taken from https://github.com/ggml-org/whisper.cpp/blob/master/examples/cli/cli.cpp#L895
s := CppGetSegmentStart(i) * (10000000)
t := CppGetSegmentEnd(i) * (10000000)
s := CppGetSegmentStart(i)
t := CppGetSegmentEnd(i)
txt := strings.Clone(CppGetSegmentText(i))
tokens := make([]int32, CppNTokens(i))
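The factor of 10,000,000 dropped here is what turned whisper.cpp's segment timestamps, reported in 10 ms ticks, into nanosecond values. A small Go sketch of the same conversion (the helper name is illustrative, not from the repository):

```go
package main

import (
	"fmt"
	"time"
)

// whisperTickToDuration converts a whisper.cpp segment timestamp, reported in
// 10 ms ticks, into a time.Duration. Multiplying the tick count by 10,000,000
// (as the removed lines did) yields the same value in nanoseconds.
func whisperTickToDuration(tick int64) time.Duration {
	return time.Duration(tick) * 10 * time.Millisecond
}

func main() {
	// A segment ending at tick 250 corresponds to 2.5 s into the audio.
	fmt.Println(whisperTickToDuration(250)) // 2.5s
}
```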

View File

@@ -142,31 +142,6 @@
amd: "rocm-vllm"
intel: "intel-vllm"
nvidia-cuda-12: "cuda12-vllm"
- &vllm-omni
name: "vllm-omni"
license: apache-2.0
urls:
- https://github.com/vllm-project/vllm-omni
tags:
- text-to-image
- image-generation
- text-to-video
- video-generation
- text-to-speech
- TTS
- multimodal
- LLM
icon: https://raw.githubusercontent.com/vllm-project/vllm/main/docs/assets/logos/vllm-logo-text-dark.png
description: |
vLLM-Omni is a unified interface for multimodal generation with vLLM.
It supports image generation (text-to-image, image editing), video generation
(text-to-video, image-to-video), text generation with multimodal inputs, and
text-to-speech generation. Only supports NVIDIA (CUDA) and ROCm platforms.
alias: "vllm-omni"
capabilities:
nvidia: "cuda12-vllm-omni"
amd: "rocm-vllm-omni"
nvidia-cuda-12: "cuda12-vllm-omni"
- &mlx
name: "mlx"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx"
@@ -225,7 +200,7 @@
amd: "rocm-rerankers"
- &transformers
name: "transformers"
icon: https://avatars.githubusercontent.com/u/25720743?s=200&v=4
icon: https://camo.githubusercontent.com/26569a27b8a30a488dd345024b71dbc05da7ff1b2ba97bb6080c9f1ee0f26cc7/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f68756767696e67666163652f646f63756d656e746174696f6e2d696d616765732f7265736f6c76652f6d61696e2f7472616e73666f726d6572732f7472616e73666f726d6572735f61735f615f6d6f64656c5f646566696e6974696f6e2e706e67
alias: "transformers"
license: apache-2.0
description: |
@@ -266,6 +241,22 @@
nvidia-cuda-12: "cuda12-diffusers"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-diffusers"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-diffusers"
- &exllama2
name: "exllama2"
urls:
- https://github.com/turboderp-org/exllamav2
tags:
- text-to-text
- LLM
- EXL2
license: MIT
description: |
ExLlamaV2 is an inference library for running local LLMs on modern consumer GPUs.
alias: "exllama2"
capabilities:
nvidia: "cuda12-exllama2"
intel: "intel-exllama2"
nvidia-cuda-12: "cuda12-exllama2"
- &faster-whisper
icon: https://avatars.githubusercontent.com/u/1520500?s=200&v=4
description: |
@@ -302,25 +293,6 @@
default: "cpu-moonshine"
nvidia-cuda-13: "cuda13-moonshine"
nvidia-cuda-12: "cuda12-moonshine"
- &whisperx
description: |
WhisperX provides fast automatic speech recognition with word-level timestamps, speaker diarization,
and forced alignment. Built on faster-whisper and pyannote-audio for high-accuracy transcription
with speaker identification.
urls:
- https://github.com/m-bain/whisperX
tags:
- speech-to-text
- diarization
- whisperx
license: BSD-4-Clause
name: "whisperx"
capabilities:
nvidia: "cuda12-whisperx"
amd: "rocm-whisperx"
default: "cpu-whisperx"
nvidia-cuda-13: "cuda13-whisperx"
nvidia-cuda-12: "cuda12-whisperx"
- &kokoro
icon: https://avatars.githubusercontent.com/u/166769057?v=4
description: |
@@ -367,6 +339,51 @@
nvidia-cuda-13: "cuda13-coqui"
nvidia-cuda-12: "cuda12-coqui"
icon: https://avatars.githubusercontent.com/u/1338804?s=200&v=4
- &bark
urls:
- https://github.com/suno-ai/bark
description: |
Bark is a transformer-based text-to-audio model created by Suno. Bark can generate highly realistic, multilingual speech as well as other audio - including music, background noise and simple sound effects. The model can also produce nonverbal communications like laughing, sighing and crying. To support the research community, we are providing access to pretrained model checkpoints, which are ready for inference and available for commercial use.
tags:
- text-to-speech
- TTS
license: MIT
name: "bark"
alias: "bark"
capabilities:
cuda: "cuda12-bark"
intel: "intel-bark"
rocm: "rocm-bark"
nvidia-cuda-13: "cuda13-bark"
nvidia-cuda-12: "cuda12-bark"
icon: https://avatars.githubusercontent.com/u/99442120?s=200&v=4
- &barkcpp
urls:
- https://github.com/PABannier/bark.cpp
description: |
With bark.cpp, our goal is to bring real-time realistic multilingual text-to-speech generation to the community.
- Plain C/C++ implementation without dependencies
- AVX, AVX2 and AVX512 support for x86 architectures
- CPU- and GPU-compatible backends
- Mixed F16 / F32 precision
- 4-bit, 5-bit and 8-bit integer quantization
- Metal and CUDA backends
Supported models: Bark Small, Bark Large
tags:
- text-to-speech
- TTS
license: MIT
icon: https://github.com/PABannier/bark.cpp/raw/main/assets/banner.png
name: "bark-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-bark-cpp"
mirrors:
- localai/localai-backends:latest-bark-cpp
alias: "bark-cpp"
- &chatterbox
urls:
- https://github.com/resemble-ai/chatterbox
@@ -377,7 +394,7 @@
- text-to-speech
- TTS
license: MIT
icon: https://avatars.githubusercontent.com/u/49844015?s=200&v=4
icon: https://private-user-images.githubusercontent.com/660224/448166653-bd8c5f03-e91d-4ee5-b680-57355da204d1.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NTAxOTE0MDAsIm5iZiI6MTc1MDE5MTEwMCwicGF0aCI6Ii82NjAyMjQvNDQ4MTY2NjUzLWJkOGM1ZjAzLWU5MWQtNGVlNS1iNjgwLTU3MzU1ZGEyMDRkMS5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjUwNjE3JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI1MDYxN1QyMDExNDBaJlgtQW16LUV4cGlyZXM9MzAwJlgtQW16LVNpZ25hdHVyZT1hMmI1NGY3OGFiZTlhNGFkNTVlYTY4NTIwMWEzODRiZGE4YzdhNGQ5MGNhNzE3MDYyYTA2NDIxYTkyYzhiODkwJlgtQW16LVNpZ25lZEhlYWRlcnM9aG9zdCJ9.mR9kM9xX0TdzPuSpuspCllHYQiq79dFQ2rtuNvjrl6w
name: "chatterbox"
alias: "chatterbox"
capabilities:
@@ -411,91 +428,6 @@
nvidia-l4t-cuda-12: "nvidia-l4t-vibevoice"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-vibevoice"
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
- &qwen-tts
urls:
- https://github.com/QwenLM/Qwen3-TTS
description: |
Qwen3-TTS is a high-quality text-to-speech model supporting custom voice, voice design, and voice cloning.
tags:
- text-to-speech
- TTS
license: apache-2.0
name: "qwen-tts"
alias: "qwen-tts"
capabilities:
nvidia: "cuda12-qwen-tts"
intel: "intel-qwen-tts"
amd: "rocm-qwen-tts"
nvidia-l4t: "nvidia-l4t-qwen-tts"
default: "cpu-qwen-tts"
nvidia-cuda-13: "cuda13-qwen-tts"
nvidia-cuda-12: "cuda12-qwen-tts"
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-tts"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-tts"
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
- &qwen-asr
urls:
- https://github.com/QwenLM/Qwen3-ASR
description: |
Qwen3-ASR is an automatic speech recognition model supporting multiple languages and batch inference.
tags:
- speech-recognition
- ASR
license: apache-2.0
name: "qwen-asr"
alias: "qwen-asr"
capabilities:
nvidia: "cuda12-qwen-asr"
intel: "intel-qwen-asr"
amd: "rocm-qwen-asr"
nvidia-l4t: "nvidia-l4t-qwen-asr"
default: "cpu-qwen-asr"
nvidia-cuda-13: "cuda13-qwen-asr"
nvidia-cuda-12: "cuda12-qwen-asr"
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-asr"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-asr"
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
- &voxcpm
urls:
- https://github.com/ModelBest/VoxCPM
description: |
VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly expressive speech.
tags:
- text-to-speech
- TTS
license: mit
name: "voxcpm"
alias: "voxcpm"
capabilities:
nvidia: "cuda12-voxcpm"
intel: "intel-voxcpm"
amd: "rocm-voxcpm"
default: "cpu-voxcpm"
nvidia-cuda-13: "cuda13-voxcpm"
nvidia-cuda-12: "cuda12-voxcpm"
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
- &pocket-tts
urls:
- https://github.com/kyutai-labs/pocket-tts
description: |
Pocket TTS is a lightweight text-to-speech model designed to run efficiently on CPUs.
tags:
- text-to-speech
- TTS
license: mit
name: "pocket-tts"
alias: "pocket-tts"
capabilities:
nvidia: "cuda12-pocket-tts"
intel: "intel-pocket-tts"
amd: "rocm-pocket-tts"
nvidia-l4t: "nvidia-l4t-pocket-tts"
default: "cpu-pocket-tts"
nvidia-cuda-13: "cuda13-pocket-tts"
nvidia-cuda-12: "cuda12-pocket-tts"
nvidia-l4t-cuda-12: "nvidia-l4t-pocket-tts"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-pocket-tts"
icon: https://avatars.githubusercontent.com/u/151010778?s=200&v=4
- &piper
name: "piper"
uri: "quay.io/go-skynet/local-ai-backends:latest-piper"
@@ -583,14 +515,18 @@
default: "cpu-neutts"
nvidia: "cuda12-neutts"
amd: "rocm-neutts"
nvidia-l4t: "nvidia-l4t-neutts"
nvidia-cuda-12: "cuda12-neutts"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-neutts"
- !!merge <<: *neutts
name: "neutts-development"
capabilities:
default: "cpu-neutts-development"
nvidia: "cuda12-neutts-development"
amd: "rocm-neutts-development"
nvidia-l4t: "nvidia-l4t-neutts-development"
nvidia-cuda-12: "cuda12-neutts-development"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-neutts-development"
- !!merge <<: *llamacpp
name: "llama-cpp-development"
capabilities:
@@ -620,6 +556,11 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-neutts"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-neutts
- !!merge <<: *neutts
name: "nvidia-l4t-arm64-neutts"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-neutts"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-neutts
- !!merge <<: *neutts
name: "cpu-neutts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-neutts"
@@ -635,6 +576,11 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-neutts"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-neutts
- !!merge <<: *neutts
name: "nvidia-l4t-arm64-neutts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-neutts"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-neutts
- !!merge <<: *mlx
name: "mlx-development"
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-mlx"
@@ -1013,33 +959,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-vllm"
mirrors:
- localai/localai-backends:master-gpu-intel-vllm
# vllm-omni
- !!merge <<: *vllm-omni
name: "vllm-omni-development"
capabilities:
nvidia: "cuda12-vllm-omni-development"
amd: "rocm-vllm-omni-development"
nvidia-cuda-12: "cuda12-vllm-omni-development"
- !!merge <<: *vllm-omni
name: "cuda12-vllm-omni"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vllm-omni"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-vllm-omni
- !!merge <<: *vllm-omni
name: "rocm-vllm-omni"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-vllm-omni"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-vllm-omni
- !!merge <<: *vllm-omni
name: "cuda12-vllm-omni-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vllm-omni"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-vllm-omni
- !!merge <<: *vllm-omni
name: "rocm-vllm-omni-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-vllm-omni"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-vllm-omni
# rfdetr
- !!merge <<: *rfdetr
name: "rfdetr-development"
@@ -1302,6 +1221,22 @@
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-diffusers
## exllama2
- !!merge <<: *exllama2
name: "exllama2-development"
capabilities:
nvidia: "cuda12-exllama2-development"
intel: "intel-exllama2-development"
- !!merge <<: *exllama2
name: "cuda12-exllama2"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-exllama2
- !!merge <<: *exllama2
name: "cuda12-exllama2-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-exllama2
## kokoro
- !!merge <<: *kokoro
name: "kokoro-development"
@@ -1436,55 +1371,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
## whisperx
- !!merge <<: *whisperx
name: "whisperx-development"
capabilities:
nvidia: "cuda12-whisperx-development"
amd: "rocm-whisperx-development"
default: "cpu-whisperx-development"
nvidia-cuda-13: "cuda13-whisperx-development"
nvidia-cuda-12: "cuda12-whisperx-development"
- !!merge <<: *whisperx
name: "cpu-whisperx"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-whisperx"
mirrors:
- localai/localai-backends:latest-cpu-whisperx
- !!merge <<: *whisperx
name: "cpu-whisperx-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-whisperx"
mirrors:
- localai/localai-backends:master-cpu-whisperx
- !!merge <<: *whisperx
name: "cuda12-whisperx"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-whisperx"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-whisperx
- !!merge <<: *whisperx
name: "cuda12-whisperx-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-whisperx"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-whisperx
- !!merge <<: *whisperx
name: "rocm-whisperx"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-whisperx"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-whisperx
- !!merge <<: *whisperx
name: "rocm-whisperx-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-whisperx"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-whisperx
- !!merge <<: *whisperx
name: "cuda13-whisperx"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-whisperx"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-whisperx
- !!merge <<: *whisperx
name: "cuda13-whisperx-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-whisperx"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-whisperx
## coqui
- !!merge <<: *coqui
@@ -1523,6 +1409,47 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-coqui"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-coqui
## bark
- !!merge <<: *bark
name: "bark-development"
capabilities:
nvidia: "cuda12-bark-development"
intel: "intel-bark-development"
amd: "rocm-bark-development"
- !!merge <<: *bark
name: "rocm-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-bark
- !!merge <<: *bark
name: "intel-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-bark"
mirrors:
- localai/localai-backends:latest-gpu-intel-bark
- !!merge <<: *bark
name: "intel-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-bark"
mirrors:
- localai/localai-backends:master-gpu-intel-bark
- !!merge <<: *bark
name: "cuda12-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-bark"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-bark
- !!merge <<: *bark
name: "rocm-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-bark"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-bark
- !!merge <<: *bark
name: "cuda12-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-bark"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-bark
- !!merge <<: *barkcpp
name: "bark-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-bark-cpp"
alias: "bark-cpp"
## chatterbox
- !!merge <<: *chatterbox
name: "chatterbox-development"
@@ -1678,312 +1605,3 @@
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice
## qwen-tts
- !!merge <<: *qwen-tts
name: "qwen-tts-development"
capabilities:
nvidia: "cuda12-qwen-tts-development"
intel: "intel-qwen-tts-development"
amd: "rocm-qwen-tts-development"
nvidia-l4t: "nvidia-l4t-qwen-tts-development"
default: "cpu-qwen-tts-development"
nvidia-cuda-13: "cuda13-qwen-tts-development"
nvidia-cuda-12: "cuda12-qwen-tts-development"
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-tts-development"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-tts-development"
- !!merge <<: *qwen-tts
name: "cpu-qwen-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-qwen-tts"
mirrors:
- localai/localai-backends:latest-cpu-qwen-tts
- !!merge <<: *qwen-tts
name: "cpu-qwen-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-qwen-tts"
mirrors:
- localai/localai-backends:master-cpu-qwen-tts
- !!merge <<: *qwen-tts
name: "cuda12-qwen-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-qwen-tts"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-qwen-tts
- !!merge <<: *qwen-tts
name: "cuda12-qwen-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-qwen-tts"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-qwen-tts
- !!merge <<: *qwen-tts
name: "cuda13-qwen-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-qwen-tts"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-qwen-tts
- !!merge <<: *qwen-tts
name: "cuda13-qwen-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-qwen-tts"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-qwen-tts
- !!merge <<: *qwen-tts
name: "intel-qwen-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-qwen-tts"
mirrors:
- localai/localai-backends:latest-gpu-intel-qwen-tts
- !!merge <<: *qwen-tts
name: "intel-qwen-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-qwen-tts"
mirrors:
- localai/localai-backends:master-gpu-intel-qwen-tts
- !!merge <<: *qwen-tts
name: "rocm-qwen-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-qwen-tts"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-qwen-tts
- !!merge <<: *qwen-tts
name: "rocm-qwen-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-qwen-tts"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-qwen-tts
- !!merge <<: *qwen-tts
name: "nvidia-l4t-qwen-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-qwen-tts"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-qwen-tts
- !!merge <<: *qwen-tts
name: "nvidia-l4t-qwen-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-qwen-tts"
mirrors:
- localai/localai-backends:master-nvidia-l4t-qwen-tts
- !!merge <<: *qwen-tts
name: "cuda13-nvidia-l4t-arm64-qwen-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-tts"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-tts
- !!merge <<: *qwen-tts
name: "cuda13-nvidia-l4t-arm64-qwen-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts
## qwen-asr
- !!merge <<: *qwen-asr
name: "qwen-asr-development"
capabilities:
nvidia: "cuda12-qwen-asr-development"
intel: "intel-qwen-asr-development"
amd: "rocm-qwen-asr-development"
nvidia-l4t: "nvidia-l4t-qwen-asr-development"
default: "cpu-qwen-asr-development"
nvidia-cuda-13: "cuda13-qwen-asr-development"
nvidia-cuda-12: "cuda12-qwen-asr-development"
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-asr-development"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-asr-development"
- !!merge <<: *qwen-asr
name: "cpu-qwen-asr"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-qwen-asr"
mirrors:
- localai/localai-backends:latest-cpu-qwen-asr
- !!merge <<: *qwen-asr
name: "cpu-qwen-asr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-qwen-asr"
mirrors:
- localai/localai-backends:master-cpu-qwen-asr
- !!merge <<: *qwen-asr
name: "cuda12-qwen-asr"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-qwen-asr"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-qwen-asr
- !!merge <<: *qwen-asr
name: "cuda12-qwen-asr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-qwen-asr"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-qwen-asr
- !!merge <<: *qwen-asr
name: "cuda13-qwen-asr"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-qwen-asr"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-qwen-asr
- !!merge <<: *qwen-asr
name: "cuda13-qwen-asr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-qwen-asr"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-qwen-asr
- !!merge <<: *qwen-asr
name: "intel-qwen-asr"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-qwen-asr"
mirrors:
- localai/localai-backends:latest-gpu-intel-qwen-asr
- !!merge <<: *qwen-asr
name: "intel-qwen-asr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-qwen-asr"
mirrors:
- localai/localai-backends:master-gpu-intel-qwen-asr
- !!merge <<: *qwen-asr
name: "rocm-qwen-asr"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-qwen-asr"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-qwen-asr
- !!merge <<: *qwen-asr
name: "rocm-qwen-asr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-qwen-asr"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-qwen-asr
- !!merge <<: *qwen-asr
name: "nvidia-l4t-qwen-asr"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-qwen-asr"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-qwen-asr
- !!merge <<: *qwen-asr
name: "nvidia-l4t-qwen-asr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-qwen-asr"
mirrors:
- localai/localai-backends:master-nvidia-l4t-qwen-asr
- !!merge <<: *qwen-asr
name: "cuda13-nvidia-l4t-arm64-qwen-asr"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-asr"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-asr
- !!merge <<: *qwen-asr
name: "cuda13-nvidia-l4t-arm64-qwen-asr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr
## voxcpm
- !!merge <<: *voxcpm
name: "voxcpm-development"
capabilities:
nvidia: "cuda12-voxcpm-development"
intel: "intel-voxcpm-development"
amd: "rocm-voxcpm-development"
default: "cpu-voxcpm-development"
nvidia-cuda-13: "cuda13-voxcpm-development"
nvidia-cuda-12: "cuda12-voxcpm-development"
- !!merge <<: *voxcpm
name: "cpu-voxcpm"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-voxcpm"
mirrors:
- localai/localai-backends:latest-cpu-voxcpm
- !!merge <<: *voxcpm
name: "cpu-voxcpm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-voxcpm"
mirrors:
- localai/localai-backends:master-cpu-voxcpm
- !!merge <<: *voxcpm
name: "cuda12-voxcpm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-voxcpm"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-voxcpm
- !!merge <<: *voxcpm
name: "cuda12-voxcpm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-voxcpm"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-voxcpm
- !!merge <<: *voxcpm
name: "cuda13-voxcpm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-voxcpm"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-voxcpm
- !!merge <<: *voxcpm
name: "cuda13-voxcpm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-voxcpm"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-voxcpm
- !!merge <<: *voxcpm
name: "intel-voxcpm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-voxcpm"
mirrors:
- localai/localai-backends:latest-gpu-intel-voxcpm
- !!merge <<: *voxcpm
name: "intel-voxcpm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-voxcpm"
mirrors:
- localai/localai-backends:master-gpu-intel-voxcpm
- !!merge <<: *voxcpm
name: "rocm-voxcpm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-voxcpm"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-voxcpm
- !!merge <<: *voxcpm
name: "rocm-voxcpm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-voxcpm"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-voxcpm
## pocket-tts
- !!merge <<: *pocket-tts
name: "pocket-tts-development"
capabilities:
nvidia: "cuda12-pocket-tts-development"
intel: "intel-pocket-tts-development"
amd: "rocm-pocket-tts-development"
nvidia-l4t: "nvidia-l4t-pocket-tts-development"
default: "cpu-pocket-tts-development"
nvidia-cuda-13: "cuda13-pocket-tts-development"
nvidia-cuda-12: "cuda12-pocket-tts-development"
nvidia-l4t-cuda-12: "nvidia-l4t-pocket-tts-development"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-pocket-tts-development"
- !!merge <<: *pocket-tts
name: "cpu-pocket-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-pocket-tts"
mirrors:
- localai/localai-backends:latest-cpu-pocket-tts
- !!merge <<: *pocket-tts
name: "cpu-pocket-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-pocket-tts"
mirrors:
- localai/localai-backends:master-cpu-pocket-tts
- !!merge <<: *pocket-tts
name: "cuda12-pocket-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-pocket-tts"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-pocket-tts
- !!merge <<: *pocket-tts
name: "cuda12-pocket-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-pocket-tts"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-pocket-tts
- !!merge <<: *pocket-tts
name: "cuda13-pocket-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-pocket-tts"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-pocket-tts
- !!merge <<: *pocket-tts
name: "cuda13-pocket-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-pocket-tts"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-pocket-tts
- !!merge <<: *pocket-tts
name: "intel-pocket-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-pocket-tts"
mirrors:
- localai/localai-backends:latest-gpu-intel-pocket-tts
- !!merge <<: *pocket-tts
name: "intel-pocket-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-pocket-tts"
mirrors:
- localai/localai-backends:master-gpu-intel-pocket-tts
- !!merge <<: *pocket-tts
name: "rocm-pocket-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-pocket-tts"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-pocket-tts
- !!merge <<: *pocket-tts
name: "rocm-pocket-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-pocket-tts"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-pocket-tts
- !!merge <<: *pocket-tts
name: "nvidia-l4t-pocket-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-pocket-tts"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-pocket-tts
- !!merge <<: *pocket-tts
name: "nvidia-l4t-pocket-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-pocket-tts"
mirrors:
- localai/localai-backends:master-nvidia-l4t-pocket-tts
- !!merge <<: *pocket-tts
name: "cuda13-nvidia-l4t-arm64-pocket-tts"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-pocket-tts"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-pocket-tts
- !!merge <<: *pocket-tts
name: "cuda13-nvidia-l4t-arm64-pocket-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-pocket-tts"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-pocket-tts

View File

@@ -16,8 +16,10 @@ The Python backends use a unified build system based on `libbackend.sh` that pro
- **transformers** - Hugging Face Transformers framework (PyTorch-based)
- **vllm** - High-performance LLM inference engine
- **mlx** - Apple Silicon optimized ML framework
- **exllama2** - ExLlama2 quantized models
### Audio & Speech
- **bark** - Text-to-speech synthesis
- **coqui** - Coqui TTS models
- **faster-whisper** - Fast Whisper speech recognition
- **kitten-tts** - Lightweight TTS

View File

@@ -1,18 +1,18 @@
.PHONY: voxcpm
voxcpm:
.PHONY: ttsbark
ttsbark:
bash install.sh
.PHONY: run
run: voxcpm
@echo "Running voxcpm..."
run: ttsbark
@echo "Running bark..."
bash run.sh
@echo "voxcpm run."
@echo "bark run."
.PHONY: test
test: voxcpm
@echo "Testing voxcpm..."
test: ttsbark
@echo "Testing bark..."
bash test.sh
@echo "voxcpm tested."
@echo "bark tested."
.PHONY: protogen-clean
protogen-clean:
@@ -20,4 +20,4 @@ protogen-clean:
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__
rm -rf venv __pycache__

View File

@@ -0,0 +1,16 @@
# Creating a separate environment for the ttsbark project
```
make ttsbark
```
# Testing the gRPC server
```
<The path of your python interpreter> -m unittest test_ttsbark.py
```
For example:
```
/opt/conda/envs/bark/bin/python -m unittest extra/grpc/bark/test_ttsbark.py
```

View File

@@ -0,0 +1,98 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server for LocalAI that provides Bark TTS
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os
from scipy.io.wavfile import write as write_wav
import backend_pb2
import backend_pb2_grpc
from bark import SAMPLE_RATE, generate_audio, preload_models
import grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
BackendServicer is the class that implements the gRPC service
"""
def Health(self, request, context):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
def LoadModel(self, request, context):
model_name = request.Model
try:
print("Preparing models, please wait", file=sys.stderr)
# download and load all models
preload_models()
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
# Implement your logic here for the LoadModel service
# Replace this with your desired response
return backend_pb2.Result(message="Model loaded successfully", success=True)
def TTS(self, request, context):
model = request.model
print(request, file=sys.stderr)
try:
audio_array = None
if model != "":
audio_array = generate_audio(request.text, history_prompt=model)
else:
audio_array = generate_audio(request.text)
print("saving to", request.dst, file=sys.stderr)
# save audio to disk
write_wav(request.dst, SAMPLE_RATE, audio_array)
print("saved to", request.dst, file=sys.stderr)
print("tts for", file=sys.stderr)
print(request, file=sys.stderr)
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
return backend_pb2.Result(success=True)
def serve(address):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
])
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
server.add_insecure_port(address)
server.start()
print("Server started. Listening on: " + address, file=sys.stderr)
# Define the signal handler function
def signal_handler(sig, frame):
print("Received termination signal. Shutting down...")
server.stop(0)
sys.exit(0)
# Set the signal handlers for SIGINT and SIGTERM
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the gRPC server.")
parser.add_argument(
"--addr", default="localhost:50051", help="The address to bind the server to."
)
args = parser.parse_args()
serve(args.addr)
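
For context, this server is normally driven by LocalAI itself over gRPC; a minimal hand-rolled client sketch, using only the request shapes exercised by the test files in this compare (the voice name and output path are illustrative):

```
#!/usr/bin/env python3
# Client sketch for the Bark backend above; assumes backend_pb2/backend_pb2_grpc
# have been generated from backend.proto and the server is listening on 50051.
import grpc
import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    print(stub.Health(backend_pb2.HealthMessage()).message)  # b'OK'
    # LoadModel triggers preload_models(); Bark downloads everything it needs.
    load = stub.LoadModel(backend_pb2.ModelOptions(Model="v2/en_speaker_4"))
    assert load.success, load.message
    # TTS writes a WAV to dst; leaving TTSRequest.model empty uses the default voice.
    tts = stub.TTS(backend_pb2.TTSRequest(text="Hello from Bark", dst="/tmp/bark.wav"))
    print("ok" if tts.success else tts.message)
```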

View File

@@ -16,15 +16,4 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
# Use python 3.12 for l4t
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
PYTHON_VERSION="3.12"
PYTHON_PATCH="12"
PY_STANDALONE_TAG="20251120"
fi
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
USE_PIP=true
fi
installRequirements

View File

@@ -0,0 +1,4 @@
transformers
accelerate
torch==2.4.1
torchaudio==2.4.1

View File

@@ -0,0 +1,4 @@
torch==2.4.1
torchaudio==2.4.1
transformers
accelerate

View File

@@ -0,0 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
torch==2.8.0+rocm6.4
torchaudio==2.8.0+rocm6.4
transformers
accelerate

View File

@@ -0,0 +1,9 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu
torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu
optimum[openvino]
setuptools
transformers
accelerate

View File

@@ -0,0 +1,4 @@
bark==0.1.5
grpcio==1.76.0
protobuf
certifi

View File

@@ -6,4 +6,4 @@ else
source $backend_dir/../common/libbackend.sh
fi
startBackend $@
startBackend $@

View File

@@ -1,3 +1,6 @@
"""
A test script to test the gRPC service
"""
import unittest
import subprocess
import time
@@ -6,21 +9,29 @@ import backend_pb2_grpc
import grpc
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service.
This class contains methods to test the startup and shutdown of the gRPC service.
TestBackendServicer is the class that tests the gRPC service
"""
def setUp(self):
self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"])
"""
This method sets up the gRPC service by starting the server
"""
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
time.sleep(10)
def tearDown(self) -> None:
"""
This method tears down the gRPC service by terminating the server
"""
self.service.terminate()
self.service.wait()
def test_server_startup(self):
"""
This method tests if the server starts up successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
@@ -41,8 +52,7 @@ class TestBackendServicer(unittest.TestCase):
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
# Use a small image generation model for testing
response = stub.LoadModel(backend_pb2.ModelOptions(Model="Tongyi-MAI/Z-Image-Turbo"))
response = stub.LoadModel(backend_pb2.ModelOptions(Model="v2/en_speaker_4"))
self.assertTrue(response.success)
self.assertEqual(response.message, "Model loaded successfully")
except Exception as err:
@@ -51,32 +61,21 @@ class TestBackendServicer(unittest.TestCase):
finally:
self.tearDown()
def test_generate_image(self):
def test_tts(self):
"""
This method tests if image generation works
This method tests if the embeddings are generated successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="Tongyi-MAI/Z-Image-Turbo"))
response = stub.LoadModel(backend_pb2.ModelOptions(Model="v2/en_speaker_4"))
self.assertTrue(response.success)
req = backend_pb2.GenerateImageRequest(
positive_prompt="a cup of coffee on the table",
dst="/tmp/test_output.png",
width=512,
height=512,
step=20,
seed=42
)
resp = stub.GenerateImage(req)
self.assertTrue(resp.success)
tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
tts_response = stub.TTS(tts_request)
self.assertIsNotNone(tts_response)
except Exception as err:
print(err)
self.fail("GenerateImage service failed")
self.fail("TTS service failed")
finally:
self.tearDown()
if __name__ == "__main__":
unittest.main()
self.tearDown()

View File

@@ -15,11 +15,14 @@ fi
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
USE_PIP=true
# This is here because the jetson-ai-lab.io PyPI mirror's root PyPI endpoint (pypi.jetson-ai-lab.io/root/pypi/)
# returns 503 errors when uv tries to fall back to it for packages not found in the specific subdirectory.
# We need uv to continue falling through to the official PyPI index when it encounters these errors.
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --index-strategy=unsafe-first-match"
fi
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
installRequirements
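
To make the effect of that flag concrete: under the l4t profiles the helper ends up invoking uv roughly as below (a sketch only; the requirements file names are illustrative and the actual command is assembled by `installRequirements`):

```
uv pip install --no-build-isolation --index-strategy=unsafe-first-match \
    --extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/ \
    -r requirements.txt -r requirements-l4t.txt
```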

View File

@@ -1,5 +0,0 @@
# Build dependencies needed for packages installed from source (e.g., git dependencies)
# When using --no-build-isolation, these must be installed in the venv first
wheel
setuptools
packaging

View File

@@ -1,6 +1,7 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch
torchaudio
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi
transformers
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289

View File

@@ -398,7 +398,7 @@ function runProtogen() {
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
# you may want to add the following line to a requirements-intel.txt if you use one:
#
# --index-url https://download.pytorch.org/whl/xpu
# --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
#
# If you need to add extra flags into the pip install command you can do so by setting the variable EXTRA_PIP_INSTALL_FLAGS
# before calling installRequirements. For example:

View File

@@ -1,4 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/xpu
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu
torch==2.8.0
oneccl_bind_pt==2.8.0+xpu
optimum[openvino]

View File

@@ -1,4 +1,4 @@
# Creating a separate environment for coqui project
# Creating a separate environment for ttsbark project
```
make coqui

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for Coqui TTS
This is an extra gRPC server of LocalAI for Bark TTS
"""
from concurrent import futures
import time

View File

@@ -1,6 +1,8 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch==2.8.0+xpu
torchaudio==2.8.0+xpu
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu
optimum[openvino]
setuptools
transformers==4.48.3

View File

@@ -41,10 +41,6 @@ from optimum.quanto import freeze, qfloat8, quantize
from transformers import T5EncoderModel
from safetensors.torch import load_file
# Import LTX-2 specific utilities
from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
from diffusers import LTX2VideoTransformer3DModel, GGUFQuantizationConfig
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
COMPEL = os.environ.get("COMPEL", "0") == "1"
XPU = os.environ.get("XPU", "0") == "1"
@@ -294,104 +290,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
pipe.enable_model_cpu_offload()
return pipe
# LTX2ImageToVideoPipeline - needs img2vid flag, CPU offload, and special handling
if pipeline_type == "LTX2ImageToVideoPipeline":
self.img2vid = True
self.ltx2_pipeline = True
# Check if loading from single file (GGUF)
if fromSingleFile and LTX2VideoTransformer3DModel is not None:
_, single_file_ext = os.path.splitext(modelFile)
if single_file_ext == ".gguf":
# Load transformer from single GGUF file with quantization
transformer_kwargs = {}
quantization_config = GGUFQuantizationConfig(compute_dtype=torchType)
transformer_kwargs["quantization_config"] = quantization_config
transformer = LTX2VideoTransformer3DModel.from_single_file(
modelFile,
config=request.Model, # Use request.Model as the config/model_id
subfolder="transformer",
**transformer_kwargs,
)
# Load pipeline with custom transformer
pipe = load_diffusers_pipeline(
class_name="LTX2ImageToVideoPipeline",
model_id=request.Model,
transformer=transformer,
torch_dtype=torchType,
)
else:
# Single file but not GGUF - use standard single file loading
pipe = load_diffusers_pipeline(
class_name="LTX2ImageToVideoPipeline",
model_id=modelFile,
from_single_file=True,
torch_dtype=torchType,
)
else:
# Standard loading from pretrained
pipe = load_diffusers_pipeline(
class_name="LTX2ImageToVideoPipeline",
model_id=request.Model,
torch_dtype=torchType,
variant=variant
)
if not DISABLE_CPU_OFFLOAD:
pipe.enable_model_cpu_offload()
return pipe
# LTX2Pipeline - text-to-video pipeline, needs txt2vid flag, CPU offload, and special handling
if pipeline_type == "LTX2Pipeline":
self.txt2vid = True
self.ltx2_pipeline = True
# Check if loading from single file (GGUF)
if fromSingleFile and LTX2VideoTransformer3DModel is not None:
_, single_file_ext = os.path.splitext(modelFile)
if single_file_ext == ".gguf":
# Load transformer from single GGUF file with quantization
transformer_kwargs = {}
quantization_config = GGUFQuantizationConfig(compute_dtype=torchType)
transformer_kwargs["quantization_config"] = quantization_config
transformer = LTX2VideoTransformer3DModel.from_single_file(
modelFile,
config=request.Model, # Use request.Model as the config/model_id
subfolder="transformer",
**transformer_kwargs,
)
# Load pipeline with custom transformer
pipe = load_diffusers_pipeline(
class_name="LTX2Pipeline",
model_id=request.Model,
transformer=transformer,
torch_dtype=torchType,
)
else:
# Single file but not GGUF - use standard single file loading
pipe = load_diffusers_pipeline(
class_name="LTX2Pipeline",
model_id=modelFile,
from_single_file=True,
torch_dtype=torchType,
)
else:
# Standard loading from pretrained
pipe = load_diffusers_pipeline(
class_name="LTX2Pipeline",
model_id=request.Model,
torch_dtype=torchType,
variant=variant
)
if not DISABLE_CPU_OFFLOAD:
pipe.enable_model_cpu_offload()
return pipe
# ================================================================
# Dynamic pipeline loading - the default path for most pipelines
# Uses the dynamic loader to instantiate any pipeline by class name
@@ -506,9 +404,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
fromSingleFile = request.Model.startswith("http") or request.Model.startswith("/") or local
self.img2vid = False
self.txt2vid = False
self.ltx2_pipeline = False
print(f"LoadModel: PipelineType from request: {request.PipelineType}", file=sys.stderr)
# Load pipeline using dynamic loader
# Special cases that require custom initialization are handled first
@@ -519,8 +414,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
torchType=torchType,
variant=variant
)
print(f"LoadModel: After loading - ltx2_pipeline: {self.ltx2_pipeline}, img2vid: {self.img2vid}, txt2vid: {self.txt2vid}, PipelineType: {self.PipelineType}", file=sys.stderr)
if CLIPSKIP and request.CLIPSkip != 0:
self.clip_skip = request.CLIPSkip
@@ -758,20 +651,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
try:
prompt = request.prompt
if not prompt:
print(f"GenerateVideo: No prompt provided for video generation.", file=sys.stderr)
return backend_pb2.Result(success=False, message="No prompt provided for video generation")
# Debug: Print raw request values
print(f"GenerateVideo: Raw request values - num_frames: {request.num_frames}, fps: {request.fps}, cfg_scale: {request.cfg_scale}, step: {request.step}", file=sys.stderr)
# Set default values from request or use defaults
num_frames = request.num_frames if request.num_frames > 0 else 81
fps = request.fps if request.fps > 0 else 16
cfg_scale = request.cfg_scale if request.cfg_scale > 0 else 4.0
num_inference_steps = request.step if request.step > 0 else 40
print(f"GenerateVideo: Using values - num_frames: {num_frames}, fps: {fps}, cfg_scale: {cfg_scale}, num_inference_steps: {num_inference_steps}", file=sys.stderr)
# Prepare generation parameters
kwargs = {
"prompt": prompt,
@@ -797,86 +684,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
kwargs["end_image"] = load_image(request.end_image)
print(f"Generating video with {kwargs=}", file=sys.stderr)
print(f"GenerateVideo: Pipeline type: {self.PipelineType}, ltx2_pipeline flag: {self.ltx2_pipeline}", file=sys.stderr)
# Generate video frames based on pipeline type
if self.ltx2_pipeline or self.PipelineType in ["LTX2Pipeline", "LTX2ImageToVideoPipeline"]:
# LTX-2 generation with audio (supports both text-to-video and image-to-video)
# Determine if this is text-to-video (no image) or image-to-video (has image)
has_image = bool(request.start_image)
# Remove image-related parameters that might have been added earlier
kwargs.pop("start_image", None)
kwargs.pop("end_image", None)
# LTX2ImageToVideoPipeline uses 'image' parameter for image-to-video
# LTX2Pipeline (text-to-video) doesn't need an image parameter
if has_image:
# Image-to-video: use 'image' parameter
if self.PipelineType == "LTX2ImageToVideoPipeline":
image = load_image(request.start_image)
kwargs["image"] = image
print(f"LTX-2: Using image-to-video mode with image", file=sys.stderr)
else:
# If pipeline type is LTX2Pipeline but we have an image, we can't do image-to-video
return backend_pb2.Result(success=False, message="LTX2Pipeline does not support image-to-video. Use LTX2ImageToVideoPipeline for image-to-video generation.")
else:
# Text-to-video: no image parameter needed
# Ensure no image-related kwargs are present
kwargs.pop("image", None)
print(f"LTX-2: Using text-to-video mode (no image)", file=sys.stderr)
# LTX-2 uses 'frame_rate' instead of 'fps'
frame_rate = float(fps)
kwargs["frame_rate"] = frame_rate
# LTX-2 requires output_type="np" and return_dict=False
kwargs["output_type"] = "np"
kwargs["return_dict"] = False
# Generate video and audio
print(f"LTX-2: Generating with kwargs: {kwargs}", file=sys.stderr)
try:
video, audio = self.pipe(**kwargs)
print(f"LTX-2: Generated video shape: {video.shape}, audio shape: {audio.shape}", file=sys.stderr)
except Exception as e:
print(f"LTX-2: Error during pipe() call: {e}", file=sys.stderr)
traceback.print_exc()
return backend_pb2.Result(success=False, message=f"Error generating video with LTX-2 pipeline: {e}")
# Convert video to uint8 format
video = (video * 255).round().astype("uint8")
video = torch.from_numpy(video)
print(f"LTX-2: Converting video, shape after conversion: {video.shape}", file=sys.stderr)
print(f"LTX-2: Audio sample rate: {self.pipe.vocoder.config.output_sampling_rate}", file=sys.stderr)
print(f"LTX-2: Output path: {request.dst}", file=sys.stderr)
# Use LTX-2's encode_video function which handles audio
try:
ltx2_encode_video(
video[0],
fps=frame_rate,
audio=audio[0].float().cpu(),
audio_sample_rate=self.pipe.vocoder.config.output_sampling_rate,
output_path=request.dst,
)
# Verify file was created and has content
import os
if os.path.exists(request.dst):
file_size = os.path.getsize(request.dst)
print(f"LTX-2: Video file created successfully, size: {file_size} bytes", file=sys.stderr)
if file_size == 0:
return backend_pb2.Result(success=False, message=f"Video file was created but is empty (0 bytes). Check LTX-2 encode_video function.")
else:
return backend_pb2.Result(success=False, message=f"Video file was not created at {request.dst}")
except Exception as e:
print(f"LTX-2: Error encoding video: {e}", file=sys.stderr)
traceback.print_exc()
return backend_pb2.Result(success=False, message=f"Error encoding video: {e}")
return backend_pb2.Result(message="Video generated successfully", success=True)
elif self.PipelineType == "WanPipeline":
if self.PipelineType == "WanPipeline":
# WAN2.2 text-to-video generation
output = self.pipe(**kwargs)
frames = output.frames[0] # WAN2.2 returns frames in this format
@@ -915,23 +725,11 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
output = self.pipe(**kwargs)
frames = output.frames[0]
else:
print(f"GenerateVideo: Pipeline {self.PipelineType} does not match any known video pipeline handler", file=sys.stderr)
return backend_pb2.Result(success=False, message=f"Pipeline {self.PipelineType} does not support video generation")
# Export video (for non-LTX-2 pipelines)
print(f"GenerateVideo: Exporting video to {request.dst} with fps={fps}", file=sys.stderr)
# Export video
export_to_video(frames, request.dst, fps=fps)
# Verify file was created
import os
if os.path.exists(request.dst):
file_size = os.path.getsize(request.dst)
print(f"GenerateVideo: Video file created, size: {file_size} bytes", file=sys.stderr)
if file_size == 0:
return backend_pb2.Result(success=False, message=f"Video file was created but is empty (0 bytes)")
else:
return backend_pb2.Result(success=False, message=f"Video file was not created at {request.dst}")
return backend_pb2.Result(message="Video generated successfully", success=True)
except Exception as err:

View File

@@ -16,10 +16,6 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
USE_PIP=true
fi
# Use python 3.12 for l4t
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
PYTHON_VERSION="3.12"
@@ -27,4 +23,11 @@ if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
PY_STANDALONE_TAG="20251120"
fi
# This is here because the jetson-ai-lab.io PyPI mirror's root PyPI endpoint (pypi.jetson-ai-lab.io/root/pypi/)
# returns 503 errors when uv tries to fall back to it for packages not found in the specific subdirectory.
# We need uv to continue falling through to the official PyPI index when it encounters these errors.
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --index-strategy=unsafe-first-match"
fi
installRequirements

View File

@@ -1,6 +1,8 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch
torchvision
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.5.1+cxx11.abi
torchvision==0.20.1+cxx11.abi
oneccl_bind_pt==2.8.0+xpu
optimum[openvino]
setuptools
git+https://github.com/huggingface/diffusers

View File

@@ -3,4 +3,3 @@ grpcio==1.76.0
pillow
protobuf
certifi
av

backend/python/exllama2/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
source

View File

@@ -0,0 +1,17 @@
.PHONY: exllama2
exllama2:
bash install.sh
.PHONY: run
run: exllama2
@echo "Running exllama2..."
bash run.sh
@echo "exllama2 run."
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
.PHONY: clean
clean: protogen-clean
$(RM) -r venv source __pycache__

View File

@@ -0,0 +1,143 @@
#!/usr/bin/env python3
import grpc
from concurrent import futures
import time
import backend_pb2
import backend_pb2_grpc
import argparse
import signal
import sys
import os
import glob
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import version as torch_version
from exllamav2.generator import (
ExLlamaV2BaseGenerator,
ExLlamaV2Sampler
)
from exllamav2 import (
ExLlamaV2,
ExLlamaV2Config,
ExLlamaV2Cache,
ExLlamaV2Cache_8bit,
ExLlamaV2Tokenizer,
model_init,
)
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
def Health(self, request, context):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
def LoadModel(self, request, context):
try:
model_directory = request.ModelFile
config = ExLlamaV2Config()
config.model_dir = model_directory
config.prepare()
model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)
tokenizer = ExLlamaV2Tokenizer(config)
# Initialize generator
generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)
self.generator = generator
generator.warmup()
self.model = model
self.tokenizer = tokenizer
self.cache = cache
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
return backend_pb2.Result(message="Model loaded successfully", success=True)
def Predict(self, request, context):
penalty = 1.15
if request.Penalty != 0.0:
penalty = request.Penalty
settings = ExLlamaV2Sampler.Settings()
settings.temperature = request.Temperature
settings.top_k = request.TopK
settings.top_p = request.TopP
settings.token_repetition_penalty = penalty
settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])
tokens = 512
if request.Tokens != 0:
tokens = request.Tokens
output = self.generator.generate_simple(
request.Prompt, settings, tokens)
# Remove prompt from response if present
if request.Prompt in output:
output = output.replace(request.Prompt, "")
return backend_pb2.Result(message=bytes(output, encoding='utf-8'))
def PredictStream(self, request, context):
# Implement PredictStream RPC
# for reply in some_data_generator():
# yield reply
# Not implemented yet
return self.Predict(request, context)
def serve(address):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
])
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
server.add_insecure_port(address)
server.start()
print("Server started. Listening on: " + address, file=sys.stderr)
# Define the signal handler function
def signal_handler(sig, frame):
print("Received termination signal. Shutting down...")
server.stop(0)
sys.exit(0)
# Set the signal handlers for SIGINT and SIGTERM
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the gRPC server.")
parser.add_argument(
"--addr", default="localhost:50051", help="The address to bind the server to."
)
args = parser.parse_args()
serve(args.addr)

View File

@@ -0,0 +1,21 @@
#!/bin/bash
set -e
LIMIT_TARGETS="cublas"
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
EXLLAMA2_VERSION=c0ddebaaaf8ffd1b3529c2bb654e650bce2f790f
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
installRequirements
git clone https://github.com/turboderp/exllamav2 $MY_DIR/source
pushd ${MY_DIR}/source && git checkout -b build ${EXLLAMA2_VERSION} && popd
# This installs exllamav2 in JIT mode so it will compile the appropriate torch extension at runtime
EXLLAMA_NOCOMPILE= uv pip install ${EXTRA_PIP_INSTALL_FLAGS} ${MY_DIR}/source/

View File

@@ -0,0 +1,3 @@
transformers
accelerate
torch==2.4.1

View File

@@ -0,0 +1,3 @@
torch==2.4.1
transformers
accelerate

View File

@@ -0,0 +1,4 @@
# This is here to trigger the install script to add --no-build-isolation to the uv pip install commands
# exllama2 does not specify its build requirements per PEP 517, so we need to provide some things ourselves
wheel
setuptools

View File

@@ -0,0 +1,5 @@
grpcio==1.76.0
protobuf
certifi
wheel
setuptools

View File

@@ -1,11 +1,11 @@
#!/bin/bash
LIMIT_TARGETS="cublas"
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
startBackend $@
startBackend $@

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for Faster Whisper TTS
This is an extra gRPC server of LocalAI for Bark TTS
"""
from concurrent import futures
import time
@@ -40,7 +40,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
device = "mps"
try:
print("Preparing models, please wait", file=sys.stderr)
self.model = WhisperModel(request.Model, device=device, compute_type="default")
self.model = WhisperModel(request.Model, device=device, compute_type="float16")
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
# Implement your logic here for the LoadModel service
@@ -55,12 +55,11 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
id = 0
for segment in segments:
print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
resultSegments.append(backend_pb2.TranscriptSegment(id=id, start=int(segment.start)*1e9, end=int(segment.end)*1e9, text=segment.text))
resultSegments.append(backend_pb2.TranscriptSegment(id=id, start=segment.start, end=segment.end, text=segment.text))
text += segment.text
id += 1
id += 1
except Exception as err:
print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
raise err
return backend_pb2.TranscriptResult(segments=resultSegments, text=text)

View File

@@ -1,4 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu
optimum[openvino]
faster-whisper

View File

@@ -16,8 +16,11 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
USE_PIP=true
# This is here because the jetson-ai-lab.io PyPI mirror's root PyPI endpoint (pypi.jetson-ai-lab.io/root/pypi/)
# returns 503 errors when uv tries to fall back to it for packages not found in the specific subdirectory.
# We need uv to continue falling through to the official PyPI index when it encounters these errors.
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --index-strategy=unsafe-first-match"
fi
installRequirements

View File

@@ -1,6 +1,8 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch
torchaudio
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu
torch==2.5.1+cxx11.abi
oneccl_bind_pt==2.8.0+xpu
torchaudio==2.5.1+cxx11.abi
optimum[openvino]
setuptools
transformers==4.48.3

View File

@@ -16,6 +16,13 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
# This is here because the jetson-ai-lab.io PyPI mirror's root PyPI endpoint (pypi.jetson-ai-lab.io/root/pypi/)
# returns 503 errors when uv tries to fall back to it for packages not found in the specific subdirectory.
# We need uv to continue falling through to the official PyPI index when it encounters these errors.
if [ "x${BUILD_PROFILE}" == "xl4t12" ] || [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --index-strategy=unsafe-first-match"
fi
if [ "x${BUILD_TYPE}" == "xcublas" ] || [ "x${BUILD_TYPE}" == "xl4t" ]; then
export CMAKE_ARGS="-DGGML_CUDA=on"
fi
@@ -26,12 +33,6 @@ fi
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
USE_PIP=true
fi
git clone https://github.com/neuphonic/neutts-air neutts-air
cp -rfv neutts-air/neuttsair ./

View File

@@ -1,23 +0,0 @@
.PHONY: pocket-tts
pocket-tts:
bash install.sh
.PHONY: run
run: pocket-tts
@echo "Running pocket-tts..."
bash run.sh
@echo "pocket-tts run."
.PHONY: test
test: pocket-tts
@echo "Testing pocket-tts..."
bash test.sh
@echo "pocket-tts tested."
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,255 +0,0 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for Pocket TTS
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os
import traceback
import scipy.io.wavfile
import backend_pb2
import backend_pb2_grpc
import torch
from pocket_tts import TTSModel
import grpc
def is_float(s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
BackendServicer is the class that implements the gRPC service
"""
def Health(self, request, context):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
def LoadModel(self, request, context):
# Get device
if torch.cuda.is_available():
print("CUDA is available", file=sys.stderr)
device = "cuda"
else:
print("CUDA is not available", file=sys.stderr)
device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available")
# Normalize potential 'mpx' typo to 'mps'
if device == "mpx":
print("Note: device 'mpx' detected, treating it as 'mps'.", file=sys.stderr)
device = "mps"
# Validate mps availability if requested
if device == "mps" and not torch.backends.mps.is_available():
print("Warning: MPS not available. Falling back to CPU.", file=sys.stderr)
device = "cpu"
self.device = device
options = request.Options
# empty dict
self.options = {}
# The options are a list of strings in this form optname:optvalue
# We are storing all the options in a dict so we can use it later when
# generating the audio
for opt in options:
if ":" not in opt:
continue
key, value = opt.split(":", 1) # Split only on first colon
# if value is a number, convert it to the appropriate type
if is_float(value):
value = float(value)
elif is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value
# Default voice for caching
self.default_voice_url = self.options.get("default_voice", None)
self._voice_cache = {}
try:
print("Loading Pocket TTS model", file=sys.stderr)
self.tts_model = TTSModel.load_model()
print(f"Model loaded successfully. Sample rate: {self.tts_model.sample_rate}", file=sys.stderr)
# Pre-load default voice if specified
if self.default_voice_url:
try:
print(f"Pre-loading default voice: {self.default_voice_url}", file=sys.stderr)
voice_state = self.tts_model.get_state_for_audio_prompt(self.default_voice_url)
self._voice_cache[self.default_voice_url] = voice_state
print("Default voice loaded successfully", file=sys.stderr)
except Exception as e:
print(f"Warning: Failed to pre-load default voice: {e}", file=sys.stderr)
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
return backend_pb2.Result(message="Model loaded successfully", success=True)
def _get_voice_state(self, voice_input):
"""
Get voice state from cache or load it.
voice_input can be:
- HuggingFace URL (e.g., hf://kyutai/tts-voices/alba-mackenna/casual.wav)
- Local file path
- None (use default)
"""
# Use default if no voice specified
if not voice_input:
voice_input = self.default_voice_url
if not voice_input:
return None
# Check cache first
if voice_input in self._voice_cache:
return self._voice_cache[voice_input]
# Load voice state
try:
print(f"Loading voice from: {voice_input}", file=sys.stderr)
voice_state = self.tts_model.get_state_for_audio_prompt(voice_input)
self._voice_cache[voice_input] = voice_state
return voice_state
except Exception as e:
print(f"Error loading voice from {voice_input}: {e}", file=sys.stderr)
return None
def TTS(self, request, context):
try:
# Determine voice input
# Priority: request.voice > AudioPath (from ModelOptions) > default
voice_input = None
if request.voice:
voice_input = request.voice
elif hasattr(request, 'AudioPath') and request.AudioPath:
# Use AudioPath as voice file
if os.path.isabs(request.AudioPath):
voice_input = request.AudioPath
elif hasattr(request, 'ModelFile') and request.ModelFile:
model_file_base = os.path.dirname(request.ModelFile)
voice_input = os.path.join(model_file_base, request.AudioPath)
elif hasattr(request, 'ModelPath') and request.ModelPath:
voice_input = os.path.join(request.ModelPath, request.AudioPath)
else:
voice_input = request.AudioPath
# Get voice state
voice_state = self._get_voice_state(voice_input)
if voice_state is None:
return backend_pb2.Result(
success=False,
message=f"Voice not found or failed to load: {voice_input}. Please provide a valid voice URL or file path."
)
# Prepare text
text = request.text.strip()
if not text:
return backend_pb2.Result(
success=False,
message="Text is empty"
)
print(f"Generating audio for text: {text[:50]}...", file=sys.stderr)
# Generate audio
audio = self.tts_model.generate_audio(voice_state, text)
# Audio is a 1D torch tensor containing PCM data
if audio is None or audio.numel() == 0:
return backend_pb2.Result(
success=False,
message="No audio generated"
)
# Save audio to file
output_path = request.dst
if not output_path:
output_path = "/tmp/pocket-tts-output.wav"
# Ensure output directory exists
output_dir = os.path.dirname(output_path)
if output_dir and not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
# Convert torch tensor to numpy and save
audio_numpy = audio.numpy()
scipy.io.wavfile.write(output_path, self.tts_model.sample_rate, audio_numpy)
print(f"Saved audio to {output_path}", file=sys.stderr)
except Exception as err:
print(f"Error in TTS: {err}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
return backend_pb2.Result(success=True)
def serve(address):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
])
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
server.add_insecure_port(address)
server.start()
print("Server started. Listening on: " + address, file=sys.stderr)
# Define the signal handler function
def signal_handler(sig, frame):
print("Received termination signal. Shutting down...")
server.stop(0)
sys.exit(0)
# Set the signal handlers for SIGINT and SIGTERM
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the gRPC server.")
parser.add_argument(
"--addr", default="localhost:50051", help="The address to bind the server to."
)
args = parser.parse_args()
serve(args.addr)
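
The `optname:optvalue` option parsing used above (and by the other Python backends in this compare) is easiest to see with a concrete input; an illustrative run of the same logic, with made-up option values:

```
# Illustrative only: what the optname:optvalue parsing loop above produces.
def is_float(s):
    try:
        float(s)
        return True
    except ValueError:
        return False

def is_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False

options = [
    "default_voice:hf://kyutai/tts-voices/alba-mackenna/casual.wav",
    "temperature:0.8",
    "use_cache:true",
]
parsed = {}
for opt in options:
    if ":" not in opt:
        continue
    key, value = opt.split(":", 1)   # split on the first colon only,
    if is_float(value):              # so URL-style values keep their colons
        value = float(value)
    elif is_int(value):
        value = int(value)
    elif value.lower() in ["true", "false"]:
        value = value.lower() == "true"
    parsed[key] = value
print(parsed)
# {'default_voice': 'hf://kyutai/tts-voices/alba-mackenna/casual.wav',
#  'temperature': 0.8, 'use_cache': True}
```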

View File

@@ -1,11 +0,0 @@
#!/bin/bash
set -e
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto

View File

@@ -1,4 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cpu
pocket-tts
scipy
torch

View File

@@ -1,4 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu121
pocket-tts
scipy
torch

View File

@@ -1,4 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
pocket-tts
scipy
torch

View File

@@ -1,4 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.3
pocket-tts
scipy
torch==2.7.1+rocm6.3

View File

@@ -1,4 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/xpu
pocket-tts
scipy
torch

View File

@@ -1,4 +0,0 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
pocket-tts
scipy
torch

View File

@@ -1,4 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
pocket-tts
scipy
torch

View File

@@ -1,4 +0,0 @@
pocket-tts
scipy
torch==2.7.1
torchvision==0.22.1

View File

@@ -1,4 +0,0 @@
grpcio==1.71.0
protobuf
certifi
packaging==24.1

View File

@@ -1,9 +0,0 @@
#!/bin/bash
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
startBackend $@

View File

@@ -1,141 +0,0 @@
"""
A test script to test the gRPC service
"""
import unittest
import subprocess
import time
import os
import tempfile
import backend_pb2
import backend_pb2_grpc
import grpc
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service
"""
def setUp(self):
"""
This method sets up the gRPC service by starting the server
"""
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
time.sleep(30)
def tearDown(self) -> None:
"""
This method tears down the gRPC service by terminating the server
"""
self.service.terminate()
self.service.wait()
def test_server_startup(self):
"""
This method tests if the server starts up successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.Health(backend_pb2.HealthMessage())
self.assertEqual(response.message, b'OK')
except Exception as err:
print(err)
self.fail("Server failed to start")
finally:
self.tearDown()
def test_load_model(self):
"""
This method tests if the model is loaded successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions())
print(response)
self.assertTrue(response.success)
self.assertEqual(response.message, "Model loaded successfully")
except Exception as err:
print(err)
self.fail("LoadModel service failed")
finally:
self.tearDown()
def test_tts_with_hf_voice(self):
"""
This method tests TTS generation with HuggingFace voice URL
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
# Load model
response = stub.LoadModel(backend_pb2.ModelOptions())
self.assertTrue(response.success)
# Create temporary output file
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
output_path = tmp_file.name
# Test TTS with HuggingFace voice URL
tts_request = backend_pb2.TTSRequest(
text="Hello world, this is a test.",
dst=output_path,
voice="azelma"
)
tts_response = stub.TTS(tts_request)
self.assertTrue(tts_response.success)
# Verify output file exists and is not empty
self.assertTrue(os.path.exists(output_path))
self.assertGreater(os.path.getsize(output_path), 0)
# Cleanup
os.unlink(output_path)
except Exception as err:
print(err)
self.fail("TTS service failed")
finally:
self.tearDown()
def test_tts_with_default_voice(self):
"""
This method tests TTS generation with default voice (via AudioPath in LoadModel)
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
# Load model with default voice
load_request = backend_pb2.ModelOptions(
Options=["default_voice:azelma"]
)
response = stub.LoadModel(load_request)
self.assertTrue(response.success)
# Create temporary output file
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
output_path = tmp_file.name
# Test TTS without specifying voice (should use default)
tts_request = backend_pb2.TTSRequest(
text="Hello world, this is a test.",
dst=output_path
)
tts_response = stub.TTS(tts_request)
self.assertTrue(tts_response.success)
# Verify output file exists and is not empty
self.assertTrue(os.path.exists(output_path))
self.assertGreater(os.path.getsize(output_path), 0)
# Cleanup
os.unlink(output_path)
except Exception as err:
print(err)
self.fail("TTS service with default voice failed")
finally:
self.tearDown()

View File

@@ -1,25 +0,0 @@
.DEFAULT_GOAL := install
.PHONY: qwen-asr
qwen-asr:
bash install.sh
.PHONY: run
run: qwen-asr
@echo "Running qwen-asr..."
bash run.sh
@echo "qwen-asr run."
.PHONY: test
test: qwen-asr
@echo "Testing qwen-asr..."
bash test.sh
@echo "qwen-asr tested."
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,212 +0,0 @@
#!/usr/bin/env python3
"""
gRPC server of LocalAI for Qwen3-ASR (transformers backend, non-vLLM).
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os
import backend_pb2
import backend_pb2_grpc
import torch
from qwen_asr import Qwen3ASRModel
import grpc
def is_float(s):
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
class BackendServicer(backend_pb2_grpc.BackendServicer):
def Health(self, request, context):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
def LoadModel(self, request, context):
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available")
self.device = device
self.options = {}
for opt in request.Options:
if ":" not in opt:
continue
key, value = opt.split(":", 1)
if is_float(value):
value = float(value)
elif is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value
model_path = request.Model or "Qwen/Qwen3-ASR-1.7B"
default_dtype = torch.bfloat16 if self.device == "cuda" else torch.float32
load_dtype = default_dtype
if "torch_dtype" in self.options:
d = str(self.options["torch_dtype"]).lower()
if d == "fp16":
load_dtype = torch.float16
elif d == "bf16":
load_dtype = torch.bfloat16
elif d == "fp32":
load_dtype = torch.float32
del self.options["torch_dtype"]
self.max_inference_batch_size = self.options.get("max_inference_batch_size", 32)
self.max_new_tokens = self.options.get("max_new_tokens", 256)
forced_aligner = self.options.get("forced_aligner")
if forced_aligner is not None and isinstance(forced_aligner, str):
forced_aligner = forced_aligner.strip() or None
attn_implementation = self.options.get("attn_implementation")
if attn_implementation is not None and isinstance(attn_implementation, str):
attn_implementation = attn_implementation.strip() or None
if self.device == "mps":
device_map = None
elif self.device == "cuda":
device_map = "cuda:0"
else:
device_map = "cpu"
load_kwargs = dict(
dtype=load_dtype,
device_map=device_map,
max_inference_batch_size=self.max_inference_batch_size,
max_new_tokens=self.max_new_tokens,
)
if attn_implementation:
load_kwargs["attn_implementation"] = attn_implementation
if forced_aligner:
load_kwargs["forced_aligner"] = forced_aligner
forced_aligner_kwargs = dict(
dtype=load_dtype,
device_map=device_map,
)
if attn_implementation:
forced_aligner_kwargs["attn_implementation"] = attn_implementation
load_kwargs["forced_aligner_kwargs"] = forced_aligner_kwargs
try:
print(f"Loading Qwen3-ASR from {model_path}", file=sys.stderr)
if attn_implementation:
print(f"Using attn_implementation: {attn_implementation}", file=sys.stderr)
if forced_aligner:
print(f"Loading with forced_aligner: {forced_aligner}", file=sys.stderr)
self.model = Qwen3ASRModel.from_pretrained(model_path, **load_kwargs)
print("Qwen3-ASR model loaded successfully", file=sys.stderr)
except Exception as err:
print(f"[ERROR] LoadModel failed: {err}", file=sys.stderr)
import traceback
traceback.print_exc(file=sys.stderr)
return backend_pb2.Result(success=False, message=str(err))
return backend_pb2.Result(message="Model loaded successfully", success=True)
def AudioTranscription(self, request, context):
result_segments = []
text = ""
try:
audio_path = request.dst
if not audio_path or not os.path.exists(audio_path):
print(f"Error: Audio file not found: {audio_path}", file=sys.stderr)
return backend_pb2.TranscriptResult(segments=[], text="")
language = None
if request.language and request.language.strip():
language = request.language.strip()
results = self.model.transcribe(audio=audio_path, language=language)
if not results:
return backend_pb2.TranscriptResult(segments=[], text="")
r = results[0]
text = r.text or ""
if getattr(r, 'time_stamps', None) and len(r.time_stamps) > 0:
for idx, ts in enumerate(r.time_stamps):
start_ms = 0
end_ms = 0
seg_text = text
if isinstance(ts, (list, tuple)) and len(ts) >= 3:
start_ms = int(float(ts[0]) * 1000) if ts[0] is not None else 0
end_ms = int(float(ts[1]) * 1000) if ts[1] is not None else 0
seg_text = ts[2] if len(ts) > 2 and ts[2] is not None else ""
result_segments.append(backend_pb2.TranscriptSegment(
id=idx, start=start_ms, end=end_ms, text=seg_text
))
else:
if text:
result_segments.append(backend_pb2.TranscriptSegment(
id=0, start=0, end=0, text=text
))
except Exception as err:
print(f"Error in AudioTranscription: {err}", file=sys.stderr)
import traceback
traceback.print_exc(file=sys.stderr)
return backend_pb2.TranscriptResult(segments=[], text="")
return backend_pb2.TranscriptResult(segments=result_segments, text=text)
def serve(address):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
('grpc.max_message_length', 50 * 1024 * 1024),
('grpc.max_send_message_length', 50 * 1024 * 1024),
('grpc.max_receive_message_length', 50 * 1024 * 1024),
])
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
server.add_insecure_port(address)
server.start()
print("Server started. Listening on: " + address, file=sys.stderr)
def signal_handler(sig, frame):
print("Received termination signal. Shutting down...")
server.stop(0)
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the gRPC server.")
parser.add_argument("--addr", default="localhost:50051", help="The address to bind the server to.")
args = parser.parse_args()
serve(args.addr)

View File

@@ -1,21 +0,0 @@
#!/bin/bash
set -e
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
PYTHON_VERSION="3.12"
PYTHON_PATCH="12"
PY_STANDALONE_TAG="20251120"
installRequirements

View File

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cpu
torch
qwen-asr

View File

@@ -1 +0,0 @@
flash-attn

View File

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu121
torch
qwen-asr

View File

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
qwen-asr

View File

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.3
torch==2.7.1+rocm6.3
qwen-asr

View File

@@ -1 +0,0 @@
flash-attn

View File

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch
qwen-asr

View File

@@ -1,3 +0,0 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
torch
qwen-asr

View File

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
qwen-asr

View File

@@ -1,2 +0,0 @@
torch==2.7.1
qwen-asr

View File

@@ -1,5 +0,0 @@
grpcio==1.71.0
protobuf
certifi
packaging==24.1
setuptools

View File

@@ -1,9 +0,0 @@
#!/bin/bash
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
startBackend $@

View File

@@ -1,94 +0,0 @@
"""
Tests for the Qwen3-ASR gRPC backend.
"""
import unittest
import subprocess
import time
import os
import tempfile
import shutil
import backend_pb2
import backend_pb2_grpc
import grpc
# Skip heavy transcription test in CI (model download + inference)
SKIP_ASR_TESTS = os.environ.get("SKIP_ASR_TESTS", "false").lower() == "true"
class TestBackendServicer(unittest.TestCase):
def setUp(self):
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
time.sleep(15)
def tearDown(self):
self.service.terminate()
self.service.wait()
def test_server_startup(self):
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.Health(backend_pb2.HealthMessage())
self.assertEqual(response.message, b'OK')
except Exception as err:
print(err)
self.fail("Server failed to start")
finally:
self.tearDown()
def test_load_model(self):
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-1.7B"))
self.assertTrue(response.success, response.message)
self.assertEqual(response.message, "Model loaded successfully")
except Exception as err:
print(err)
self.fail("LoadModel service failed")
finally:
self.tearDown()
@unittest.skipIf(SKIP_ASR_TESTS, "ASR transcription test skipped (SKIP_ASR_TESTS=true)")
def test_audio_transcription(self):
temp_dir = tempfile.mkdtemp()
audio_file = os.path.join(temp_dir, 'audio.wav')
try:
url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-ASR-Repo/asr_en.wav"
result = subprocess.run(
["wget", "-q", url, "-O", audio_file],
capture_output=True,
text=True,
timeout=30,
)
if result.returncode != 0:
self.skipTest(f"Could not download sample audio: {result.stderr}")
if not os.path.exists(audio_file):
self.skipTest("Sample audio file not found after download")
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-0.6B"))
self.assertTrue(load_response.success, load_response.message)
transcript_response = stub.AudioTranscription(
backend_pb2.TranscriptRequest(dst=audio_file)
)
self.assertIsNotNone(transcript_response)
self.assertIsNotNone(transcript_response.text)
self.assertGreaterEqual(len(transcript_response.segments), 0)
all_text = ""
for segment in transcript_response.segments:
all_text += segment.text
print(f"All text: {all_text}")
self.assertIn("big", all_text)
if transcript_response.segments:
self.assertIsNotNone(transcript_response.segments[0].text)
finally:
self.tearDown()
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)

View File

@@ -1,23 +0,0 @@
.PHONY: qwen-tts
qwen-tts:
bash install.sh
.PHONY: run
run: qwen-tts
@echo "Running qwen-tts..."
bash run.sh
@echo "qwen-tts run."
.PHONY: test
test: qwen-tts
@echo "Testing qwen-tts..."
bash test.sh
@echo "qwen-tts tested."
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,475 +0,0 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for Qwen3-TTS
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os
import copy
import traceback
from pathlib import Path
import backend_pb2
import backend_pb2_grpc
import torch
import soundfile as sf
from qwen_tts import Qwen3TTSModel
import grpc
def is_float(s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
BackendServicer is the class that implements the gRPC service
"""
def Health(self, request, context):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
    def LoadModel(self, request, context):
        # Get device
        if torch.cuda.is_available():
            print("CUDA is available", file=sys.stderr)
            device = "cuda"
        else:
            print("CUDA is not available", file=sys.stderr)
            device = "cpu"

        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
        if mps_available:
            device = "mps"

        if not torch.cuda.is_available() and request.CUDA:
            return backend_pb2.Result(success=False, message="CUDA is not available")

        # Normalize potential 'mpx' typo to 'mps'
        if device == "mpx":
            print("Note: device 'mpx' detected, treating it as 'mps'.", file=sys.stderr)
            device = "mps"

        # Validate mps availability if requested
        if device == "mps" and not torch.backends.mps.is_available():
            print("Warning: MPS not available. Falling back to CPU.", file=sys.stderr)
            device = "cpu"

        self.device = device
        self._torch_device = torch.device(device)

        options = request.Options

        # empty dict
        self.options = {}

        # The options are a list of strings in this form optname:optvalue
        # We are storing all the options in a dict so we can use it later when
        # generating the audio
        for opt in options:
            if ":" not in opt:
                continue
            key, value = opt.split(":", 1)  # Split only on first colon
            # if value is a number, convert it to the appropriate type
            if is_float(value):
                value = float(value)
            elif is_int(value):
                value = int(value)
            elif value.lower() in ["true", "false"]:
                value = value.lower() == "true"
            self.options[key] = value

        # Get model path from request
        model_path = request.Model
        if not model_path:
            model_path = "Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice"

        # Determine model type from model path or options
        self.model_type = self.options.get("model_type", None)
        if not self.model_type:
            if "CustomVoice" in model_path:
                self.model_type = "CustomVoice"
            elif "VoiceDesign" in model_path:
                self.model_type = "VoiceDesign"
            elif "Base" in model_path or "0.6B" in model_path or "1.7B" in model_path:
                self.model_type = "Base"  # VoiceClone model
            else:
                # Default to CustomVoice
                self.model_type = "CustomVoice"

        # Cache for voice clone prompts
        self._voice_clone_cache = {}

        # Store AudioPath, ModelFile, and ModelPath from LoadModel request
        # These are used later in TTS for VoiceClone mode
        self.audio_path = request.AudioPath if hasattr(request, 'AudioPath') and request.AudioPath else None
        self.model_file = request.ModelFile if hasattr(request, 'ModelFile') and request.ModelFile else None
        self.model_path = request.ModelPath if hasattr(request, 'ModelPath') and request.ModelPath else None

        # Decide dtype & attention implementation
        if self.device == "mps":
            load_dtype = torch.float32  # MPS requires float32
            device_map = None
            attn_impl_primary = "sdpa"  # flash_attention_2 not supported on MPS
        elif self.device == "cuda":
            load_dtype = torch.bfloat16
            device_map = "cuda"
            attn_impl_primary = "flash_attention_2"
        else:  # cpu
            load_dtype = torch.float32
            device_map = "cpu"
            attn_impl_primary = "sdpa"

        print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}, model_type: {self.model_type}", file=sys.stderr)
        print(f"Loading model from: {model_path}", file=sys.stderr)

        # Load model with device-specific logic
        # Common parameters for all devices
        load_kwargs = {
            "dtype": load_dtype,
            "attn_implementation": attn_impl_primary,
            "trust_remote_code": True,  # Required for qwen-tts models
        }

        try:
            if self.device == "mps":
                load_kwargs["device_map"] = None  # load then move
                self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
                self.model.to("mps")
            elif self.device == "cuda":
                load_kwargs["device_map"] = device_map
                self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
            else:  # cpu
                load_kwargs["device_map"] = device_map
                self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
        except Exception as e:
            error_msg = str(e)
            print(f"[ERROR] Loading model: {type(e).__name__}: {error_msg}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)

            # Check if it's a missing feature extractor/tokenizer error
            if "speech_tokenizer" in error_msg or "preprocessor_config.json" in error_msg or "feature extractor" in error_msg.lower():
                print("\n[ERROR] Model files appear to be incomplete. This usually means:", file=sys.stderr)
                print(" 1. The model download was interrupted or incomplete", file=sys.stderr)
                print(" 2. The model cache is corrupted", file=sys.stderr)
                print("\nTo fix this, try:", file=sys.stderr)
                print(f" rm -rf ~/.cache/huggingface/hub/models--Qwen--Qwen3-TTS-*", file=sys.stderr)
                print(" Then re-run to trigger a fresh download.", file=sys.stderr)
                print("\nAlternatively, try using a different model variant:", file=sys.stderr)
                print(" - Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice", file=sys.stderr)
                print(" - Qwen/Qwen3-TTS-12Hz-1.7B-VoiceDesign", file=sys.stderr)
                print(" - Qwen/Qwen3-TTS-12Hz-1.7B-Base", file=sys.stderr)

            if attn_impl_primary == 'flash_attention_2':
                print("\nTrying to use SDPA instead of flash_attention_2...", file=sys.stderr)
                load_kwargs["attn_implementation"] = 'sdpa'
                try:
                    if self.device == "mps":
                        load_kwargs["device_map"] = None
                        self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
                        self.model.to("mps")
                    else:
                        load_kwargs["device_map"] = (self.device if self.device in ("cuda", "cpu") else None)
                        self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
                except Exception as e2:
                    print(f"[ERROR] Failed to load with SDPA: {type(e2).__name__}: {e2}", file=sys.stderr)
                    print(traceback.format_exc(), file=sys.stderr)
                    raise e2
            else:
                raise e

        print(f"Model loaded successfully: {model_path}", file=sys.stderr)
        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def _detect_mode(self, request):
        """Detect which mode to use based on request parameters."""
        # Priority: VoiceClone > VoiceDesign > CustomVoice
        # model_type explicitly set
        if self.model_type == "CustomVoice":
            return "CustomVoice"
        if self.model_type == "VoiceClone":
            return "VoiceClone"
        if self.model_type == "VoiceDesign":
            return "VoiceDesign"

        # VoiceClone: AudioPath is provided (from LoadModel, stored in self.audio_path)
        if self.audio_path:
            return "VoiceClone"

        # VoiceDesign: instruct option is provided
        if "instruct" in self.options and self.options["instruct"]:
            return "VoiceDesign"

        # Default to CustomVoice
        return "CustomVoice"

    def _get_ref_audio_path(self, request):
        """Get reference audio path from stored AudioPath (from LoadModel)."""
        if not self.audio_path:
            return None

        # If absolute path, use as-is
        if os.path.isabs(self.audio_path):
            return self.audio_path

        # Try relative to ModelFile
        if self.model_file:
            model_file_base = os.path.dirname(self.model_file)
            ref_path = os.path.join(model_file_base, self.audio_path)
            if os.path.exists(ref_path):
                return ref_path

        # Try relative to ModelPath
        if self.model_path:
            ref_path = os.path.join(self.model_path, self.audio_path)
            if os.path.exists(ref_path):
                return ref_path

        # Return as-is (might be URL or base64)
        return self.audio_path

    def _get_voice_clone_prompt(self, request, ref_audio, ref_text):
        """Get or create voice clone prompt, with caching."""
        cache_key = f"{ref_audio}:{ref_text}"
        if cache_key not in self._voice_clone_cache:
            print(f"Creating voice clone prompt from {ref_audio}", file=sys.stderr)
            try:
                prompt_items = self.model.create_voice_clone_prompt(
                    ref_audio=ref_audio,
                    ref_text=ref_text,
                    x_vector_only_mode=self.options.get("x_vector_only_mode", False),
                )
                self._voice_clone_cache[cache_key] = prompt_items
            except Exception as e:
                print(f"Error creating voice clone prompt: {e}", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
                return None
        return self._voice_clone_cache[cache_key]

    def TTS(self, request, context):
        try:
            # Check if dst is provided
            if not request.dst:
                return backend_pb2.Result(
                    success=False,
                    message="dst (output path) is required"
                )

            # Prepare text
            text = request.text.strip()
            if not text:
                return backend_pb2.Result(
                    success=False,
                    message="Text is empty"
                )

            # Get language (auto-detect if not provided)
            language = request.language if hasattr(request, 'language') and request.language else None
            if not language or language == "":
                language = "Auto"  # Auto-detect language

            # Detect mode
            mode = self._detect_mode(request)
            print(f"Detected mode: {mode}", file=sys.stderr)

            # Get generation parameters from options
            max_new_tokens = self.options.get("max_new_tokens", None)
            top_p = self.options.get("top_p", None)
            temperature = self.options.get("temperature", None)
            do_sample = self.options.get("do_sample", None)

            # Prepare generation kwargs
            generation_kwargs = {}
            if max_new_tokens is not None:
                generation_kwargs["max_new_tokens"] = max_new_tokens
            if top_p is not None:
                generation_kwargs["top_p"] = top_p
            if temperature is not None:
                generation_kwargs["temperature"] = temperature
            if do_sample is not None:
                generation_kwargs["do_sample"] = do_sample

            instruct = self.options.get("instruct", "")
            if instruct is not None and instruct != "":
                generation_kwargs["instruct"] = instruct

            # Generate audio based on mode
            if mode == "VoiceClone":
                # VoiceClone mode
                ref_audio = self._get_ref_audio_path(request)
                if not ref_audio:
                    return backend_pb2.Result(
                        success=False,
                        message="AudioPath is required for VoiceClone mode"
                    )

                ref_text = self.options.get("ref_text", None)
                if not ref_text:
                    # Try to get from request if available
                    if hasattr(request, 'ref_text') and request.ref_text:
                        ref_text = request.ref_text
                    else:
                        # x_vector_only_mode doesn't require ref_text
                        if not self.options.get("x_vector_only_mode", False):
                            return backend_pb2.Result(
                                success=False,
                                message="ref_text is required for VoiceClone mode (or set x_vector_only_mode=true)"
                            )

                # Check if we should use cached prompt
                use_cached_prompt = self.options.get("use_cached_prompt", True)
                voice_clone_prompt = None
                if use_cached_prompt:
                    voice_clone_prompt = self._get_voice_clone_prompt(request, ref_audio, ref_text)
                    if voice_clone_prompt is None:
                        return backend_pb2.Result(
                            success=False,
                            message="Failed to create voice clone prompt"
                        )

                if voice_clone_prompt:
                    # Use cached prompt
                    wavs, sr = self.model.generate_voice_clone(
                        text=text,
                        language=language,
                        voice_clone_prompt=voice_clone_prompt,
                        **generation_kwargs
                    )
                else:
                    # Create prompt on-the-fly
                    wavs, sr = self.model.generate_voice_clone(
                        text=text,
                        language=language,
                        ref_audio=ref_audio,
                        ref_text=ref_text,
                        x_vector_only_mode=self.options.get("x_vector_only_mode", False),
                        **generation_kwargs
                    )
            elif mode == "VoiceDesign":
                # VoiceDesign mode
                if not instruct:
                    return backend_pb2.Result(
                        success=False,
                        message="instruct option is required for VoiceDesign mode"
                    )

                wavs, sr = self.model.generate_voice_design(
                    text=text,
                    language=language,
                    instruct=instruct,
                    **generation_kwargs
                )
            else:
                # CustomVoice mode (default)
                speaker = request.voice if request.voice else None
                if not speaker:
                    # Try to get from options
                    speaker = self.options.get("speaker", None)
                if not speaker:
                    # Use default speaker
                    speaker = "Vivian"
                    print(f"No speaker specified, using default: {speaker}", file=sys.stderr)

                # Validate speaker if model supports it
                if hasattr(self.model, 'get_supported_speakers'):
                    try:
                        supported_speakers = self.model.get_supported_speakers()
                        if speaker not in supported_speakers:
                            print(f"Warning: Speaker '{speaker}' not in supported list. Available: {supported_speakers}", file=sys.stderr)
                            # Try to find a close match (case-insensitive)
                            speaker_lower = speaker.lower()
                            for sup_speaker in supported_speakers:
                                if sup_speaker.lower() == speaker_lower:
                                    speaker = sup_speaker
                                    print(f"Using matched speaker: {speaker}", file=sys.stderr)
                                    break
                    except Exception as e:
                        print(f"Warning: Could not get supported speakers: {e}", file=sys.stderr)

                wavs, sr = self.model.generate_custom_voice(
                    text=text,
                    language=language,
                    speaker=speaker,
                    **generation_kwargs
                )

            # Save output
            if wavs is not None and len(wavs) > 0:
                # wavs is a list, take first element
                audio_data = wavs[0] if isinstance(wavs, list) else wavs
                sf.write(request.dst, audio_data, sr)
                print(f"Saved output to {request.dst}", file=sys.stderr)
            else:
                return backend_pb2.Result(
                    success=False,
                    message="No audio output generated"
                )
        except Exception as err:
            print(f"Error in TTS: {err}", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")

        return backend_pb2.Result(success=True)


def serve(address):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
                         options=[
                             ('grpc.max_message_length', 50 * 1024 * 1024),  # 50MB
                             ('grpc.max_send_message_length', 50 * 1024 * 1024),  # 50MB
                             ('grpc.max_receive_message_length', 50 * 1024 * 1024),  # 50MB
                         ])
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)

    # Define the signal handler function
    def signal_handler(sig, frame):
        print("Received termination signal. Shutting down...")
        server.stop(0)
        sys.exit(0)

    # Set the signal handlers for SIGINT and SIGTERM
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the gRPC server.")
    parser.add_argument(
        "--addr", default="localhost:50051", help="The address to bind the server to."
    )
    args = parser.parse_args()

    serve(args.addr)

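For reference, the removed qwen-tts backend above is driven over the same gRPC surface as the other Python backends. A minimal client sketch, assuming the repo's generated backend_pb2 / backend_pb2_grpc stubs and the message and field names the server code reads (Model, Options, text, dst, voice, language); the address, option values, and output path below are illustrative placeholders:

    import grpc
    import backend_pb2
    import backend_pb2_grpc

    def synthesize(text, dst, addr="localhost:50051"):
        # Open a channel to a running qwen-tts backend server.
        with grpc.insecure_channel(addr) as channel:
            stub = backend_pb2_grpc.BackendStub(channel)

            # Load the model first; options use the "name:value" form parsed in LoadModel above.
            load = stub.LoadModel(backend_pb2.ModelOptions(
                Model="Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice",
                Options=["temperature:0.8"],
            ))
            if not load.success:
                raise RuntimeError(load.message)

            # Then request synthesis; the server writes the generated audio to `dst`.
            result = stub.TTS(backend_pb2.TTSRequest(
                text=text, dst=dst, voice="Vivian", language="Auto",
            ))
            if not result.success:
                raise RuntimeError(result.message)

    if __name__ == "__main__":
        synthesize("Hello from Qwen3-TTS.", "out.wav")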
Some files were not shown because too many files have changed in this diff.