Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 11:13:31 -05:00)

Compare commits: v3.10.1 ... copilot/fi (66 commits)
| SHA1 |
|---|
| 6a1e44c8ff |
| bda40b266c |
| c86edf06f2 |
| 5d14c2fe4d |
| 4601143998 |
| 7913ea2bfb |
| d6409bd2eb |
| 98872791e5 |
| c6d47cb4e5 |
| f754a8edb1 |
| 4c0e70086d |
| f8bd527dfe |
| 08b2b8d755 |
| 10a1e6c74d |
| b7585ca738 |
| 8cae99229c |
| 04e0f444e1 |
| 6f410f4cbe |
| 800f749c7b |
| b6459ddd57 |
| 397f7f0862 |
| 234072769c |
| 3445415b3d |
| b05e110aa6 |
| e69cba2444 |
| f7903597ac |
| ee76a0cd1c |
| 4ca5b737bf |
| 4077aaf978 |
| 68dd9765a0 |
| 2c44b06a67 |
| 7cc90db3e5 |
| 1e08e02598 |
| dd8e74a486 |
| 48e08772f3 |
| c28c0227c6 |
| 856ca2d6b1 |
| 9b973b79f6 |
| cba8ef4e38 |
| f729e300d6 |
| 9916811a79 |
| 2f7c595cd1 |
| 73decac746 |
| ec1598868b |
| 93d7e5d4b8 |
| ff5a54b9d1 |
| 3c1f823c47 |
| 4024220d00 |
| f76958d761 |
| 2bd5ca45de |
| 6804ce1c39 |
| d499071bff |
| 26a374b717 |
| 980de0e25b |
| 4767371aee |
| 131d247b78 |
| b2a8a63899 |
| 05a332cd5f |
| 05904c77f5 |
| 17783fa7d9 |
| 4019094111 |
| ca65fc751a |
| a1e3acc590 |
| a36960e069 |
| 58bb6a29ed |
| 5881c82413 |

345  .github/workflows/backend.yml  (vendored)
@@ -91,10 +91,23 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'true'
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
# CUDA 12 builds
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vibevoice'
@@ -107,7 +120,20 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-qwen-tts'
@@ -120,7 +146,20 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-pocket-tts'
@@ -146,7 +185,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-llama-cpp'
@@ -159,7 +198,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm'
@@ -172,7 +211,20 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm-omni'
runs-on: 'arc-runner-set'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "vllm-omni"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-transformers'
@@ -185,7 +237,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-diffusers'
@@ -198,7 +250,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-kokoro'
@@ -211,7 +263,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-faster-whisper'
@@ -222,6 +274,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
@@ -237,20 +302,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-bark'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-chatterbox'
@@ -263,7 +315,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-moonshine'
@@ -276,7 +328,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-stablediffusion-ggml'
@@ -289,7 +341,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-whisper'
@@ -302,7 +354,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
@@ -315,20 +367,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-neutts'
@@ -366,6 +405,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -379,6 +431,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -457,6 +522,19 @@ jobs:
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-cuda-13-arm64-qwen-asr'
runs-on: 'ubuntu-24.04-arm'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
ubuntu-version: '2404'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -527,11 +605,11 @@ jobs:
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-bark'
tag-suffix: '-gpu-nvidia-cuda-13-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark"
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -666,6 +744,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vllm-omni'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "vllm-omni"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
@@ -719,6 +810,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-qwen-asr'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
@@ -732,6 +836,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-voxcpm'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
@@ -763,11 +880,11 @@ jobs:
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-coqui'
tag-suffix: '-gpu-rocm-hipblas-whisperx'
runs-on: 'bigger-runner'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "coqui"
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -776,11 +893,11 @@ jobs:
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-bark'
runs-on: 'arc-runner-set'
tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'bigger-runner'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "bark"
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -876,6 +993,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-qwen-asr'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -955,6 +1085,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-qwen-asr'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
@@ -968,6 +1111,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-voxcpm'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
@@ -994,19 +1150,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-bark'
runs-on: 'ubuntu-latest'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
# piper
- build-type: ''
cuda-major-version: ""
@@ -1021,20 +1164,6 @@ jobs:
dockerfile: "./backend/Dockerfile.golang"
context: "./"
ubuntu-version: '2404'
# bark-cpp
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-bark-cpp'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark-cpp"
dockerfile: "./backend/Dockerfile.golang"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
@@ -1301,46 +1430,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
# exllama2
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-exllama2'
runs-on: 'ubuntu-latest'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-exllama2'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
runs-on: 'ubuntu-latest'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -1421,6 +1510,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
@@ -1434,6 +1536,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""

4  .github/workflows/deploy-explorer.yaml  (vendored)
@@ -33,7 +33,7 @@ jobs:
run: |
CGO_ENABLED=0 make build
- name: rm
uses: appleboy/ssh-action@v1.2.4
uses: appleboy/ssh-action@v1.2.5
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}
@@ -53,7 +53,7 @@ jobs:
rm: true
target: ./local-ai
- name: restarting
uses: appleboy/ssh-action@v1.2.4
uses: appleboy/ssh-action@v1.2.5
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}

2  .github/workflows/image-pr.yml  (vendored)
@@ -37,7 +37,7 @@
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12'

2  .github/workflows/image.yml  (vendored)
@@ -88,7 +88,7 @@
ubuntu-codename: 'noble'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'

48  .github/workflows/test-extra.yml  (vendored)
@@ -238,7 +238,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch espeak espeak-ng python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -257,7 +257,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -276,7 +276,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -295,7 +295,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -303,4 +303,42 @@ jobs:
- name: Test qwen-tts
run: |
make --jobs=5 --output-sync=target -C backend/python/qwen-tts
make --jobs=5 --output-sync=target -C backend/python/qwen-tts test
tests-qwen-asr:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg sox
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test qwen-asr
run: |
make --jobs=5 --output-sync=target -C backend/python/qwen-asr
make --jobs=5 --output-sync=target -C backend/python/qwen-asr test
tests-voxcpm:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test voxcpm
run: |
make --jobs=5 --output-sync=target -C backend/python/voxcpm
make --jobs=5 --output-sync=target -C backend/python/voxcpm test
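
The new tests-qwen-asr and tests-voxcpm jobs above follow the same shape as the existing per-backend jobs, so the same steps can be reproduced outside CI. A minimal sketch, assuming an Ubuntu host with the repository and its submodules checked out (commands taken from the tests-qwen-asr job):

```bash
# Illustrative only: running the newly added qwen-asr test suite locally,
# mirroring the tests-qwen-asr workflow job above.
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg sox ca-certificates cmake curl patch python3-pip
curl -LsSf https://astral.sh/uv/install.sh | sh            # install UV, as the workflow does
pip install --user --no-cache-dir grpcio-tools==1.64.1
make --jobs=5 --output-sync=target -C backend/python/qwen-asr       # prepare the backend
make --jobs=5 --output-sync=target -C backend/python/qwen-asr test  # run its tests
```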

56  .github/workflows/tests-e2e.yml  (vendored, new file)
@@ -0,0 +1,56 @@
---
name: 'E2E Backend Tests'

on:
pull_request:
push:
branches:
- master
tags:
- '*'

concurrency:
group: ci-tests-e2e-backend-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true

jobs:
tests-e2e-backend:
runs-on: ubuntu-latest
strategy:
matrix:
go-version: ['1.25.x']
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
- name: Display Go version
run: go version
- name: Proto Dependencies
run: |
# Install protoc
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
PATH="$PATH:$HOME/go/bin" make protogen-go
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential
- name: Test Backend E2E
run: |
PATH="$PATH:$HOME/go/bin" make build-mock-backend test-e2e
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
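
The workflow's test step reduces to two Makefile targets introduced in this change set (see the Makefile diff further down). A minimal sketch of the local equivalent, assuming Go 1.25.x, protoc, and the protoc-gen-go / protoc-gen-go-grpc plugins are already installed as in the "Proto Dependencies" step above:

```bash
# Illustrative only: local equivalent of the "Test Backend E2E" step.
export PATH="$PATH:$HOME/go/bin"
make protogen-go                   # regenerate the Go protobuf/gRPC stubs
make build-mock-backend test-e2e   # build the mock backend and run the e2e suite
```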

2  .gitignore  (vendored)
@@ -36,6 +36,8 @@ LocalAI
models/*
test-models/
test-dir/
tests/e2e-aio/backends
tests/e2e-aio/models

release/

16  AGENTS.md
@@ -4,13 +4,13 @@ Building and testing the project depends on the components involved and the plat

## Building a specified backend

Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build bark for ROCM/hipblas
Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build coqui for ROCM/hipblas

- The Makefile has targets like `docker-build-bark` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- The Makefile has targets like `docker-build-coqui` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- At a minimum we need to set the BUILD_TYPE, BASE_IMAGE build-args
- Use .github/workflows/backend.yml as a reference it lists the needed args in the `include` job strategy matrix
- l4t and cublas also requires the CUDA major and minor version
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-bark`
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-coqui`
- Unless the user specifies that they want you to run the command, then just print it because not all agent frontends handle long running jobs well and the output may overflow your context
- The user may say they want to build AMD or ROCM instead of hipblas, or Intel instead of SYCL or NVIDIA insted of l4t or cublas. Ask for confirmation if there is ambiguity.
- Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly.
@@ -95,7 +95,7 @@ test-extra: prepare-test-extra

Add a backend definition variable in the backend definitions section (around line 428-457). The format depends on the backend type:

**For Python backends with root context** (like `faster-whisper`, `bark`):
**For Python backends with root context** (like `faster-whisper`, `coqui`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|.|false|true
```
@@ -280,3 +280,11 @@ Always check `llama.cpp` for new model configuration options that should be supp
- `llama.cpp/common/chat-parser.cpp` - Format presets and model-specific handlers
- `llama.cpp/common/chat.h` - Format enums and parameter structures
- `llama.cpp/tools/server/server-context.cpp` - Server configuration options

# Documentation

The project documentation is located in `docs/content`. When adding new features or changing existing functionality, it is crucial to update the documentation to reflect these changes. This helps users understand how to use the new capabilities and ensures the documentation stays relevant.

- **Feature Documentation**: If you add a new feature (like a new backend or API endpoint), create a new markdown file in `docs/content/features/` explaining what it is, how to configure it, and how to use it.
- **Configuration**: If you modify configuration options, update the relevant sections in `docs/content/`.
- **Examples**: providing concrete examples (like YAML configuration blocks) is highly encouraged to help users get started quickly.
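
The AGENTS.md guidance above applies directly to the backends added in this change set. A sketch of the same docker-build pattern, assuming the `docker-build-whisperx` target generated from `BACKEND_WHISPERX` in the Makefile diff below:

```bash
# Illustrative only: the build pattern described in AGENTS.md, applied to the
# newly added whisperx backend for ROCm/hipblas. Assumes the generated
# docker-build-whisperx target exists (see the Makefile changes below).
DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) \
BUILD_TYPE=hipblas \
BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 \
make docker-build-whisperx
```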

55  Makefile
@@ -1,5 +1,5 @@
# Disable parallel execution for backend builds
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine backends/pocket-tts backends/qwen-tts
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/vllm-omni backends/moonshine backends/pocket-tts backends/qwen-tts backends/qwen-asr backends/voxcpm backends/whisperx

GOCMD=go
GOTEST=$(GOCMD) test
@@ -7,16 +7,14 @@ GOVET=$(GOCMD) vet
BINARY_NAME=local-ai
LAUNCHER_BINARY_NAME=local-ai-launcher

CUDA_MAJOR_VERSION?=13
CUDA_MINOR_VERSION?=0
UBUNTU_VERSION?=2404
UBUNTU_CODENAME?=noble

GORELEASER?=

export BUILD_TYPE?=
export CUDA_MAJOR_VERSION?=12
export CUDA_MINOR_VERSION?=9
export CUDA_MAJOR_VERSION?=13
export CUDA_MINOR_VERSION?=0

GO_TAGS?=
BUILD_ID?=
@@ -191,9 +189,6 @@ run-e2e-aio: protogen-go
########################################################

prepare-e2e:
mkdir -p $(TEST_DIR)
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
docker build \
--build-arg IMAGE_TYPE=core \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
@@ -207,14 +202,16 @@ prepare-e2e:
-t localai-tests .

run-e2e-image:
ls -liah $(abspath ./tests/e2e-fixtures)
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --name e2e-tests-$(RANDOM) localai-tests

test-e2e:
test-e2e: build-mock-backend prepare-e2e run-e2e-image
@echo 'Running e2e tests'
BUILD_TYPE=$(BUILD_TYPE) \
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390 \
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e
$(MAKE) clean-mock-backend
$(MAKE) teardown-e2e
docker rmi localai-tests

teardown-e2e:
rm -rf $(TEST_DIR) || true
@@ -314,20 +311,28 @@ prepare-test-extra: protogen-python
$(MAKE) -C backend/python/diffusers
$(MAKE) -C backend/python/chatterbox
$(MAKE) -C backend/python/vllm
$(MAKE) -C backend/python/vllm-omni
$(MAKE) -C backend/python/vibevoice
$(MAKE) -C backend/python/moonshine
$(MAKE) -C backend/python/pocket-tts
$(MAKE) -C backend/python/qwen-tts
$(MAKE) -C backend/python/qwen-asr
$(MAKE) -C backend/python/voxcpm
$(MAKE) -C backend/python/whisperx

test-extra: prepare-test-extra
$(MAKE) -C backend/python/transformers test
$(MAKE) -C backend/python/diffusers test
$(MAKE) -C backend/python/chatterbox test
$(MAKE) -C backend/python/vllm test
$(MAKE) -C backend/python/vllm-omni test
$(MAKE) -C backend/python/vibevoice test
$(MAKE) -C backend/python/moonshine test
$(MAKE) -C backend/python/pocket-tts test
$(MAKE) -C backend/python/qwen-tts test
$(MAKE) -C backend/python/qwen-asr test
$(MAKE) -C backend/python/voxcpm test
$(MAKE) -C backend/python/whisperx test

DOCKER_IMAGE?=local-ai
DOCKER_AIO_IMAGE?=local-ai-aio
@@ -436,7 +441,6 @@ backend-images:
BACKEND_LLAMA_CPP = llama-cpp|llama-cpp|.|false|false

# Golang backends
BACKEND_BARK_CPP = bark-cpp|golang|.|false|true
BACKEND_PIPER = piper|golang|.|false|true
BACKEND_LOCAL_STORE = local-store|golang|.|false|true
BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
@@ -449,19 +453,21 @@ BACKEND_RERANKERS = rerankers|python|.|false|true
BACKEND_TRANSFORMERS = transformers|python|.|false|true
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
BACKEND_COQUI = coqui|python|.|false|true
BACKEND_BARK = bark|python|.|false|true
BACKEND_EXLLAMA2 = exllama2|python|.|false|true
BACKEND_RFDETR = rfdetr|python|.|false|true
BACKEND_KITTEN_TTS = kitten-tts|python|.|false|true
BACKEND_NEUTTS = neutts|python|.|false|true
BACKEND_KOKORO = kokoro|python|.|false|true
BACKEND_VLLM = vllm|python|.|false|true
BACKEND_VLLM_OMNI = vllm-omni|python|.|false|true
BACKEND_DIFFUSERS = diffusers|python|.|--progress=plain|true
BACKEND_CHATTERBOX = chatterbox|python|.|false|true
BACKEND_VIBEVOICE = vibevoice|python|.|--progress=plain|true
BACKEND_MOONSHINE = moonshine|python|.|false|true
BACKEND_POCKET_TTS = pocket-tts|python|.|false|true
BACKEND_QWEN_TTS = qwen-tts|python|.|false|true
BACKEND_QWEN_ASR = qwen-asr|python|.|false|true
BACKEND_VOXCPM = voxcpm|python|.|false|true
BACKEND_WHISPERX = whisperx|python|.|false|true

# Helper function to build docker image for a backend
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
@@ -485,7 +491,6 @@ endef

# Generate all docker-build targets
$(eval $(call generate-docker-build-target,$(BACKEND_LLAMA_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_PIPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_LOCAL_STORE)))
$(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
@@ -496,25 +501,37 @@ $(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK)))
$(eval $(call generate-docker-build-target,$(BACKEND_EXLLAMA2)))
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
$(eval $(call generate-docker-build-target,$(BACKEND_KITTEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_NEUTTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_KOKORO)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM_OMNI)))
$(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
$(eval $(call generate-docker-build-target,$(BACKEND_POCKET_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_ASR)))
$(eval $(call generate-docker-build-target,$(BACKEND_VOXCPM)))
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPERX)))

# Pattern rule for docker-save targets
docker-save-%: backend-images
docker save local-ai-backend:$* -o backend-images/$*.tar

docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-vllm-omni docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-chatterbox docker-build-vibevoice docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts docker-build-qwen-asr docker-build-voxcpm docker-build-whisperx

########################################################
### Mock Backend for E2E Tests
########################################################

build-mock-backend: protogen-go
$(GOCMD) build -o tests/e2e/mock-backend/mock-backend ./tests/e2e/mock-backend

clean-mock-backend:
rm -f tests/e2e/mock-backend/mock-backend

########################################################
### END Backends
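
The `docker-save-%` pattern rule shown above pairs with the generated docker-build targets. A sketch of exporting one of the newly added backend images, assuming `docker-build-qwen-asr` tags its output as `local-ai-backend:qwen-asr`, which is the tag the save rule references:

```bash
# Illustrative only: build one of the new backend images and export it with the
# docker-save-% pattern rule (writes backend-images/qwen-asr.tar).
make docker-build-qwen-asr
make docker-save-qwen-asr
```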

48  README.md
@@ -51,34 +51,16 @@
**LocalAI** is the free, Open Source OpenAI alternative. LocalAI act as a drop-in replacement REST API that's compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).

## 📚🆕 Local Stack Family
## Local Stack Family

🆕 LocalAI is now part of a comprehensive suite of AI tools designed to work together:
Liking LocalAI? LocalAI is part of an integrated suite of AI infrastructure tools, you might also like:

- **[LocalAGI](https://github.com/mudler/LocalAGI)** - AI agent orchestration platform with OpenAI Responses API compatibility and advanced agentic capabilities
- **[LocalRecall](https://github.com/mudler/LocalRecall)** - MCP/REST API knowledge base system providing persistent memory and storage for AI agents
- 🆕 **[Cogito](https://github.com/mudler/cogito)** - Go library for building intelligent, co-operative agentic software and LLM-powered workflows, focusing on improving results for small, open source language models that scales to any LLM. Powers LocalAGI and LocalAI MCP/Agentic capabilities
- 🆕 **[Wiz](https://github.com/mudler/wiz)** - Terminal-based AI agent accessible via Ctrl+Space keybinding. Portable, local-LLM friendly shell assistant with TUI/CLI modes, tool execution with approval, MCP protocol support, and multi-shell compatibility (zsh, bash, fish)
- 🆕 **[SkillServer](https://github.com/mudler/skillserver)** - Simple, centralized skills database for AI agents via MCP. Manages skills as Markdown files with MCP server integration, web UI for editing, Git synchronization, and full-text search capabilities

<table>
<tr>
<td width="50%" valign="top">
<a href="https://github.com/mudler/LocalAGI">
<img src="https://raw.githubusercontent.com/mudler/LocalAGI/refs/heads/main/webui/react-ui/public/logo_2.png" width="300" alt="LocalAGI Logo">
</a>
</td>
<td width="50%" valign="top">
<h3><a href="https://github.com/mudler/LocalAGI">LocalAGI</a></h3>
<p>A powerful Local AI agent management platform that serves as a drop-in replacement for OpenAI's Responses API, enhanced with advanced agentic capabilities.</p>
</td>
</tr>
<tr>
<td width="50%" valign="top">
<a href="https://github.com/mudler/LocalRecall">
<img src="https://raw.githubusercontent.com/mudler/LocalRecall/refs/heads/main/static/localrecall_horizontal.png" width="300" alt="LocalRecall Logo">
</a>
</td>
<td width="50%" valign="top">
<h3><a href="https://github.com/mudler/LocalRecall">LocalRecall</a></h3>
<p>A REST-ful API and knowledge base management system that provides persistent memory and storage capabilities for AI agents.</p>
</td>
</tr>
</table>

## Screenshots / Video

@@ -257,6 +239,7 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- 🔈 [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
- 🎨 [Image generation](https://localai.io/features/image-generation)
- 🔥 [OpenAI-alike tools API](https://localai.io/features/openai-functions/)
- ⚡ [Realtime API](https://localai.io/features/openai-realtime/) (Speech-to-speech)
- 🧠 [Embeddings generation for vector databases](https://localai.io/features/embeddings/)
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
@@ -278,7 +261,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
| **llama.cpp** | LLM inference in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12/13, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 12/13, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12/13 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |

@@ -287,8 +269,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12/13, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
@@ -320,9 +300,9 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice, pocket-tts, qwen-tts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice, pocket-tts, qwen-tts | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, neutts, vibevoice, pocket-tts, qwen-tts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, coqui, kokoro, vibevoice, pocket-tts, qwen-tts | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
| **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
@@ -344,6 +324,10 @@ Agentic Libraries:
MCPs:
- https://github.com/mudler/MCPs

OS Assistant:

- https://github.com/mudler/Keygeist - Keygeist is an AI-powered keyboard operator that listens for key combinations and responds with AI-generated text typed directly into your Linux box.

Model galleries
- https://github.com/go-skynet/model-gallery

@@ -46,7 +46,7 @@ The backend system provides language-specific Dockerfiles that handle the build
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Audio**: coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro

@@ -55,7 +55,6 @@ The backend system provides language-specific Dockerfiles that handle the build
- **stablediffusion-ggml**: Stable Diffusion in Go with GGML Cpp backend
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis Golang with C bindings using rhaspy/piper
- **bark-cpp**: Bark TTS models Golang with Cpp bindings
- **local-store**: Vector storage backend

#### C++ Backends (`cpp/`)

@@ -17,6 +17,7 @@ service Backend {
rpc GenerateVideo(GenerateVideoRequest) returns (Result) {}
rpc AudioTranscription(TranscriptRequest) returns (TranscriptResult) {}
rpc TTS(TTSRequest) returns (Result) {}
rpc TTSStream(TTSRequest) returns (stream Reply) {}
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {}
@@ -298,6 +299,7 @@ message TranscriptSegment {
int64 end = 3;
string text = 4;
repeated int32 tokens = 5;
string speaker = 6;
}

message GenerateImageRequest {

@@ -1,5 +1,5 @@

LLAMA_VERSION?=a5eaa1d6a3732bc0f460b02b61c95680bba5a012
LLAMA_VERSION?=2634ed207a17db1a54bd8df0555bd8499a6ab691
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp

CMAKE_ARGS?=

@@ -778,8 +778,8 @@ public:
if (!params.mmproj.path.empty()) {
error_msg += " (with mmproj: " + params.mmproj.path + ")";
}
if (params.has_speculative() && !params.speculative.model.path.empty()) {
error_msg += " (with draft model: " + params.speculative.model.path + ")";
if (params.speculative.has_dft() && !params.speculative.mparams_dft.path.empty()) {
error_msg += " (with draft model: " + params.speculative.mparams_dft.path + ")";
}

// Add captured error details if available

@@ -1,51 +0,0 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)

AR?=ar

CMAKE_ARGS?=-DGGML_NATIVE=OFF
BUILD_TYPE?=
GOCMD=go
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/bark.cpp/examples -I$(INCLUDE_PATH)/sources/bark.cpp/encodec.cpp/ggml/include -I$(INCLUDE_PATH)/sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/sources/bark.cpp/build/examples -lbark -lstdc++ -lm

# bark.cpp
BARKCPP_REPO?=https://github.com/PABannier/bark.cpp.git
BARKCPP_VERSION?=5d5be84f089ab9ea53b7a793f088d3fbf7247495

# warnings
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function

## bark.cpp
sources/bark.cpp:
	git clone --recursive $(BARKCPP_REPO) sources/bark.cpp && \
	cd sources/bark.cpp && \
	git checkout $(BARKCPP_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/bark.cpp/build/libbark.a: sources/bark.cpp
	cd sources/bark.cpp && \
	mkdir -p build && \
	cd build && \
	cmake $(CMAKE_ARGS) .. && \
	cmake --build . --config Release

gobark.o:
	$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)

libbark.a: sources/bark.cpp/build/libbark.a gobark.o
	cp $(INCLUDE_PATH)/sources/bark.cpp/build/libbark.a ./
	$(AR) rcs libbark.a gobark.o

bark-cpp: libbark.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH="$(CURDIR)" LIBRARY_PATH=$(CURDIR) \
	$(GOCMD) build -v -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o bark-cpp ./

package:
	bash package.sh

build: bark-cpp package

clean:
	rm -f gobark.o libbark.a

@@ -1,85 +0,0 @@
#include <iostream>
#include <tuple>

#include "bark.h"
#include "gobark.h"
#include "common.h"
#include "ggml.h"

struct bark_context *c;

void bark_print_progress_callback(struct bark_context *bctx, enum bark_encoding_step step, int progress, void *user_data) {
    if (step == bark_encoding_step::SEMANTIC) {
        printf("\rGenerating semantic tokens... %d%%", progress);
    } else if (step == bark_encoding_step::COARSE) {
        printf("\rGenerating coarse tokens... %d%%", progress);
    } else if (step == bark_encoding_step::FINE) {
        printf("\rGenerating fine tokens... %d%%", progress);
    }
    fflush(stdout);
}

int load_model(char *model) {
    // initialize bark context
    struct bark_context_params ctx_params = bark_context_default_params();
    bark_params params;

    params.model_path = model;

    // ctx_params.verbosity = verbosity;
    ctx_params.progress_callback = bark_print_progress_callback;
    ctx_params.progress_callback_user_data = nullptr;

    struct bark_context *bctx = bark_load_model(params.model_path.c_str(), ctx_params, params.seed);
    if (!bctx) {
        fprintf(stderr, "%s: Could not load model\n", __func__);
        return 1;
    }

    c = bctx;

    return 0;
}

int tts(char *text,int threads, char *dst ) {

    ggml_time_init();
    const int64_t t_main_start_us = ggml_time_us();

    // generate audio
    if (!bark_generate_audio(c, text, threads)) {
        fprintf(stderr, "%s: An error occurred. If the problem persists, feel free to open an issue to report it.\n", __func__);
        return 1;
    }

    const float *audio_data = bark_get_audio_data(c);
    if (audio_data == NULL) {
        fprintf(stderr, "%s: Could not get audio data\n", __func__);
        return 1;
    }

    const int audio_arr_size = bark_get_audio_data_size(c);

    std::vector<float> audio_arr(audio_data, audio_data + audio_arr_size);

    write_wav_on_disk(audio_arr, dst);

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();
        const int64_t t_load_us = bark_get_load_time(c);
        const int64_t t_eval_us = bark_get_eval_time(c);

        printf("\n\n");
        printf("%s:     load time = %8.2f ms\n", __func__, t_load_us / 1000.0f);
        printf("%s:     eval time = %8.2f ms\n", __func__, t_eval_us / 1000.0f);
        printf("%s:    total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f);
    }

    return 0;
}

int unload() {
    bark_free(c);
}

@@ -1,52 +0,0 @@
package main

// #cgo CXXFLAGS: -I${SRCDIR}/sources/bark.cpp/ -I${SRCDIR}/sources/bark.cpp/encodec.cpp -I${SRCDIR}/sources/bark.cpp/encodec.cpp/ggml/include -I${SRCDIR}/sources/bark.cpp/examples -I${SRCDIR}/sources/bark.cpp/spm-headers
// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/sources/bark.cpp/build/examples -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ggml/src/ -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon -lggml -lgomp
// #include <gobark.h>
// #include <stdlib.h>
import "C"

import (
    "fmt"
    "unsafe"

    "github.com/mudler/LocalAI/pkg/grpc/base"
    pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

type Bark struct {
    base.SingleThread
    threads int
}

func (sd *Bark) Load(opts *pb.ModelOptions) error {

    sd.threads = int(opts.Threads)

    modelFile := C.CString(opts.ModelFile)
    defer C.free(unsafe.Pointer(modelFile))

    ret := C.load_model(modelFile)
    if ret != 0 {
        return fmt.Errorf("inference failed")
    }

    return nil
}

func (sd *Bark) TTS(opts *pb.TTSRequest) error {
    t := C.CString(opts.Text)
    defer C.free(unsafe.Pointer(t))

    dst := C.CString(opts.Dst)
    defer C.free(unsafe.Pointer(dst))

    threads := C.int(sd.threads)

    ret := C.tts(t, threads, dst)
    if ret != 0 {
        return fmt.Errorf("inference failed")
    }

    return nil
}
@@ -1,8 +0,0 @@
#ifdef __cplusplus
extern "C" {
#endif
int load_model(char *model);
int tts(char *text, int threads, char *dst);
#ifdef __cplusplus
}
#endif
@@ -1,20 +0,0 @@
package main

// Note: this is started internally by LocalAI and a server is allocated for each model
import (
	"flag"

	grpc "github.com/mudler/LocalAI/pkg/grpc"
)

var (
	addr = flag.String("addr", "localhost:50051", "the address to connect to")
)

func main() {
	flag.Parse()

	if err := grpc.StartServer(*addr, &Bark{}); err != nil {
		panic(err)
	}
}
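As the note in the removed `main.go` says, LocalAI starts one of these gRPC servers per model. For orientation, here is a minimal, hedged sketch of how such a backend could be exercised from a standalone Go client; the `Backend` service name and the `ModelFile`/`Threads`/`Text`/`Dst` fields are taken from the code above, while the generated stub name (`NewBackendClient`) and the model/output paths are assumptions, not part of this tree.

```go
package main

import (
	"context"
	"fmt"
	"log"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial the per-model backend that LocalAI started on --addr.
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)

	// Load a model file, then ask for a synthesis written to a destination wav.
	if _, err := client.LoadModel(context.Background(), &pb.ModelOptions{ModelFile: "/models/bark.bin", Threads: 4}); err != nil {
		log.Fatal(err)
	}
	if _, err := client.TTS(context.Background(), &pb.TTSRequest{Text: "hello world", Dst: "/tmp/out.wav"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("wrote /tmp/out.wav")
}
```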
@@ -1,41 +0,0 @@
#!/bin/bash

# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile

set -e

CURDIR=$(dirname "$(realpath $0)")

# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avrf $CURDIR/bark-cpp $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/

# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
    # x86_64 architecture
    echo "Detected x86_64 architecture, copying x86_64 libraries..."
    cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
    # ARM64 architecture
    echo "Detected ARM64 architecture, copying ARM64 libraries..."
    cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
else
    echo "Error: Could not detect architecture"
    exit 1
fi

echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/
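A quick, hedged sanity check for the resulting bundle (the paths follow the script above; the exact library list depends on how `bark-cpp` was linked):

```bash
# Any "not found" entry means a library is missing from package/lib/ and
# should be added to package.sh for the target architecture.
ldd package/bark-cpp | grep "not found" || echo "all dependencies bundled"
```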
@@ -1,13 +0,0 @@
#!/bin/bash
set -ex

CURDIR=$(dirname "$(realpath $0)")
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH

# If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then
    echo "Using lib/ld.so"
    exec $CURDIR/lib/ld.so $CURDIR/bark-cpp "$@"
fi

exec $CURDIR/bark-cpp "$@"
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)

 # stablediffusion.cpp (ggml)
 STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
-STABLEDIFFUSION_GGML_VERSION?=5e4579c11d0678f9765463582d024e58270faa9c
+STABLEDIFFUSION_GGML_VERSION?=e411520407663e1ddf8ff2e5ed4ff3a116fbbc97

 CMAKE_ARGS+=-DGGML_MAX_NAME=128
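Because the commit is a plain make variable, it can be overridden on the command line without editing the Makefile; a hedged example (the `<sha>` placeholder and the choice of default goal are illustrative):

```bash
# Build the backend against a different stable-diffusion.cpp commit for testing.
make STABLEDIFFUSION_GGML_VERSION=<sha>
```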
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)

 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
-WHISPER_CPP_VERSION?=7aa8818647303b567c3a21fe4220b2681988e220
+WHISPER_CPP_VERSION?=aa1bc0d1a6dfd70dbb9f60c11df12441e03a9075
 SO_TARGET?=libgowhisper.so

 CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
@@ -130,8 +130,9 @@ func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptR
 	segments := []*pb.TranscriptSegment{}
 	text := ""
 	for i := range int(segsLen) {
-		s := CppGetSegmentStart(i)
-		t := CppGetSegmentEnd(i)
+		// segment start/end conversion factor taken from https://github.com/ggml-org/whisper.cpp/blob/master/examples/cli/cli.cpp#L895
+		s := CppGetSegmentStart(i) * (10000000)
+		t := CppGetSegmentEnd(i) * (10000000)
 		txt := strings.Clone(CppGetSegmentText(i))
 		tokens := make([]int32, CppNTokens(i))
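Why the factor is 10,000,000: whisper.cpp reports segment bounds as counts of 10 ms units, while the transcript fields above carry nanoseconds, so each unit is worth 10 ms = 10,000,000 ns. A small self-contained sketch (function and variable names here are illustrative, not from the tree):

```go
package main

import (
	"fmt"
	"time"
)

// toNanoseconds converts whisper.cpp segment timestamps (10 ms units)
// into nanoseconds, the unit used by the transcript segments above.
func toNanoseconds(units int64) int64 {
	const nsPerUnit = 10_000_000 // 10 ms expressed in nanoseconds
	return units * nsPerUnit
}

func main() {
	start := toNanoseconds(150) // 1.5 s into the audio
	end := toNanoseconds(420)   // 4.2 s into the audio
	fmt.Println(time.Duration(start), time.Duration(end)) // 1.5s 4.2s
}
```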
@@ -142,6 +142,31 @@
|
||||
amd: "rocm-vllm"
|
||||
intel: "intel-vllm"
|
||||
nvidia-cuda-12: "cuda12-vllm"
|
||||
- &vllm-omni
|
||||
name: "vllm-omni"
|
||||
license: apache-2.0
|
||||
urls:
|
||||
- https://github.com/vllm-project/vllm-omni
|
||||
tags:
|
||||
- text-to-image
|
||||
- image-generation
|
||||
- text-to-video
|
||||
- video-generation
|
||||
- text-to-speech
|
||||
- TTS
|
||||
- multimodal
|
||||
- LLM
|
||||
icon: https://raw.githubusercontent.com/vllm-project/vllm/main/docs/assets/logos/vllm-logo-text-dark.png
|
||||
description: |
|
||||
vLLM-Omni is a unified interface for multimodal generation with vLLM.
|
||||
It supports image generation (text-to-image, image editing), video generation
|
||||
(text-to-video, image-to-video), text generation with multimodal inputs, and
|
||||
text-to-speech generation. Only supports NVIDIA (CUDA) and ROCm platforms.
|
||||
alias: "vllm-omni"
|
||||
capabilities:
|
||||
nvidia: "cuda12-vllm-omni"
|
||||
amd: "rocm-vllm-omni"
|
||||
nvidia-cuda-12: "cuda12-vllm-omni"
|
||||
- &mlx
|
||||
name: "mlx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx"
|
||||
@@ -200,7 +225,7 @@
|
||||
amd: "rocm-rerankers"
|
||||
- &transformers
|
||||
name: "transformers"
|
||||
icon: https://camo.githubusercontent.com/26569a27b8a30a488dd345024b71dbc05da7ff1b2ba97bb6080c9f1ee0f26cc7/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f68756767696e67666163652f646f63756d656e746174696f6e2d696d616765732f7265736f6c76652f6d61696e2f7472616e73666f726d6572732f7472616e73666f726d6572735f61735f615f6d6f64656c5f646566696e6974696f6e2e706e67
|
||||
icon: https://avatars.githubusercontent.com/u/25720743?s=200&v=4
|
||||
alias: "transformers"
|
||||
license: apache-2.0
|
||||
description: |
|
||||
@@ -241,22 +266,6 @@
|
||||
nvidia-cuda-12: "cuda12-diffusers"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-diffusers"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-diffusers"
|
||||
- &exllama2
|
||||
name: "exllama2"
|
||||
urls:
|
||||
- https://github.com/turboderp-org/exllamav2
|
||||
tags:
|
||||
- text-to-text
|
||||
- LLM
|
||||
- EXL2
|
||||
license: MIT
|
||||
description: |
|
||||
ExLlamaV2 is an inference library for running local LLMs on modern consumer GPUs.
|
||||
alias: "exllama2"
|
||||
capabilities:
|
||||
nvidia: "cuda12-exllama2"
|
||||
intel: "intel-exllama2"
|
||||
nvidia-cuda-12: "cuda12-exllama2"
|
||||
- &faster-whisper
|
||||
icon: https://avatars.githubusercontent.com/u/1520500?s=200&v=4
|
||||
description: |
|
||||
@@ -293,6 +302,25 @@
|
||||
default: "cpu-moonshine"
|
||||
nvidia-cuda-13: "cuda13-moonshine"
|
||||
nvidia-cuda-12: "cuda12-moonshine"
|
||||
- &whisperx
|
||||
description: |
|
||||
WhisperX provides fast automatic speech recognition with word-level timestamps, speaker diarization,
|
||||
and forced alignment. Built on faster-whisper and pyannote-audio for high-accuracy transcription
|
||||
with speaker identification.
|
||||
urls:
|
||||
- https://github.com/m-bain/whisperX
|
||||
tags:
|
||||
- speech-to-text
|
||||
- diarization
|
||||
- whisperx
|
||||
license: BSD-4-Clause
|
||||
name: "whisperx"
|
||||
capabilities:
|
||||
nvidia: "cuda12-whisperx"
|
||||
amd: "rocm-whisperx"
|
||||
default: "cpu-whisperx"
|
||||
nvidia-cuda-13: "cuda13-whisperx"
|
||||
nvidia-cuda-12: "cuda12-whisperx"
|
||||
- &kokoro
|
||||
icon: https://avatars.githubusercontent.com/u/166769057?v=4
|
||||
description: |
|
||||
@@ -339,51 +367,6 @@
|
||||
nvidia-cuda-13: "cuda13-coqui"
|
||||
nvidia-cuda-12: "cuda12-coqui"
|
||||
icon: https://avatars.githubusercontent.com/u/1338804?s=200&v=4
|
||||
- &bark
|
||||
urls:
|
||||
- https://github.com/suno-ai/bark
|
||||
description: |
|
||||
Bark is a transformer-based text-to-audio model created by Suno. Bark can generate highly realistic, multilingual speech as well as other audio - including music, background noise and simple sound effects. The model can also produce nonverbal communications like laughing, sighing and crying. To support the research community, we are providing access to pretrained model checkpoints, which are ready for inference and available for commercial use.
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: MIT
|
||||
name: "bark"
|
||||
alias: "bark"
|
||||
capabilities:
|
||||
cuda: "cuda12-bark"
|
||||
intel: "intel-bark"
|
||||
rocm: "rocm-bark"
|
||||
nvidia-cuda-13: "cuda13-bark"
|
||||
nvidia-cuda-12: "cuda12-bark"
|
||||
icon: https://avatars.githubusercontent.com/u/99442120?s=200&v=4
|
||||
- &barkcpp
|
||||
urls:
|
||||
- https://github.com/PABannier/bark.cpp
|
||||
description: |
|
||||
With bark.cpp, our goal is to bring real-time realistic multilingual text-to-speech generation to the community.
|
||||
|
||||
Plain C/C++ implementation without dependencies
|
||||
AVX, AVX2 and AVX512 for x86 architectures
|
||||
CPU and GPU compatible backends
|
||||
Mixed F16 / F32 precision
|
||||
4-bit, 5-bit and 8-bit integer quantization
|
||||
Metal and CUDA backends
|
||||
|
||||
Models supported
|
||||
|
||||
Bark Small
|
||||
Bark Large
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: MIT
|
||||
icon: https://github.com/PABannier/bark.cpp/raw/main/assets/banner.png
|
||||
name: "bark-cpp"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-bark-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-bark-cpp
|
||||
alias: "bark-cpp"
|
||||
- &chatterbox
|
||||
urls:
|
||||
- https://github.com/resemble-ai/chatterbox
|
||||
@@ -394,7 +377,7 @@
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: MIT
|
||||
icon: https://private-user-images.githubusercontent.com/660224/448166653-bd8c5f03-e91d-4ee5-b680-57355da204d1.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NTAxOTE0MDAsIm5iZiI6MTc1MDE5MTEwMCwicGF0aCI6Ii82NjAyMjQvNDQ4MTY2NjUzLWJkOGM1ZjAzLWU5MWQtNGVlNS1iNjgwLTU3MzU1ZGEyMDRkMS5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjUwNjE3JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI1MDYxN1QyMDExNDBaJlgtQW16LUV4cGlyZXM9MzAwJlgtQW16LVNpZ25hdHVyZT1hMmI1NGY3OGFiZTlhNGFkNTVlYTY4NTIwMWEzODRiZGE4YzdhNGQ5MGNhNzE3MDYyYTA2NDIxYTkyYzhiODkwJlgtQW16LVNpZ25lZEhlYWRlcnM9aG9zdCJ9.mR9kM9xX0TdzPuSpuspCllHYQiq79dFQ2rtuNvjrl6w
|
||||
icon: https://avatars.githubusercontent.com/u/49844015?s=200&v=4
|
||||
name: "chatterbox"
|
||||
alias: "chatterbox"
|
||||
capabilities:
|
||||
@@ -449,6 +432,47 @@
|
||||
nvidia-cuda-12: "cuda12-qwen-tts"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-tts"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-tts"
|
||||
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
|
||||
- &qwen-asr
|
||||
urls:
|
||||
- https://github.com/QwenLM/Qwen3-ASR
|
||||
description: |
|
||||
Qwen3-ASR is an automatic speech recognition model supporting multiple languages and batch inference.
|
||||
tags:
|
||||
- speech-recognition
|
||||
- ASR
|
||||
license: apache-2.0
|
||||
name: "qwen-asr"
|
||||
alias: "qwen-asr"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-asr"
|
||||
intel: "intel-qwen-asr"
|
||||
amd: "rocm-qwen-asr"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-asr"
|
||||
default: "cpu-qwen-asr"
|
||||
nvidia-cuda-13: "cuda13-qwen-asr"
|
||||
nvidia-cuda-12: "cuda12-qwen-asr"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-asr"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-asr"
|
||||
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
|
||||
- &voxcpm
|
||||
urls:
|
||||
- https://github.com/ModelBest/VoxCPM
|
||||
description: |
|
||||
VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly expressive speech.
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: mit
|
||||
name: "voxcpm"
|
||||
alias: "voxcpm"
|
||||
capabilities:
|
||||
nvidia: "cuda12-voxcpm"
|
||||
intel: "intel-voxcpm"
|
||||
amd: "rocm-voxcpm"
|
||||
default: "cpu-voxcpm"
|
||||
nvidia-cuda-13: "cuda13-voxcpm"
|
||||
nvidia-cuda-12: "cuda12-voxcpm"
|
||||
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
|
||||
- &pocket-tts
|
||||
urls:
|
||||
@@ -471,7 +495,7 @@
|
||||
nvidia-cuda-12: "cuda12-pocket-tts"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-pocket-tts"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-pocket-tts"
|
||||
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
|
||||
icon: https://avatars.githubusercontent.com/u/151010778?s=200&v=4
|
||||
- &piper
|
||||
name: "piper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-piper"
|
||||
@@ -989,6 +1013,33 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-vllm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-vllm
|
||||
# vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "vllm-omni-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-vllm-omni-development"
|
||||
amd: "rocm-vllm-omni-development"
|
||||
nvidia-cuda-12: "cuda12-vllm-omni-development"
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "cuda12-vllm-omni"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "rocm-vllm-omni"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "cuda12-vllm-omni-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "rocm-vllm-omni-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-vllm-omni
|
||||
# rfdetr
|
||||
- !!merge <<: *rfdetr
|
||||
name: "rfdetr-development"
|
||||
@@ -1251,22 +1302,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-metal-darwin-arm64-diffusers
|
||||
## exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "exllama2-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-exllama2-development"
|
||||
intel: "intel-exllama2-development"
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-exllama2
|
||||
## kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "kokoro-development"
|
||||
@@ -1401,6 +1436,55 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
|
||||
## whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "whisperx-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-whisperx-development"
|
||||
amd: "rocm-whisperx-development"
|
||||
default: "cpu-whisperx-development"
|
||||
nvidia-cuda-13: "cuda13-whisperx-development"
|
||||
nvidia-cuda-12: "cuda12-whisperx-development"
|
||||
- !!merge <<: *whisperx
|
||||
name: "cpu-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cpu-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda12-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda12-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "rocm-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "rocm-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda13-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda13-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-whisperx
|
||||
## coqui
|
||||
|
||||
- !!merge <<: *coqui
|
||||
@@ -1439,47 +1523,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-coqui
|
||||
## bark
|
||||
- !!merge <<: *bark
|
||||
name: "bark-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-bark-development"
|
||||
intel: "intel-bark-development"
|
||||
amd: "rocm-bark-development"
|
||||
- !!merge <<: *bark
|
||||
name: "rocm-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-bark
|
||||
- !!merge <<: *bark
|
||||
name: "intel-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-bark
|
||||
- !!merge <<: *bark
|
||||
name: "intel-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-bark
|
||||
- !!merge <<: *bark
|
||||
name: "cuda12-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-bark
|
||||
- !!merge <<: *bark
|
||||
name: "rocm-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-bark
|
||||
- !!merge <<: *bark
|
||||
name: "cuda12-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-bark
|
||||
- !!merge <<: *barkcpp
|
||||
name: "bark-cpp-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-bark-cpp"
|
||||
alias: "bark-cpp"
|
||||
## chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "chatterbox-development"
|
||||
@@ -1718,6 +1761,149 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts
|
||||
## qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "qwen-asr-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-asr-development"
|
||||
intel: "intel-qwen-asr-development"
|
||||
amd: "rocm-qwen-asr-development"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-asr-development"
|
||||
default: "cpu-qwen-asr-development"
|
||||
nvidia-cuda-13: "cuda13-qwen-asr-development"
|
||||
nvidia-cuda-12: "cuda12-qwen-asr-development"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-asr-development"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-asr-development"
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cpu-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cpu-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda12-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda12-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "intel-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "intel-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "rocm-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "rocm-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "nvidia-l4t-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "nvidia-l4t-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr
|
||||
## voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "voxcpm-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-voxcpm-development"
|
||||
intel: "intel-voxcpm-development"
|
||||
amd: "rocm-voxcpm-development"
|
||||
default: "cpu-voxcpm-development"
|
||||
nvidia-cuda-13: "cuda13-voxcpm-development"
|
||||
nvidia-cuda-12: "cuda12-voxcpm-development"
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cpu-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cpu-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda12-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda12-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda13-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda13-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "intel-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "intel-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "rocm-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "rocm-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-voxcpm
|
||||
## pocket-tts
|
||||
- !!merge <<: *pocket-tts
|
||||
name: "pocket-tts-development"
|
||||
|
||||
@@ -16,10 +16,8 @@ The Python backends use a unified build system based on `libbackend.sh` that pro
- **transformers** - Hugging Face Transformers framework (PyTorch-based)
- **vllm** - High-performance LLM inference engine
- **mlx** - Apple Silicon optimized ML framework
- **exllama2** - ExLlama2 quantized models

### Audio & Speech
- **bark** - Text-to-speech synthesis
- **coqui** - Coqui TTS models
- **faster-whisper** - Fast Whisper speech recognition
- **kitten-tts** - Lightweight TTS
@@ -1,16 +0,0 @@
# Creating a separate environment for ttsbark project

```
make ttsbark
```

# Testing the gRPC server

```
<The path of your python interpreter> -m unittest test_ttsbark.py
```

For example
```
/opt/conda/envs/bark/bin/python -m unittest extra/grpc/bark/test_ttsbark.py
```
@@ -1,98 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Bark TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
from scipy.io.wavfile import write as write_wav
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
from bark import SAMPLE_RATE, generate_audio, preload_models
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
def LoadModel(self, request, context):
|
||||
model_name = request.Model
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
# download and load all models
|
||||
preload_models()
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
# Implement your logic here for the LoadModel service
|
||||
# Replace this with your desired response
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def TTS(self, request, context):
|
||||
model = request.model
|
||||
print(request, file=sys.stderr)
|
||||
try:
|
||||
audio_array = None
|
||||
if model != "":
|
||||
audio_array = generate_audio(request.text, history_prompt=model)
|
||||
else:
|
||||
audio_array = generate_audio(request.text)
|
||||
print("saving to", request.dst, file=sys.stderr)
|
||||
# save audio to disk
|
||||
write_wav(request.dst, SAMPLE_RATE, audio_array)
|
||||
print("saved to", request.dst, file=sys.stderr)
|
||||
print("tts for", file=sys.stderr)
|
||||
print(request, file=sys.stderr)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
@@ -1,19 +0,0 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

installRequirements
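For context on the flags added above, a hedged sketch of the kind of uv invocation they end up shaping inside `installRequirements` (the real command lives in `common/libbackend.sh`; the index URL and package list here are illustrative only):

```bash
# unsafe-first-match lets uv keep scanning later indexes (PyPI) when the Intel
# index "matches" a name but serves no usable files; --upgrade permits moving
# torch to whatever version the Intel index actually provides.
uv pip install --upgrade --index-strategy=unsafe-first-match \
    --extra-index-url https://download.pytorch.org/whl/xpu \
    torch "optimum[openvino]"
```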
@@ -1,4 +0,0 @@
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1
|
||||
torchaudio==2.4.1
|
||||
@@ -1,4 +0,0 @@
|
||||
torch==2.4.1
|
||||
torchaudio==2.4.1
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,4 +0,0 @@
|
||||
bark==0.1.5
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
@@ -1,4 +1,4 @@
-# Creating a separate environment for ttsbark project
+# Creating a separate environment for coqui project

 ```
 make coqui
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-This is an extra gRPC server of LocalAI for Bark TTS
+This is an extra gRPC server of LocalAI for Coqui TTS
 """
 from concurrent import futures
 import time
backend/python/exllama2/.gitignore (1 line, vendored)
@@ -1 +0,0 @@
source
@@ -1,17 +0,0 @@
.PHONY: exllama2
exllama2:
	bash install.sh

.PHONY: run
run: exllama2
	@echo "Running exllama2..."
	bash run.sh
	@echo "exllama2 run."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	$(RM) -r venv source __pycache__
@@ -1,143 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import grpc
|
||||
from concurrent import futures
|
||||
import time
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import glob
|
||||
|
||||
from pathlib import Path
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from torch import version as torch_version
|
||||
|
||||
|
||||
from exllamav2.generator import (
|
||||
ExLlamaV2BaseGenerator,
|
||||
ExLlamaV2Sampler
|
||||
)
|
||||
|
||||
|
||||
from exllamav2 import (
|
||||
ExLlamaV2,
|
||||
ExLlamaV2Config,
|
||||
ExLlamaV2Cache,
|
||||
ExLlamaV2Cache_8bit,
|
||||
ExLlamaV2Tokenizer,
|
||||
model_init,
|
||||
)
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
try:
|
||||
model_directory = request.ModelFile
|
||||
|
||||
config = ExLlamaV2Config()
|
||||
config.model_dir = model_directory
|
||||
config.prepare()
|
||||
|
||||
model = ExLlamaV2(config)
|
||||
|
||||
cache = ExLlamaV2Cache(model, lazy=True)
|
||||
model.load_autosplit(cache)
|
||||
|
||||
tokenizer = ExLlamaV2Tokenizer(config)
|
||||
|
||||
# Initialize generator
|
||||
|
||||
generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)
|
||||
|
||||
self.generator = generator
|
||||
|
||||
generator.warmup()
|
||||
self.model = model
|
||||
self.tokenizer = tokenizer
|
||||
self.cache = cache
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def Predict(self, request, context):
|
||||
|
||||
penalty = 1.15
|
||||
if request.Penalty != 0.0:
|
||||
penalty = request.Penalty
|
||||
|
||||
settings = ExLlamaV2Sampler.Settings()
|
||||
settings.temperature = request.Temperature
|
||||
settings.top_k = request.TopK
|
||||
settings.top_p = request.TopP
|
||||
settings.token_repetition_penalty = penalty
|
||||
settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])
|
||||
tokens = 512
|
||||
|
||||
if request.Tokens != 0:
|
||||
tokens = request.Tokens
|
||||
output = self.generator.generate_simple(
|
||||
request.Prompt, settings, tokens)
|
||||
|
||||
# Remove prompt from response if present
|
||||
if request.Prompt in output:
|
||||
output = output.replace(request.Prompt, "")
|
||||
|
||||
return backend_pb2.Result(message=bytes(output, encoding='utf-8'))
|
||||
|
||||
def PredictStream(self, request, context):
|
||||
# Implement PredictStream RPC
|
||||
# for reply in some_data_generator():
|
||||
# yield reply
|
||||
# Not implemented yet
|
||||
return self.Predict(request, context)
|
||||
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
@@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
LIMIT_TARGETS="cublas"
|
||||
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
|
||||
EXLLAMA2_VERSION=c0ddebaaaf8ffd1b3529c2bb654e650bce2f790f
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
git clone https://github.com/turboderp/exllamav2 $MY_DIR/source
|
||||
pushd ${MY_DIR}/source && git checkout -b build ${EXLLAMA2_VERSION} && popd
|
||||
|
||||
# This installs exllamav2 in JIT mode so it will compile the appropriate torch extension at runtime
|
||||
EXLLAMA_NOCOMPILE= uv pip install ${EXTRA_PIP_INSTALL_FLAGS} ${MY_DIR}/source/
|
||||
@@ -1,3 +0,0 @@
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1
|
||||
@@ -1,3 +0,0 @@
|
||||
torch==2.4.1
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,4 +0,0 @@
|
||||
# This is here to trigger the install script to add --no-build-isolation to the uv pip install commands
|
||||
# exllama2 does not specify it's build requirements per PEP517, so we need to provide some things ourselves
|
||||
wheel
|
||||
setuptools
|
||||
@@ -1,5 +0,0 @@
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
wheel
|
||||
setuptools
|
||||
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-This is an extra gRPC server of LocalAI for Bark TTS
+This is an extra gRPC server of LocalAI for Faster Whisper TTS
 """
 from concurrent import futures
 import time
@@ -40,7 +40,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             device = "mps"
         try:
             print("Preparing models, please wait", file=sys.stderr)
-            self.model = WhisperModel(request.Model, device=device, compute_type="float16")
+            self.model = WhisperModel(request.Model, device=device, compute_type="default")
         except Exception as err:
             return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
         # Implement your logic here for the LoadModel service
@@ -55,11 +55,12 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             id = 0
             for segment in segments:
                 print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
-                resultSegments.append(backend_pb2.TranscriptSegment(id=id, start=segment.start, end=segment.end, text=segment.text))
+                resultSegments.append(backend_pb2.TranscriptSegment(id=id, start=int(segment.start)*1e9, end=int(segment.end)*1e9, text=segment.text))
                 text += segment.text
-                id += 1
+                id += 1
         except Exception as err:
             print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
             raise err

         return backend_pb2.TranscriptResult(segments=resultSegments, text=text)
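For reference, the segment fields are now interpreted as nanoseconds. A hedged sketch of the seconds-to-nanoseconds conversion (illustrative helper, not part of the diff): note that wrapping the multiplication inside `int()` keeps sub-second precision and yields an integer, whereas `int(segment.start)*1e9`, as written above, truncates to whole seconds and produces a float.

```python
def seconds_to_nanoseconds(seconds: float) -> int:
    """Convert a faster-whisper segment boundary (seconds) to integer nanoseconds."""
    return int(seconds * 1e9)

# Example: a segment starting at 1.37 s.
print(seconds_to_nanoseconds(1.37))  # 1370000000
print(int(1.37) * 1e9)               # 1000000000.0 -- whole-second truncation, float result
```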
backend/python/qwen-asr/Makefile (new file, 25 lines)
@@ -0,0 +1,25 @@
.DEFAULT_GOAL := install

.PHONY: qwen-asr
qwen-asr:
	bash install.sh

.PHONY: run
run: qwen-asr
	@echo "Running qwen-asr..."
	bash run.sh
	@echo "qwen-asr run."

.PHONY: test
test: qwen-asr
	@echo "Testing qwen-asr..."
	bash test.sh
	@echo "qwen-asr tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
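A hedged usage note for the targets above: the file as shown sets `.DEFAULT_GOAL := install` but defines no `install` target, so the named targets are the reliable entry points.

```bash
cd backend/python/qwen-asr
make qwen-asr   # runs install.sh to set up the environment
make run        # starts the gRPC server via run.sh
make test       # runs test.sh
```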
212
backend/python/qwen-asr/backend.py
Normal file
212
backend/python/qwen-asr/backend.py
Normal file
@@ -0,0 +1,212 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
gRPC server of LocalAI for Qwen3-ASR (transformers backend, non-vLLM).
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import torch
|
||||
from qwen_asr import Qwen3ASRModel
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
def is_float(s):
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def is_int(s):
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
if torch.cuda.is_available():
|
||||
device = "cuda"
|
||||
else:
|
||||
device = "cpu"
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
self.device = device
|
||||
self.options = {}
|
||||
|
||||
for opt in request.Options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1)
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
model_path = request.Model or "Qwen/Qwen3-ASR-1.7B"
|
||||
default_dtype = torch.bfloat16 if self.device == "cuda" else torch.float32
|
||||
load_dtype = default_dtype
|
||||
if "torch_dtype" in self.options:
|
||||
d = str(self.options["torch_dtype"]).lower()
|
||||
if d == "fp16":
|
||||
load_dtype = torch.float16
|
||||
elif d == "bf16":
|
||||
load_dtype = torch.bfloat16
|
||||
elif d == "fp32":
|
||||
load_dtype = torch.float32
|
||||
del self.options["torch_dtype"]
|
||||
|
||||
self.max_inference_batch_size = self.options.get("max_inference_batch_size", 32)
|
||||
self.max_new_tokens = self.options.get("max_new_tokens", 256)
|
||||
|
||||
forced_aligner = self.options.get("forced_aligner")
|
||||
if forced_aligner is not None and isinstance(forced_aligner, str):
|
||||
forced_aligner = forced_aligner.strip() or None
|
||||
attn_implementation = self.options.get("attn_implementation")
|
||||
if attn_implementation is not None and isinstance(attn_implementation, str):
|
||||
attn_implementation = attn_implementation.strip() or None
|
||||
|
||||
if self.device == "mps":
|
||||
device_map = None
|
||||
elif self.device == "cuda":
|
||||
device_map = "cuda:0"
|
||||
else:
|
||||
device_map = "cpu"
|
||||
|
||||
load_kwargs = dict(
|
||||
dtype=load_dtype,
|
||||
device_map=device_map,
|
||||
max_inference_batch_size=self.max_inference_batch_size,
|
||||
max_new_tokens=self.max_new_tokens,
|
||||
)
|
||||
if attn_implementation:
|
||||
load_kwargs["attn_implementation"] = attn_implementation
|
||||
if forced_aligner:
|
||||
load_kwargs["forced_aligner"] = forced_aligner
|
||||
forced_aligner_kwargs = dict(
|
||||
dtype=load_dtype,
|
||||
device_map=device_map,
|
||||
)
|
||||
if attn_implementation:
|
||||
forced_aligner_kwargs["attn_implementation"] = attn_implementation
|
||||
load_kwargs["forced_aligner_kwargs"] = forced_aligner_kwargs
|
||||
|
||||
try:
|
||||
print(f"Loading Qwen3-ASR from {model_path}", file=sys.stderr)
|
||||
if attn_implementation:
|
||||
print(f"Using attn_implementation: {attn_implementation}", file=sys.stderr)
|
||||
if forced_aligner:
|
||||
print(f"Loading with forced_aligner: {forced_aligner}", file=sys.stderr)
|
||||
self.model = Qwen3ASRModel.from_pretrained(model_path, **load_kwargs)
|
||||
print("Qwen3-ASR model loaded successfully", file=sys.stderr)
|
||||
except Exception as err:
|
||||
print(f"[ERROR] LoadModel failed: {err}", file=sys.stderr)
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=str(err))
|
||||
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def AudioTranscription(self, request, context):
|
||||
result_segments = []
|
||||
text = ""
|
||||
try:
|
||||
audio_path = request.dst
|
||||
if not audio_path or not os.path.exists(audio_path):
|
||||
print(f"Error: Audio file not found: {audio_path}", file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
language = None
|
||||
if request.language and request.language.strip():
|
||||
language = request.language.strip()
|
||||
|
||||
results = self.model.transcribe(audio=audio_path, language=language)
|
||||
|
||||
if not results:
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
r = results[0]
|
||||
text = r.text or ""
|
||||
|
||||
if getattr(r, 'time_stamps', None) and len(r.time_stamps) > 0:
|
||||
for idx, ts in enumerate(r.time_stamps):
|
||||
start_ms = 0
|
||||
end_ms = 0
|
||||
seg_text = text
|
||||
if isinstance(ts, (list, tuple)) and len(ts) >= 3:
|
||||
start_ms = int(float(ts[0]) * 1000) if ts[0] is not None else 0
|
||||
end_ms = int(float(ts[1]) * 1000) if ts[1] is not None else 0
|
||||
seg_text = ts[2] if len(ts) > 2 and ts[2] is not None else ""
|
||||
result_segments.append(backend_pb2.TranscriptSegment(
|
||||
id=idx, start=start_ms, end=end_ms, text=seg_text
|
||||
))
|
||||
else:
|
||||
if text:
|
||||
result_segments.append(backend_pb2.TranscriptSegment(
|
||||
id=0, start=0, end=0, text=text
|
||||
))
|
||||
except Exception as err:
|
||||
print(f"Error in AudioTranscription: {err}", file=sys.stderr)
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
return backend_pb2.TranscriptResult(segments=result_segments, text=text)
|
||||
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(
|
||||
futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024),
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024),
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024),
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument("--addr", default="localhost:50051", help="The address to bind the server to.")
|
||||
args = parser.parse_args()
|
||||
serve(args.addr)
|
||||
backend/python/qwen-asr/install.sh (new file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash
set -e

EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

PYTHON_VERSION="3.12"
PYTHON_PATCH="12"
PY_STANDALONE_TAG="20251120"

installRequirements
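A hedged example of selecting a build profile when invoking this installer; `BUILD_PROFILE` and `installRequirements` come from `common/libbackend.sh`, so setting the profile through the environment is an assumption about that helper, not something documented here.

```bash
# Default install (profile resolved by libbackend.sh):
bash backend/python/qwen-asr/install.sh

# Intel/XPU install, which appends the uv index fallbacks shown above:
BUILD_PROFILE=intel bash backend/python/qwen-asr/install.sh
```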
3
backend/python/qwen-asr/requirements-cpu.txt
Normal file
3
backend/python/qwen-asr/requirements-cpu.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
||||
qwen-asr
|
||||
1
backend/python/qwen-asr/requirements-cublas12-after.txt
Normal file
1
backend/python/qwen-asr/requirements-cublas12-after.txt
Normal file
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
3
backend/python/qwen-asr/requirements-cublas12.txt
Normal file
3
backend/python/qwen-asr/requirements-cublas12.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu121
|
||||
torch
|
||||
qwen-asr
|
||||
3
backend/python/qwen-asr/requirements-cublas13.txt
Normal file
3
backend/python/qwen-asr/requirements-cublas13.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
qwen-asr
|
||||
3
backend/python/qwen-asr/requirements-hipblas.txt
Normal file
3
backend/python/qwen-asr/requirements-hipblas.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
qwen-asr
|
||||
1
backend/python/qwen-asr/requirements-intel-after.txt
Normal file
1
backend/python/qwen-asr/requirements-intel-after.txt
Normal file
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
3
backend/python/qwen-asr/requirements-intel.txt
Normal file
3
backend/python/qwen-asr/requirements-intel.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
qwen-asr
|
||||
3
backend/python/qwen-asr/requirements-l4t12.txt
Normal file
3
backend/python/qwen-asr/requirements-l4t12.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
|
||||
torch
|
||||
qwen-asr
|
||||
3
backend/python/qwen-asr/requirements-l4t13.txt
Normal file
3
backend/python/qwen-asr/requirements-l4t13.txt
Normal file
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
qwen-asr
|
||||
2
backend/python/qwen-asr/requirements-mps.txt
Normal file
2
backend/python/qwen-asr/requirements-mps.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
torch==2.7.1
|
||||
qwen-asr
|
||||
5
backend/python/qwen-asr/requirements.txt
Normal file
5
backend/python/qwen-asr/requirements.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
setuptools
|
||||
2
backend/python/bark/run.sh → backend/python/qwen-asr/run.sh
Executable file → Normal file
2
backend/python/bark/run.sh → backend/python/qwen-asr/run.sh
Executable file → Normal file
@@ -6,4 +6,4 @@ else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
startBackend $@
|
||||
94
backend/python/qwen-asr/test.py
Normal file
94
backend/python/qwen-asr/test.py
Normal file
@@ -0,0 +1,94 @@
|
||||
"""
|
||||
Tests for the Qwen3-ASR gRPC backend.
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
# Skip heavy transcription test in CI (model download + inference)
|
||||
SKIP_ASR_TESTS = os.environ.get("SKIP_ASR_TESTS", "false").lower() == "true"
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(15)
|
||||
|
||||
def tearDown(self):
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-1.7B"))
|
||||
self.assertTrue(response.success, response.message)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
@unittest.skipIf(SKIP_ASR_TESTS, "ASR transcription test skipped (SKIP_ASR_TESTS=true)")
|
||||
def test_audio_transcription(self):
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
audio_file = os.path.join(temp_dir, 'audio.wav')
|
||||
try:
|
||||
url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-ASR-Repo/asr_en.wav"
|
||||
result = subprocess.run(
|
||||
["wget", "-q", url, "-O", audio_file],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
self.skipTest(f"Could not download sample audio: {result.stderr}")
|
||||
if not os.path.exists(audio_file):
|
||||
self.skipTest("Sample audio file not found after download")
|
||||
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-0.6B"))
|
||||
self.assertTrue(load_response.success, load_response.message)
|
||||
|
||||
transcript_response = stub.AudioTranscription(
|
||||
backend_pb2.TranscriptRequest(dst=audio_file)
|
||||
)
|
||||
self.assertIsNotNone(transcript_response)
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
self.assertGreaterEqual(len(transcript_response.segments), 0)
|
||||
all_text = ""
|
||||
for segment in transcript_response.segments:
|
||||
all_text += segment.text
|
||||
print(f"All text: {all_text}")
|
||||
self.assertIn("big", all_text)
|
||||
if transcript_response.segments:
|
||||
self.assertIsNotNone(transcript_response.segments[0].text)
|
||||
finally:
|
||||
self.tearDown()
|
||||
if os.path.exists(temp_dir):
|
||||
shutil.rmtree(temp_dir)
|
||||
backend/python/bark/test.sh → backend/python/qwen-asr/test.sh (renamed, 0 lines changed; Executable file → Normal file)
@@ -5,5 +5,5 @@ accelerate
transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5

@@ -5,5 +5,5 @@ numba==0.60.0
transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5

@@ -5,5 +5,5 @@ numba==0.60.0
transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5

@@ -7,5 +7,5 @@ numba==0.60.0
bitsandbytes
outetts
bitsandbytes
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5

@@ -6,5 +6,5 @@ numba==0.60.0
transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5

@@ -1,5 +1,5 @@
grpcio==1.76.0
protobuf==6.33.4
protobuf==6.33.5
certifi
setuptools
scipy==1.15.1
@@ -2,6 +2,43 @@
vibevoice:
	bash install.sh

.PHONY: download-voices
download-voices:
	@echo "Downloading voice preset files..."
	@mkdir -p voices/streaming_model
	@if command -v wget >/dev/null 2>&1; then \
		wget -q -O voices/streaming_model/en-Frank_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Frank_man.pt && \
		wget -q -O voices/streaming_model/en-Grace_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Grace_woman.pt && \
		wget -q -O voices/streaming_model/en-Mike_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Mike_man.pt && \
		wget -q -O voices/streaming_model/en-Emma_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Emma_woman.pt && \
		wget -q -O voices/streaming_model/en-Carter_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Carter_man.pt && \
		wget -q -O voices/streaming_model/en-Davis_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Davis_man.pt && \
		echo "Voice files downloaded successfully"; \
	elif command -v curl >/dev/null 2>&1; then \
		curl -sL -o voices/streaming_model/en-Frank_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Frank_man.pt && \
		curl -sL -o voices/streaming_model/en-Grace_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Grace_woman.pt && \
		curl -sL -o voices/streaming_model/en-Mike_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Mike_man.pt && \
		curl -sL -o voices/streaming_model/en-Emma_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Emma_woman.pt && \
		curl -sL -o voices/streaming_model/en-Carter_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Carter_man.pt && \
		curl -sL -o voices/streaming_model/en-Davis_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Davis_man.pt && \
		echo "Voice files downloaded successfully"; \
	else \
		echo "Error: Neither wget nor curl found. Cannot download voice files."; \
		exit 1; \
	fi

.PHONY: run
run: vibevoice
	@echo "Running vibevoice..."
@@ -9,7 +46,7 @@ run: vibevoice
	@echo "vibevoice run."

.PHONY: test
test: vibevoice
test: vibevoice download-voices
	@echo "Testing vibevoice..."
	bash test.sh
	@echo "vibevoice tested."
@@ -16,6 +16,8 @@ import backend_pb2_grpc
|
||||
import torch
|
||||
from vibevoice.modular.modeling_vibevoice_streaming_inference import VibeVoiceStreamingForConditionalGenerationInference
|
||||
from vibevoice.processor.vibevoice_streaming_processor import VibeVoiceStreamingProcessor
|
||||
from vibevoice.modular.modeling_vibevoice_asr import VibeVoiceASRForConditionalGeneration
|
||||
from vibevoice.processor.vibevoice_asr_processor import VibeVoiceASRProcessor
|
||||
|
||||
import grpc
|
||||
|
||||
@@ -95,21 +97,72 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
# Check if ASR mode is enabled
|
||||
self.asr_mode = self.options.get("asr_mode", False)
|
||||
if not isinstance(self.asr_mode, bool):
|
||||
# Handle string "true"/"false" case
|
||||
self.asr_mode = str(self.asr_mode).lower() == "true"
|
||||
|
||||
# Get model path from request
|
||||
model_path = request.Model
|
||||
if not model_path:
|
||||
model_path = "microsoft/VibeVoice-Realtime-0.5B"
|
||||
if self.asr_mode:
|
||||
model_path = "microsoft/VibeVoice-ASR" # Default ASR model
|
||||
else:
|
||||
model_path = "microsoft/VibeVoice-Realtime-0.5B" # Default TTS model
|
||||
|
||||
# Get inference steps from options, default to 5
|
||||
default_dtype = torch.bfloat16 if self.device == "cuda" else torch.float32
|
||||
|
||||
load_dtype = default_dtype
|
||||
if "torch_dtype" in self.options:
|
||||
torch_dtype_str = str(self.options["torch_dtype"]).lower()
|
||||
if torch_dtype_str == "fp16":
|
||||
load_dtype = torch.float16
|
||||
elif torch_dtype_str == "bf16":
|
||||
load_dtype = torch.bfloat16
|
||||
elif torch_dtype_str == "fp32":
|
||||
load_dtype = torch.float32
|
||||
# remove it from options after reading
|
||||
del self.options["torch_dtype"]
|
||||
|
||||
# Get inference steps from options, default to 5 (TTS only)
|
||||
self.inference_steps = self.options.get("inference_steps", 5)
|
||||
if not isinstance(self.inference_steps, int) or self.inference_steps <= 0:
|
||||
self.inference_steps = 5
|
||||
|
||||
# Get cfg_scale from options, default to 1.5
|
||||
# Get cfg_scale from options, default to 1.5 (TTS only)
|
||||
self.cfg_scale = self.options.get("cfg_scale", 1.5)
|
||||
if not isinstance(self.cfg_scale, (int, float)) or self.cfg_scale <= 0:
|
||||
self.cfg_scale = 1.5
|
||||
|
||||
# Get ASR generation parameters from options
|
||||
self.max_new_tokens = self.options.get("max_new_tokens", 512)
|
||||
if not isinstance(self.max_new_tokens, int) or self.max_new_tokens <= 0:
|
||||
self.max_new_tokens = 512
|
||||
|
||||
self.temperature = self.options.get("temperature", 0.0)
|
||||
if not isinstance(self.temperature, (int, float)) or self.temperature < 0:
|
||||
self.temperature = 0.0
|
||||
|
||||
self.top_p = self.options.get("top_p", 1.0)
|
||||
if not isinstance(self.top_p, (int, float)) or self.top_p <= 0:
|
||||
self.top_p = 1.0
|
||||
|
||||
self.do_sample = self.options.get("do_sample", None)
|
||||
if self.do_sample is None:
|
||||
# Default: use sampling if temperature > 0
|
||||
self.do_sample = self.temperature > 0
|
||||
elif not isinstance(self.do_sample, bool):
|
||||
self.do_sample = str(self.do_sample).lower() == "true"
|
||||
|
||||
self.num_beams = self.options.get("num_beams", 1)
|
||||
if not isinstance(self.num_beams, int) or self.num_beams < 1:
|
||||
self.num_beams = 1
|
||||
|
||||
self.repetition_penalty = self.options.get("repetition_penalty", 1.0)
|
||||
if not isinstance(self.repetition_penalty, (int, float)) or self.repetition_penalty <= 0:
|
||||
self.repetition_penalty = 1.0
|
||||
|
||||
# Determine voices directory
|
||||
# Priority order:
|
||||
# 1. voices_dir option (explicitly set by user - highest priority)
|
||||
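The hunk above reads generation and dtype settings out of LocalAI's `key:value` Options strings and coerces them into typed attributes on the servicer. A minimal standalone sketch of that convention is shown below; the helper name, defaults, and example option strings are illustrative only and are not part of the patch.

# Illustrative sketch of the key:value Options parsing convention used above.
import torch

def parse_options(options, device="cpu"):
    parsed = {}
    for opt in options:
        if ":" not in opt:
            continue
        key, value = opt.split(":", 1)
        # Booleans arrive as the strings "true"/"false"
        if value.lower() in ("true", "false"):
            value = value.lower() == "true"
        parsed[key] = value

    # Map the optional torch_dtype option onto a torch dtype, mirroring the patch
    dtype_map = {"fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32}
    default_dtype = torch.bfloat16 if device == "cuda" else torch.float32
    load_dtype = dtype_map.get(str(parsed.pop("torch_dtype", "")).lower(), default_dtype)
    return parsed, load_dtype

opts, dtype = parse_options(["asr_mode:true", "torch_dtype:bf16", "max_new_tokens:512"])
print(opts, dtype)  # {'asr_mode': True, 'max_new_tokens': '512'} torch.bfloat16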
@@ -163,91 +216,151 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
else:
|
||||
voices_dir = None
|
||||
|
||||
# Initialize voice-related attributes (TTS only)
|
||||
self.voices_dir = voices_dir
|
||||
self.voice_presets = {}
|
||||
self._voice_cache = {}
|
||||
self.default_voice_key = None
|
||||
|
||||
# Load voice presets if directory exists
|
||||
if self.voices_dir and os.path.exists(self.voices_dir):
|
||||
self._load_voice_presets()
|
||||
else:
|
||||
print(f"Warning: Voices directory not found. Voice presets will not be available.", file=sys.stderr)
|
||||
|
||||
# Store AudioPath, ModelFile, and ModelPath from LoadModel request for use in TTS
|
||||
self.audio_path = request.AudioPath if hasattr(request, 'AudioPath') and request.AudioPath else None
|
||||
self.model_file = request.ModelFile if hasattr(request, 'ModelFile') and request.ModelFile else None
|
||||
self.model_path = request.ModelPath if hasattr(request, 'ModelPath') and request.ModelPath else None
|
||||
|
||||
# Decide attention implementation and device_map (matching upstream example)
|
||||
if self.device == "mps":
|
||||
device_map = None
|
||||
attn_impl_primary = "sdpa" # flash_attention_2 not supported on MPS
|
||||
elif self.device == "cuda":
|
||||
device_map = "cuda"
|
||||
attn_impl_primary = "flash_attention_2"
|
||||
else: # cpu
|
||||
device_map = "cpu" # Match upstream example: use "cpu" for CPU device_map
|
||||
attn_impl_primary = "sdpa"
|
||||
|
||||
try:
|
||||
print(f"Loading processor & model from {model_path}", file=sys.stderr)
|
||||
self.processor = VibeVoiceStreamingProcessor.from_pretrained(model_path)
|
||||
if self.asr_mode:
|
||||
# Load ASR model and processor
|
||||
print(f"Loading ASR processor & model from {model_path}", file=sys.stderr)
|
||||
|
||||
# Load ASR processor
|
||||
self.processor = VibeVoiceASRProcessor.from_pretrained(
|
||||
model_path,
|
||||
language_model_pretrained_name="Qwen/Qwen2.5-7B"
|
||||
)
|
||||
|
||||
# Decide dtype & attention implementation
|
||||
if self.device == "mps":
|
||||
load_dtype = torch.float32 # MPS requires float32
|
||||
device_map = None
|
||||
attn_impl_primary = "sdpa" # flash_attention_2 not supported on MPS
|
||||
elif self.device == "cuda":
|
||||
load_dtype = torch.bfloat16
|
||||
device_map = "cuda"
|
||||
attn_impl_primary = "flash_attention_2"
|
||||
else: # cpu
|
||||
load_dtype = torch.float32
|
||||
device_map = "cpu"
|
||||
attn_impl_primary = "sdpa"
|
||||
print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}", file=sys.stderr)
|
||||
|
||||
print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}", file=sys.stderr)
|
||||
|
||||
# Load model with device-specific logic
|
||||
try:
|
||||
if self.device == "mps":
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
# Load ASR model - use device_map=None and move manually to avoid JSON serialization issues
|
||||
# Load with dtype to ensure all components are in correct dtype from the start
|
||||
try:
|
||||
print(f"Using attention implementation: {attn_impl_primary}", file=sys.stderr)
|
||||
# Load model with dtype to ensure all components are in correct dtype
|
||||
self.model = VibeVoiceASRForConditionalGeneration.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
dtype=load_dtype,
|
||||
device_map=None, # Always use None, move manually to avoid JSON serialization issues
|
||||
attn_implementation=attn_impl_primary,
|
||||
device_map=None, # load then move
|
||||
trust_remote_code=True
|
||||
)
|
||||
self.model.to("mps")
|
||||
elif self.device == "cuda":
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
device_map="cuda",
|
||||
attn_implementation=attn_impl_primary,
|
||||
)
|
||||
else: # cpu
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
device_map="cpu",
|
||||
attn_implementation=attn_impl_primary,
|
||||
)
|
||||
except Exception as e:
|
||||
if attn_impl_primary == 'flash_attention_2':
|
||||
print(f"[ERROR] : {type(e).__name__}: {e}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
print("Error loading the model. Trying to use SDPA. However, note that only flash_attention_2 has been fully tested, and using SDPA may result in lower audio quality.", file=sys.stderr)
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
device_map=(self.device if self.device in ("cuda", "cpu") else None),
|
||||
attn_implementation='sdpa'
|
||||
)
|
||||
if self.device == "mps":
|
||||
self.model.to("mps")
|
||||
else:
|
||||
raise e
|
||||
# Move to device manually
|
||||
self.model = self.model.to(self.device)
|
||||
except Exception as e:
|
||||
if attn_impl_primary == 'flash_attention_2':
|
||||
print(f"[ERROR] : {type(e).__name__}: {e}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
print("Error loading the ASR model. Trying to use SDPA.", file=sys.stderr)
|
||||
self.model = VibeVoiceASRForConditionalGeneration.from_pretrained(
|
||||
model_path,
|
||||
dtype=load_dtype,
|
||||
device_map=None,
|
||||
attn_implementation='sdpa',
|
||||
trust_remote_code=True
|
||||
)
|
||||
# Move to device manually
|
||||
self.model = self.model.to(self.device)
|
||||
else:
|
||||
raise e
|
||||
|
||||
self.model.eval()
|
||||
self.model.set_ddpm_inference_steps(num_steps=self.inference_steps)
|
||||
|
||||
# Set default voice key
|
||||
if self.voice_presets:
|
||||
# Try to get default from environment or use first available
|
||||
preset_name = os.environ.get("VOICE_PRESET")
|
||||
self.default_voice_key = self._determine_voice_key(preset_name)
|
||||
print(f"Default voice preset: {self.default_voice_key}", file=sys.stderr)
|
||||
self.model.eval()
|
||||
print(f"ASR model loaded successfully", file=sys.stderr)
|
||||
else:
|
||||
print("Warning: No voice presets available. Voice selection will not work.", file=sys.stderr)
|
||||
# Load TTS model and processor (existing logic)
|
||||
# Load voice presets if directory exists
|
||||
if self.voices_dir and os.path.exists(self.voices_dir):
|
||||
self._load_voice_presets()
|
||||
else:
|
||||
print(f"Warning: Voices directory not found. Voice presets will not be available.", file=sys.stderr)
|
||||
|
||||
print(f"Loading TTS processor & model from {model_path}", file=sys.stderr)
|
||||
self.processor = VibeVoiceStreamingProcessor.from_pretrained(model_path)
|
||||
|
||||
|
||||
print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}", file=sys.stderr)
|
||||
|
||||
# Load model with device-specific logic (matching upstream example exactly)
|
||||
try:
|
||||
if self.device == "mps":
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
attn_implementation=attn_impl_primary,
|
||||
device_map=None, # load then move
|
||||
)
|
||||
self.model.to("mps")
|
||||
elif self.device == "cuda":
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
device_map=device_map,
|
||||
attn_implementation=attn_impl_primary,
|
||||
)
|
||||
else: # cpu
|
||||
# Match upstream example: use device_map="cpu" for CPU
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
device_map="cpu",
|
||||
attn_implementation=attn_impl_primary,
|
||||
)
|
||||
except Exception as e:
|
||||
if attn_impl_primary == 'flash_attention_2':
|
||||
print(f"[ERROR] : {type(e).__name__}: {e}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
print("Error loading the model. Trying to use SDPA. However, note that only flash_attention_2 has been fully tested, and using SDPA may result in lower audio quality.", file=sys.stderr)
|
||||
# Match upstream example fallback pattern
|
||||
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
|
||||
model_path,
|
||||
torch_dtype=load_dtype,
|
||||
device_map=(self.device if self.device in ("cuda", "cpu") else None),
|
||||
attn_implementation='sdpa'
|
||||
)
|
||||
if self.device == "mps":
|
||||
self.model.to("mps")
|
||||
else:
|
||||
raise e
|
||||
|
||||
self.model.eval()
|
||||
self.model.set_ddpm_inference_steps(num_steps=self.inference_steps)
|
||||
|
||||
# Set default voice key
|
||||
if self.voice_presets:
|
||||
# Try to get default from environment or use first available
|
||||
preset_name = os.environ.get("VOICE_PRESET")
|
||||
self.default_voice_key = self._determine_voice_key(preset_name)
|
||||
print(f"Default voice preset: {self.default_voice_key}", file=sys.stderr)
|
||||
else:
|
||||
print("Warning: No voice presets available. Voice selection will not work.", file=sys.stderr)
|
||||
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
# Format error message safely, avoiding JSON serialization issues
|
||||
error_msg = str(err)
|
||||
error_type = type(err).__name__
|
||||
# Include traceback for debugging
|
||||
tb_str = traceback.format_exc()
|
||||
print(f"[ERROR] LoadModel failed: {error_type}: {error_msg}", file=sys.stderr)
|
||||
print(tb_str, file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"{error_type}: {error_msg}")
|
||||
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
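Both the ASR and TTS loading paths above use the same retry shape: attempt flash_attention_2 first, then fall back to sdpa if the model cannot be loaded with it. A generic sketch of that shape follows; the load_fn callable is a hypothetical stand-in for the from_pretrained calls in the patch.

# Illustrative retry shape only; load_fn stands in for a from_pretrained call.
def load_with_attention_fallback(load_fn, primary="flash_attention_2", fallback="sdpa"):
    try:
        return load_fn(attn_implementation=primary)
    except Exception as err:
        # Only flash_attention_2 failures are retried; other choices re-raise.
        if primary != "flash_attention_2":
            raise
        print(f"Falling back to {fallback} after: {err}")
        return load_fn(attn_implementation=fallback)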
@@ -327,14 +440,30 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
if not voice_path or not os.path.exists(voice_path):
|
||||
return None
|
||||
|
||||
# Ensure cache exists (should be initialized in LoadModel)
|
||||
if not hasattr(self, '_voice_cache'):
|
||||
self._voice_cache = {}
|
||||
|
||||
# Use path as cache key
|
||||
if voice_path not in self._voice_cache:
|
||||
print(f"Loading prefilled prompt from {voice_path}", file=sys.stderr)
|
||||
prefilled_outputs = torch.load(
|
||||
voice_path,
|
||||
map_location=self._torch_device,
|
||||
weights_only=False,
|
||||
)
|
||||
# Match self-test.py: use string device name for map_location
|
||||
# Ensure self.device exists (should be set in LoadModel)
|
||||
try:
|
||||
if not hasattr(self, 'device'):
|
||||
# Fallback to CPU if device not set
|
||||
device_str = "cpu"
|
||||
else:
|
||||
device_str = str(self.device)
|
||||
except AttributeError as e:
|
||||
print(f"Error accessing self.device: {e}, falling back to CPU", file=sys.stderr)
|
||||
device_str = "cpu"
|
||||
if device_str != "cpu":
|
||||
map_loc = device_str
|
||||
else:
|
||||
map_loc = "cpu"
|
||||
# Call torch.load with explicit arguments
|
||||
prefilled_outputs = torch.load(voice_path, map_location=map_loc, weights_only=False)
|
||||
self._voice_cache[voice_path] = prefilled_outputs
|
||||
|
||||
return self._voice_cache[voice_path]
|
||||
@@ -351,17 +480,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
voice_path = self._get_voice_path(request.voice)
|
||||
if voice_path:
|
||||
voice_key = request.voice
|
||||
elif request.AudioPath:
|
||||
# Use AudioPath as voice file
|
||||
if os.path.isabs(request.AudioPath):
|
||||
voice_path = request.AudioPath
|
||||
elif request.ModelFile:
|
||||
model_file_base = os.path.dirname(request.ModelFile)
|
||||
voice_path = os.path.join(model_file_base, request.AudioPath)
|
||||
elif hasattr(request, 'ModelPath') and request.ModelPath:
|
||||
voice_path = os.path.join(request.ModelPath, request.AudioPath)
|
||||
elif self.audio_path:
|
||||
# Use AudioPath from LoadModel as voice file
|
||||
if os.path.isabs(self.audio_path):
|
||||
voice_path = self.audio_path
|
||||
elif self.model_file:
|
||||
model_file_base = os.path.dirname(self.model_file)
|
||||
voice_path = os.path.join(model_file_base, self.audio_path)
|
||||
elif self.model_path:
|
||||
voice_path = os.path.join(self.model_path, self.audio_path)
|
||||
else:
|
||||
voice_path = request.AudioPath
|
||||
voice_path = self.audio_path
|
||||
elif self.default_voice_key:
|
||||
voice_path = self._get_voice_path(self.default_voice_key)
|
||||
voice_key = self.default_voice_key
|
||||
@@ -404,8 +533,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
return_attention_mask=True,
|
||||
)
|
||||
|
||||
# Move tensors to target device
|
||||
target_device = self._torch_device
|
||||
# Move tensors to target device (matching self-test.py exactly)
|
||||
# Explicitly ensure it's a string to avoid any variable name collisions
|
||||
target_device = str(self.device) if str(self.device) != "cpu" else "cpu"
|
||||
for k, v in inputs.items():
|
||||
if torch.is_tensor(v):
|
||||
inputs[k] = v.to(target_device)
|
||||
@@ -447,6 +577,147 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def AudioTranscription(self, request, context):
|
||||
"""Transcribe audio file to text using ASR model."""
|
||||
try:
|
||||
# Validate ASR mode is active
|
||||
if not self.asr_mode:
|
||||
return backend_pb2.TranscriptResult(
|
||||
segments=[],
|
||||
text="",
|
||||
)
|
||||
# Note: We return empty result instead of error to match faster-whisper behavior
|
||||
|
||||
# Get audio file path
|
||||
audio_path = request.dst
|
||||
if not audio_path or not os.path.exists(audio_path):
|
||||
print(f"Error: Audio file not found: {audio_path}", file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(
|
||||
segments=[],
|
||||
text="",
|
||||
)
|
||||
|
||||
print(f"Transcribing audio file: {audio_path}", file=sys.stderr)
|
||||
|
||||
# Get context_info from options if available
|
||||
context_info = self.options.get("context_info", None)
|
||||
if context_info and isinstance(context_info, str) and context_info.strip():
|
||||
context_info = context_info.strip()
|
||||
else:
|
||||
context_info = None
|
||||
|
||||
# Process audio with ASR processor (matching gradio example)
|
||||
inputs = self.processor(
|
||||
audio=audio_path,
|
||||
sampling_rate=None,
|
||||
return_tensors="pt",
|
||||
add_generation_prompt=True,
|
||||
context_info=context_info
|
||||
)
|
||||
|
||||
# Move to device (matching gradio example)
|
||||
inputs = {k: v.to(self.device) if isinstance(v, torch.Tensor) else v
|
||||
for k, v in inputs.items()}
|
||||
|
||||
# Prepare generation config (matching gradio example)
|
||||
generation_config = {
|
||||
"max_new_tokens": self.max_new_tokens,
|
||||
"temperature": self.temperature if self.temperature > 0 else None,
|
||||
"top_p": self.top_p if self.do_sample else None,
|
||||
"do_sample": self.do_sample,
|
||||
"num_beams": self.num_beams,
|
||||
"repetition_penalty": self.repetition_penalty,
|
||||
"pad_token_id": self.processor.pad_id,
|
||||
"eos_token_id": self.processor.tokenizer.eos_token_id,
|
||||
}
|
||||
|
||||
# Remove None values (matching gradio example)
|
||||
generation_config = {k: v for k, v in generation_config.items() if v is not None}
|
||||
|
||||
print(f"Generating transcription with max_new_tokens: {self.max_new_tokens}, temperature: {self.temperature}, do_sample: {self.do_sample}, num_beams: {self.num_beams}, repetition_penalty: {self.repetition_penalty}", file=sys.stderr)
|
||||
|
||||
# Generate transcription (matching gradio example)
|
||||
with torch.no_grad():
|
||||
output_ids = self.model.generate(
|
||||
**inputs,
|
||||
**generation_config
|
||||
)
|
||||
|
||||
# Decode output (matching gradio example)
|
||||
generated_ids = output_ids[0, inputs['input_ids'].shape[1]:]
|
||||
generated_text = self.processor.decode(generated_ids, skip_special_tokens=True)
|
||||
|
||||
# Parse structured output to get segments
|
||||
result_segments = []
|
||||
try:
|
||||
transcription_segments = self.processor.post_process_transcription(generated_text)
|
||||
|
||||
if transcription_segments:
|
||||
# Map segments to TranscriptSegment format
|
||||
for idx, seg in enumerate(transcription_segments):
|
||||
# Extract timing information (if available)
|
||||
# Handle both dict and object with attributes
|
||||
if isinstance(seg, dict):
|
||||
start_time = seg.get('start_time', 0)
|
||||
end_time = seg.get('end_time', 0)
|
||||
text = seg.get('text', '')
|
||||
speaker_id = seg.get('speaker_id', None)
|
||||
else:
|
||||
# Handle object with attributes
|
||||
start_time = getattr(seg, 'start_time', 0)
|
||||
end_time = getattr(seg, 'end_time', 0)
|
||||
text = getattr(seg, 'text', '')
|
||||
speaker_id = getattr(seg, 'speaker_id', None)
|
||||
|
||||
# Convert time to milliseconds (assuming seconds)
|
||||
start_ms = int(start_time * 1000) if isinstance(start_time, (int, float)) else 0
|
||||
end_ms = int(end_time * 1000) if isinstance(end_time, (int, float)) else 0
|
||||
|
||||
# Add speaker info to text if available
|
||||
if speaker_id is not None:
|
||||
text = f"[Speaker {speaker_id}] {text}"
|
||||
|
||||
result_segments.append(backend_pb2.TranscriptSegment(
|
||||
id=idx,
|
||||
start=start_ms,
|
||||
end=end_ms,
|
||||
text=text,
|
||||
tokens=[] # Token IDs not extracted for now
|
||||
))
|
||||
except Exception as e:
|
||||
print(f"Warning: Failed to parse structured output: {e}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
# Fallback: create a single segment with the full text
|
||||
if generated_text:
|
||||
result_segments.append(backend_pb2.TranscriptSegment(
|
||||
id=0,
|
||||
start=0,
|
||||
end=0,
|
||||
text=generated_text,
|
||||
tokens=[]
|
||||
))
|
||||
|
||||
# Combine all segment texts into full transcription
|
||||
if result_segments:
|
||||
full_text = " ".join([seg.text for seg in result_segments])
|
||||
else:
|
||||
full_text = generated_text if generated_text else ""
|
||||
|
||||
print(f"Transcription completed: {len(result_segments)} segments", file=sys.stderr)
|
||||
|
||||
return backend_pb2.TranscriptResult(
|
||||
segments=result_segments,
|
||||
text=full_text
|
||||
)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error in AudioTranscription: {err}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(
|
||||
segments=[],
|
||||
text="",
|
||||
)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
|
||||
@@ -29,11 +29,13 @@ fi

installRequirements

git clone https://github.com/microsoft/VibeVoice.git
cd VibeVoice/
if [ ! -d VibeVoice ]; then
git clone https://github.com/microsoft/VibeVoice.git
cd VibeVoice/

if [ "x${USE_PIP}" == "xtrue" ]; then
pip install ${EXTRA_PIP_INSTALL_FLAGS:-} .
else
uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} .
if [ "x${USE_PIP}" == "xtrue" ]; then
pip install ${EXTRA_PIP_INSTALL_FLAGS:-} .
else
uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} .
fi
fi
@@ -1,7 +1,7 @@
--extra-index-url https://download.pytorch.org/whl/cpu
git+https://github.com/huggingface/diffusers
opencv-python
transformers==4.51.3
transformers>=4.51.3,<5.0.0
torchvision==0.22.1
accelerate
compel

@@ -1,7 +1,7 @@
--extra-index-url https://download.pytorch.org/whl/cu121
git+https://github.com/huggingface/diffusers
opencv-python
transformers==4.51.3
transformers>=4.51.3,<5.0.0
torchvision
accelerate
compel

@@ -1,7 +1,7 @@
--extra-index-url https://download.pytorch.org/whl/cu130
git+https://github.com/huggingface/diffusers
opencv-python
transformers==4.51.3
transformers>=4.51.3,<5.0.0
torchvision
accelerate
compel

@@ -3,7 +3,7 @@ torch==2.7.1+rocm6.3
torchvision==0.22.1+rocm6.3
git+https://github.com/huggingface/diffusers
opencv-python
transformers==4.51.3
transformers>=4.51.3,<5.0.0
accelerate
compel
peft

@@ -5,7 +5,7 @@ optimum[openvino]
setuptools
git+https://github.com/huggingface/diffusers
opencv-python
transformers==4.51.3
transformers>=4.51.3,<5.0.0
accelerate
compel
peft

@@ -1,7 +1,7 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
torch
git+https://github.com/huggingface/diffusers
transformers==4.51.3
transformers>=4.51.3,<5.0.0
accelerate
compel
peft

@@ -1,7 +1,7 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
git+https://github.com/huggingface/diffusers
transformers==4.51.3
transformers>=4.51.3,<5.0.0
accelerate
compel
peft

@@ -2,7 +2,7 @@ torch==2.7.1
torchvision==0.22.1
git+https://github.com/huggingface/diffusers
opencv-python
transformers==4.51.3
transformers>=4.51.3,<5.0.0
accelerate
compel
peft
@@ -1,14 +1,21 @@
|
||||
"""
|
||||
A test script to test the gRPC service
|
||||
A test script to test the gRPC service for VibeVoice TTS and ASR
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
# Check if we should skip ASR tests (they require large models ~14B parameters total)
|
||||
# Skip in CI or if explicitly disabled
|
||||
SKIP_ASR_TESTS = os.environ.get("SKIP_ASR_TESTS", "false").lower() == "true"
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
@@ -44,15 +51,15 @@ class TestBackendServicer(unittest.TestCase):
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
def test_load_tts_model(self):
|
||||
"""
|
||||
This method tests if the model is loaded successfully
|
||||
This method tests if the TTS model is loaded successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="microsoft/VibeVoice-Realtime-0.5B"))
|
||||
print(response)
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
@@ -62,21 +69,142 @@ class TestBackendServicer(unittest.TestCase):
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_tts(self):
|
||||
@unittest.skipIf(SKIP_ASR_TESTS, "ASR tests require large models (~14B parameters) and are skipped in CI")
|
||||
def test_load_asr_model(self):
|
||||
"""
|
||||
This method tests if the embeddings are generated successfully
|
||||
This method tests if the ASR model is loaded successfully with asr_mode option
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(
|
||||
Model="microsoft/VibeVoice-ASR",
|
||||
Options=["asr_mode:true"]
|
||||
))
|
||||
print(f"LoadModel response: {response}")
|
||||
if not response.success:
|
||||
print(f"LoadModel failed with message: {response.message}")
|
||||
self.assertTrue(response.success, f"LoadModel failed: {response.message}")
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(f"Exception during LoadModel: {err}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
self.fail("LoadModel service failed for ASR mode")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_tts(self):
|
||||
"""
|
||||
This method tests if TTS generation works successfully
|
||||
"""
|
||||
# Create a temporary directory for the output audio file
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
output_file = os.path.join(temp_dir, 'output.wav')
|
||||
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
# Load TTS model
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="microsoft/VibeVoice-Realtime-0.5B"))
|
||||
self.assertTrue(response.success)
|
||||
tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
|
||||
|
||||
# Generate TTS
|
||||
tts_request = backend_pb2.TTSRequest(
|
||||
text="Hello, this is a test of the VibeVoice text to speech system.",
|
||||
dst=output_file
|
||||
)
|
||||
tts_response = stub.TTS(tts_request)
|
||||
|
||||
# Verify response
|
||||
self.assertIsNotNone(tts_response)
|
||||
self.assertTrue(tts_response.success)
|
||||
|
||||
# Verify output file was created
|
||||
self.assertTrue(os.path.exists(output_file), f"Output file was not created: {output_file}")
|
||||
self.assertGreater(os.path.getsize(output_file), 0, "Output file is empty")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("TTS service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
self.tearDown()
|
||||
# Clean up the temporary directory
|
||||
if os.path.exists(temp_dir):
|
||||
shutil.rmtree(temp_dir)
|
||||
|
||||
@unittest.skipIf(SKIP_ASR_TESTS, "ASR tests require large models (~14B parameters) and are skipped in CI")
|
||||
def test_audio_transcription(self):
|
||||
"""
|
||||
This method tests if audio transcription works successfully
|
||||
"""
|
||||
# Create a temporary directory for the audio file
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
audio_file = os.path.join(temp_dir, 'audio.wav')
|
||||
|
||||
try:
|
||||
# Download the audio file to the temporary directory
|
||||
print(f"Downloading audio file to {audio_file}...")
|
||||
url = "https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav"
|
||||
result = subprocess.run(
|
||||
["wget", "-q", url, "-O", audio_file],
|
||||
capture_output=True,
|
||||
text=True
|
||||
)
|
||||
if result.returncode != 0:
|
||||
self.fail(f"Failed to download audio file: {result.stderr}")
|
||||
|
||||
# Verify the file was downloaded
|
||||
if not os.path.exists(audio_file):
|
||||
self.fail(f"Audio file was not downloaded to {audio_file}")
|
||||
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
# Load the ASR model first
|
||||
load_response = stub.LoadModel(backend_pb2.ModelOptions(
|
||||
Model="microsoft/VibeVoice-ASR",
|
||||
Options=["asr_mode:true"]
|
||||
))
|
||||
print(f"LoadModel response: {load_response}")
|
||||
if not load_response.success:
|
||||
print(f"LoadModel failed with message: {load_response.message}")
|
||||
self.assertTrue(load_response.success, f"LoadModel failed: {load_response.message}")
|
||||
|
||||
# Perform transcription
|
||||
transcript_request = backend_pb2.TranscriptRequest(dst=audio_file)
|
||||
transcript_response = stub.AudioTranscription(transcript_request)
|
||||
|
||||
# Print the transcribed text for debugging
|
||||
print(f"Transcribed text: {transcript_response.text}")
|
||||
print(f"Number of segments: {len(transcript_response.segments)}")
|
||||
|
||||
# Verify response structure
|
||||
self.assertIsNotNone(transcript_response)
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
# Protobuf repeated fields return a sequence, not a list
|
||||
self.assertIsNotNone(transcript_response.segments)
|
||||
# Check if segments is iterable (has length)
|
||||
self.assertGreaterEqual(len(transcript_response.segments), 0)
|
||||
|
||||
# Verify the transcription contains some text
|
||||
self.assertGreater(len(transcript_response.text), 0, "Transcription should not be empty")
|
||||
|
||||
# If we got segments, verify they have the expected structure
|
||||
if len(transcript_response.segments) > 0:
|
||||
segment = transcript_response.segments[0]
|
||||
self.assertIsNotNone(segment.text)
|
||||
self.assertIsInstance(segment.id, int)
|
||||
else:
|
||||
# Even if no segments, we should have text
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
self.assertGreater(len(transcript_response.text), 0)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("AudioTranscription service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
# Clean up the temporary directory
|
||||
if os.path.exists(temp_dir):
|
||||
shutil.rmtree(temp_dir)
|
||||
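For reference, the TTS path exercised by the updated test above can be driven the same way from a standalone client. The sketch below is illustrative only: it assumes a running backend on localhost:50051, generated protobuf stubs, and a placeholder output path.

# Minimal TTS client sketch against the vibevoice backend (assumptions noted above).
import grpc
import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    load = stub.LoadModel(backend_pb2.ModelOptions(Model="microsoft/VibeVoice-Realtime-0.5B"))
    assert load.success, load.message
    tts = stub.TTS(backend_pb2.TTSRequest(
        text="Hello from the VibeVoice backend.",
        dst="/tmp/vibevoice-output.wav",  # placeholder destination
    ))
    assert tts.success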
23
backend/python/vllm-omni/Makefile
Normal file
@@ -0,0 +1,23 @@
.PHONY: vllm-omni
vllm-omni:
	bash install.sh

.PHONY: run
run: vllm-omni
	@echo "Running vllm-omni..."
	bash run.sh
	@echo "vllm-omni run."

.PHONY: test
test: vllm-omni
	@echo "Testing vllm-omni..."
	bash test.sh
	@echo "vllm-omni tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
682
backend/python/vllm-omni/backend.py
Normal file
@@ -0,0 +1,682 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
LocalAI vLLM-Omni Backend
|
||||
|
||||
This backend provides gRPC access to vllm-omni for multimodal generation:
|
||||
- Image generation (text-to-image, image editing)
|
||||
- Video generation (text-to-video, image-to-video)
|
||||
- Text generation with multimodal inputs (LLM)
|
||||
- Text-to-speech generation
|
||||
"""
|
||||
from concurrent import futures
|
||||
import traceback
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
import base64
|
||||
import io
|
||||
|
||||
from PIL import Image
|
||||
import torch
|
||||
import numpy as np
|
||||
import soundfile as sf
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
from vllm_omni.entrypoints.omni import Omni
|
||||
from vllm_omni.outputs import OmniRequestOutput
|
||||
from vllm_omni.diffusion.data import DiffusionParallelConfig
|
||||
from vllm_omni.utils.platform_utils import detect_device_type, is_npu
|
||||
from vllm import SamplingParams
|
||||
from diffusers.utils import export_to_video
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
|
||||
def _detect_model_type(self, model_name):
|
||||
"""Detect model type from model name."""
|
||||
model_lower = model_name.lower()
|
||||
if "tts" in model_lower or "qwen3-tts" in model_lower:
|
||||
return "tts"
|
||||
elif "omni" in model_lower and "qwen3" in model_lower:
|
||||
return "llm"
|
||||
elif "wan" in model_lower or "t2v" in model_lower or "i2v" in model_lower:
|
||||
return "video"
|
||||
elif "image" in model_lower or "z-image" in model_lower or "qwen-image" in model_lower:
|
||||
return "image"
|
||||
else:
|
||||
# Default to image for diffusion models, llm for others
|
||||
return "image"
|
||||
|
||||
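# Illustrative examples of how the name heuristic above classifies models
# (editor's comments only, not part of the generated file; the example model
# name strings are hypothetical inputs to the string checks):
#   _detect_model_type("Qwen3-TTS-Flash")    -> "tts"
#   _detect_model_type("Qwen3-Omni-30B-A3B") -> "llm"
#   _detect_model_type("Wan2.2-T2V-A14B")    -> "video"
#   _detect_model_type("Qwen-Image-Edit")    -> "image"
#   any other name falls through to the "image" default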
def _detect_tts_task_type(self):
|
||||
"""Detect TTS task type from model name."""
|
||||
model_lower = self.model_name.lower()
|
||||
if "customvoice" in model_lower:
|
||||
return "CustomVoice"
|
||||
elif "voicedesign" in model_lower:
|
||||
return "VoiceDesign"
|
||||
elif "base" in model_lower:
|
||||
return "Base"
|
||||
else:
|
||||
# Default to CustomVoice
|
||||
return "CustomVoice"
|
||||
|
||||
def _load_image(self, image_path):
|
||||
"""Load an image from file path or base64 encoded data."""
|
||||
# Try file path first
|
||||
if os.path.exists(image_path):
|
||||
return Image.open(image_path)
|
||||
# Try base64 decode
|
||||
try:
|
||||
image_data = base64.b64decode(image_path)
|
||||
return Image.open(io.BytesIO(image_data))
|
||||
except:
|
||||
return None
|
||||
|
||||
def _load_video(self, video_path):
|
||||
"""Load a video from file path or base64 encoded data."""
|
||||
from vllm.assets.video import VideoAsset, video_to_ndarrays
|
||||
if os.path.exists(video_path):
|
||||
return video_to_ndarrays(video_path, num_frames=16)
|
||||
# Try base64 decode
|
||||
try:
|
||||
timestamp = str(int(time.time() * 1000))
|
||||
p = f"/tmp/vl-{timestamp}.data"
|
||||
with open(p, "wb") as f:
|
||||
f.write(base64.b64decode(video_path))
|
||||
video = VideoAsset(name=p).np_ndarrays
|
||||
os.remove(p)
|
||||
return video
|
||||
except:
|
||||
return None
|
||||
|
||||
def _load_audio(self, audio_path):
|
||||
"""Load audio from file path or base64 encoded data."""
|
||||
import librosa
|
||||
if os.path.exists(audio_path):
|
||||
audio_signal, sr = librosa.load(audio_path, sr=16000)
|
||||
return (audio_signal.astype(np.float32), sr)
|
||||
# Try base64 decode
|
||||
try:
|
||||
audio_data = base64.b64decode(audio_path)
|
||||
# Save to temp file and load
|
||||
timestamp = str(int(time.time() * 1000))
|
||||
p = f"/tmp/audio-{timestamp}.wav"
|
||||
with open(p, "wb") as f:
|
||||
f.write(audio_data)
|
||||
audio_signal, sr = librosa.load(p, sr=16000)
|
||||
os.remove(p)
|
||||
return (audio_signal.astype(np.float32), sr)
|
||||
except:
|
||||
return None
|
||||
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
try:
|
||||
print(f"Loading model {request.Model}...", file=sys.stderr)
|
||||
print(f"Request {request}", file=sys.stderr)
|
||||
|
||||
# Parse options from request.Options (key:value pairs)
|
||||
self.options = {}
|
||||
for opt in request.Options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1)
|
||||
# Convert value to appropriate type
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
print(f"Options: {self.options}", file=sys.stderr)
|
||||
|
||||
# Detect model type
|
||||
self.model_name = request.Model
|
||||
self.model_type = request.Type if request.Type else self._detect_model_type(request.Model)
|
||||
print(f"Detected model type: {self.model_type}", file=sys.stderr)
|
||||
|
||||
# Build DiffusionParallelConfig if diffusion model (image or video)
|
||||
parallel_config = None
|
||||
if self.model_type in ["image", "video"]:
|
||||
parallel_config = DiffusionParallelConfig(
|
||||
ulysses_degree=self.options.get("ulysses_degree", 1),
|
||||
ring_degree=self.options.get("ring_degree", 1),
|
||||
cfg_parallel_size=self.options.get("cfg_parallel_size", 1),
|
||||
tensor_parallel_size=self.options.get("tensor_parallel_size", 1),
|
||||
)
|
||||
|
||||
# Build cache_config dict if cache_backend specified
|
||||
cache_backend = self.options.get("cache_backend") # "cache_dit" or "tea_cache"
|
||||
cache_config = None
|
||||
if cache_backend == "cache_dit":
|
||||
cache_config = {
|
||||
"Fn_compute_blocks": self.options.get("cache_dit_fn_compute_blocks", 1),
|
||||
"Bn_compute_blocks": self.options.get("cache_dit_bn_compute_blocks", 0),
|
||||
"max_warmup_steps": self.options.get("cache_dit_max_warmup_steps", 4),
|
||||
"residual_diff_threshold": self.options.get("cache_dit_residual_diff_threshold", 0.24),
|
||||
"max_continuous_cached_steps": self.options.get("cache_dit_max_continuous_cached_steps", 3),
|
||||
"enable_taylorseer": self.options.get("cache_dit_enable_taylorseer", False),
|
||||
"taylorseer_order": self.options.get("cache_dit_taylorseer_order", 1),
|
||||
"scm_steps_mask_policy": self.options.get("cache_dit_scm_steps_mask_policy"),
|
||||
"scm_steps_policy": self.options.get("cache_dit_scm_steps_policy", "dynamic"),
|
||||
}
|
||||
elif cache_backend == "tea_cache":
|
||||
cache_config = {
|
||||
"rel_l1_thresh": self.options.get("tea_cache_rel_l1_thresh", 0.2),
|
||||
}
|
||||
|
||||
# Base Omni initialization parameters
|
||||
omni_kwargs = {
|
||||
"model": request.Model,
|
||||
}
|
||||
|
||||
# Add diffusion-specific parameters (image/video models)
|
||||
if self.model_type in ["image", "video"]:
|
||||
omni_kwargs.update({
|
||||
"vae_use_slicing": is_npu(),
|
||||
"vae_use_tiling": is_npu(),
|
||||
"cache_backend": cache_backend,
|
||||
"cache_config": cache_config,
|
||||
"parallel_config": parallel_config,
|
||||
"enforce_eager": self.options.get("enforce_eager", request.EnforceEager),
|
||||
"enable_cpu_offload": self.options.get("enable_cpu_offload", False),
|
||||
})
|
||||
# Video-specific parameters
|
||||
if self.model_type == "video":
|
||||
omni_kwargs.update({
|
||||
"boundary_ratio": self.options.get("boundary_ratio", 0.875),
|
||||
"flow_shift": self.options.get("flow_shift", 5.0),
|
||||
})
|
||||
|
||||
# Add LLM/TTS-specific parameters
|
||||
if self.model_type in ["llm", "tts"]:
|
||||
omni_kwargs.update({
|
||||
"stage_configs_path": self.options.get("stage_configs_path"),
|
||||
"log_stats": self.options.get("enable_stats", False),
|
||||
"stage_init_timeout": self.options.get("stage_init_timeout", 300),
|
||||
})
|
||||
# vllm engine options (passed through Omni for LLM/TTS)
|
||||
if request.GPUMemoryUtilization > 0:
|
||||
omni_kwargs["gpu_memory_utilization"] = request.GPUMemoryUtilization
|
||||
if request.TensorParallelSize > 0:
|
||||
omni_kwargs["tensor_parallel_size"] = request.TensorParallelSize
|
||||
if request.TrustRemoteCode:
|
||||
omni_kwargs["trust_remote_code"] = request.TrustRemoteCode
|
||||
if request.MaxModelLen > 0:
|
||||
omni_kwargs["max_model_len"] = request.MaxModelLen
|
||||
|
||||
self.omni = Omni(**omni_kwargs)
|
||||
print("Model loaded successfully", file=sys.stderr)
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
def GenerateImage(self, request, context):
|
||||
try:
|
||||
# Validate model is loaded and is image/diffusion type
|
||||
if not hasattr(self, 'omni'):
|
||||
return backend_pb2.Result(success=False, message="Model not loaded. Call LoadModel first.")
|
||||
if self.model_type not in ["image"]:
|
||||
return backend_pb2.Result(success=False, message=f"Model type {self.model_type} does not support image generation")
|
||||
|
||||
# Extract parameters
|
||||
prompt = request.positive_prompt
|
||||
negative_prompt = request.negative_prompt if request.negative_prompt else None
|
||||
width = request.width if request.width > 0 else 1024
|
||||
height = request.height if request.height > 0 else 1024
|
||||
seed = request.seed if request.seed > 0 else None
|
||||
num_inference_steps = request.step if request.step > 0 else 50
|
||||
cfg_scale = self.options.get("cfg_scale", 4.0)
|
||||
guidance_scale = self.options.get("guidance_scale", 1.0)
|
||||
|
||||
# Create generator if seed provided
|
||||
generator = None
|
||||
if seed:
|
||||
device = detect_device_type()
|
||||
generator = torch.Generator(device=device).manual_seed(seed)
|
||||
|
||||
# Handle image input for image editing
|
||||
pil_image = None
|
||||
if request.src or (request.ref_images and len(request.ref_images) > 0):
|
||||
image_path = request.ref_images[0] if request.ref_images else request.src
|
||||
pil_image = self._load_image(image_path)
|
||||
if pil_image is None:
|
||||
return backend_pb2.Result(success=False, message=f"Invalid image source: {image_path}")
|
||||
pil_image = pil_image.convert("RGB")
|
||||
|
||||
# Build generate kwargs
|
||||
generate_kwargs = {
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
"height": height,
|
||||
"width": width,
|
||||
"generator": generator,
|
||||
"true_cfg_scale": cfg_scale,
|
||||
"guidance_scale": guidance_scale,
|
||||
"num_inference_steps": num_inference_steps,
|
||||
}
|
||||
if pil_image:
|
||||
generate_kwargs["pil_image"] = pil_image
|
||||
|
||||
# Call omni.generate()
|
||||
outputs = self.omni.generate(**generate_kwargs)
|
||||
|
||||
# Extract images (following example pattern)
|
||||
if not outputs or len(outputs) == 0:
|
||||
return backend_pb2.Result(success=False, message="No output generated")
|
||||
|
||||
first_output = outputs[0]
|
||||
if not hasattr(first_output, "request_output") or not first_output.request_output:
|
||||
return backend_pb2.Result(success=False, message="Invalid output structure")
|
||||
|
||||
req_out = first_output.request_output[0]
|
||||
if not isinstance(req_out, OmniRequestOutput) or not hasattr(req_out, "images"):
|
||||
return backend_pb2.Result(success=False, message="No images in output")
|
||||
|
||||
images = req_out.images
|
||||
if not images or len(images) == 0:
|
||||
return backend_pb2.Result(success=False, message="Empty images list")
|
||||
|
||||
# Save image
|
||||
output_image = images[0]
|
||||
output_image.save(request.dst)
|
||||
return backend_pb2.Result(message="Image generated successfully", success=True)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error generating image: {err}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Error generating image: {err}")
|
||||
|
||||
def GenerateVideo(self, request, context):
|
||||
try:
|
||||
# Validate model is loaded and is video/diffusion type
|
||||
if not hasattr(self, 'omni'):
|
||||
return backend_pb2.Result(success=False, message="Model not loaded. Call LoadModel first.")
|
||||
if self.model_type not in ["video"]:
|
||||
return backend_pb2.Result(success=False, message=f"Model type {self.model_type} does not support video generation")
|
||||
|
||||
# Extract parameters
|
||||
prompt = request.prompt
|
||||
negative_prompt = request.negative_prompt if request.negative_prompt else ""
|
||||
width = request.width if request.width > 0 else 1280
|
||||
height = request.height if request.height > 0 else 720
|
||||
num_frames = request.num_frames if request.num_frames > 0 else 81
|
||||
fps = request.fps if request.fps > 0 else 24
|
||||
seed = request.seed if request.seed > 0 else None
|
||||
guidance_scale = request.cfg_scale if request.cfg_scale > 0 else 4.0
|
||||
guidance_scale_high = self.options.get("guidance_scale_high")
|
||||
num_inference_steps = request.step if request.step > 0 else 40
|
||||
|
||||
# Create generator
|
||||
generator = None
|
||||
if seed:
|
||||
device = detect_device_type()
|
||||
generator = torch.Generator(device=device).manual_seed(seed)
|
||||
|
||||
# Handle image input for image-to-video
|
||||
pil_image = None
|
||||
if request.start_image:
|
||||
pil_image = self._load_image(request.start_image)
|
||||
if pil_image is None:
|
||||
return backend_pb2.Result(success=False, message=f"Invalid start_image: {request.start_image}")
|
||||
pil_image = pil_image.convert("RGB")
|
||||
# Resize to target dimensions
|
||||
pil_image = pil_image.resize((width, height), Image.Resampling.LANCZOS)
|
||||
|
||||
# Build generate kwargs
|
||||
generate_kwargs = {
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
"height": height,
|
||||
"width": width,
|
||||
"generator": generator,
|
||||
"guidance_scale": guidance_scale,
|
||||
"num_inference_steps": num_inference_steps,
|
||||
"num_frames": num_frames,
|
||||
}
|
||||
if pil_image:
|
||||
generate_kwargs["pil_image"] = pil_image
|
||||
if guidance_scale_high:
|
||||
generate_kwargs["guidance_scale_2"] = guidance_scale_high
|
||||
|
||||
# Call omni.generate()
|
||||
frames = self.omni.generate(**generate_kwargs)
|
||||
|
||||
# Extract video frames (following example pattern)
|
||||
if isinstance(frames, list) and len(frames) > 0:
|
||||
first_item = frames[0]
|
||||
|
||||
if hasattr(first_item, "final_output_type"):
|
||||
if first_item.final_output_type != "image":
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected output type: {first_item.final_output_type}")
|
||||
|
||||
# Pipeline mode: extract from nested request_output
|
||||
if hasattr(first_item, "is_pipeline_output") and first_item.is_pipeline_output:
|
||||
if isinstance(first_item.request_output, list) and len(first_item.request_output) > 0:
|
||||
inner_output = first_item.request_output[0]
|
||||
if isinstance(inner_output, OmniRequestOutput) and hasattr(inner_output, "images"):
|
||||
frames = inner_output.images[0] if inner_output.images else None
|
||||
# Diffusion mode: use direct images field
|
||||
elif hasattr(first_item, "images") and first_item.images:
|
||||
frames = first_item.images
|
||||
else:
|
||||
return backend_pb2.Result(success=False, message="No video frames found")
|
||||
|
||||
if frames is None:
|
||||
return backend_pb2.Result(success=False, message="No video frames found in output")
|
||||
|
||||
# Convert frames to numpy array (following example)
|
||||
if isinstance(frames, torch.Tensor):
|
||||
video_tensor = frames.detach().cpu()
|
||||
# Handle different tensor shapes [B, C, F, H, W] or [B, F, H, W, C]
|
||||
if video_tensor.dim() == 5:
|
||||
if video_tensor.shape[1] in (3, 4):
|
||||
video_tensor = video_tensor[0].permute(1, 2, 3, 0)
|
||||
else:
|
||||
video_tensor = video_tensor[0]
|
||||
elif video_tensor.dim() == 4 and video_tensor.shape[0] in (3, 4):
|
||||
video_tensor = video_tensor.permute(1, 2, 3, 0)
|
||||
# Normalize from [-1,1] to [0,1] if float
|
||||
if video_tensor.is_floating_point():
|
||||
video_tensor = video_tensor.clamp(-1, 1) * 0.5 + 0.5
|
||||
video_array = video_tensor.float().numpy()
|
||||
else:
|
||||
video_array = frames
|
||||
if hasattr(video_array, "shape") and video_array.ndim == 5:
|
||||
video_array = video_array[0]
|
||||
|
||||
# Convert 4D array (frames, H, W, C) to list of frames
|
||||
if isinstance(video_array, np.ndarray) and video_array.ndim == 4:
|
||||
video_array = list(video_array)
|
||||
|
||||
# Save video
|
||||
export_to_video(video_array, request.dst, fps=fps)
|
||||
return backend_pb2.Result(message="Video generated successfully", success=True)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error generating video: {err}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Error generating video: {err}")
|
||||
|
||||
def Predict(self, request, context):
|
||||
"""Non-streaming text generation with multimodal inputs."""
|
||||
gen = self._predict(request, context, streaming=False)
|
||||
try:
|
||||
res = next(gen)
|
||||
return res
|
||||
except StopIteration:
|
||||
return backend_pb2.Reply(message=bytes("", 'utf-8'))
|
||||
|
||||
def PredictStream(self, request, context):
|
||||
"""Streaming text generation with multimodal inputs."""
|
||||
return self._predict(request, context, streaming=True)
|
||||
|
||||
def _predict(self, request, context, streaming=False):
|
||||
"""Internal method for text generation (streaming and non-streaming)."""
|
||||
try:
|
||||
# Validate model is loaded and is LLM type
|
||||
if not hasattr(self, 'omni'):
|
||||
yield backend_pb2.Reply(message=bytes("Model not loaded. Call LoadModel first.", 'utf-8'))
|
||||
return
|
||||
if self.model_type not in ["llm"]:
|
||||
yield backend_pb2.Reply(message=bytes(f"Model type {self.model_type} does not support text generation", 'utf-8'))
|
||||
return
|
||||
|
||||
# Extract prompt
|
||||
if request.Prompt:
|
||||
prompt = request.Prompt
|
||||
elif request.Messages and request.UseTokenizerTemplate:
|
||||
# Build prompt from messages (simplified - would need tokenizer for full template)
|
||||
prompt = ""
|
||||
for msg in request.Messages:
|
||||
role = msg.role
|
||||
content = msg.content
|
||||
prompt += f"<|im_start|>{role}\n{content}<|im_end|>\n"
|
||||
prompt += "<|im_start|>assistant\n"
|
||||
else:
|
||||
yield backend_pb2.Reply(message=bytes("", 'utf-8'))
|
||||
return
|
||||
|
||||
# Build multi_modal_data dict
|
||||
multi_modal_data = {}
|
||||
|
||||
# Process images
|
||||
if request.Images:
|
||||
image_data = []
|
||||
for img_path in request.Images:
|
||||
img = self._load_image(img_path)
|
||||
if img:
|
||||
# Convert to format expected by vllm
|
||||
from vllm.multimodal.image import convert_image_mode
|
||||
img_data = convert_image_mode(img, "RGB")
|
||||
image_data.append(img_data)
|
||||
if image_data:
|
||||
multi_modal_data["image"] = image_data
|
||||
|
||||
# Process videos
|
||||
if request.Videos:
|
||||
video_data = []
|
||||
for video_path in request.Videos:
|
||||
video = self._load_video(video_path)
|
||||
if video is not None:
|
||||
video_data.append(video)
|
||||
if video_data:
|
||||
multi_modal_data["video"] = video_data
|
||||
|
||||
# Process audio
|
||||
if request.Audios:
|
||||
audio_data = []
|
||||
for audio_path in request.Audios:
|
||||
audio = self._load_audio(audio_path)
|
||||
if audio is not None:
|
||||
audio_data.append(audio)
|
||||
if audio_data:
|
||||
multi_modal_data["audio"] = audio_data
|
||||
|
||||
# Build inputs dict
|
||||
inputs = {
|
||||
"prompt": prompt,
|
||||
"multi_modal_data": multi_modal_data if multi_modal_data else None,
|
||||
}
|
||||
|
||||
# Build sampling params
|
||||
sampling_params = SamplingParams(
|
||||
temperature=request.Temperature if request.Temperature > 0 else 0.7,
|
||||
top_p=request.TopP if request.TopP > 0 else 0.9,
|
||||
top_k=request.TopK if request.TopK > 0 else -1,
|
||||
max_tokens=request.Tokens if request.Tokens > 0 else 200,
|
||||
presence_penalty=request.PresencePenalty if request.PresencePenalty != 0 else 0.0,
|
||||
frequency_penalty=request.FrequencyPenalty if request.FrequencyPenalty != 0 else 0.0,
|
||||
repetition_penalty=request.RepetitionPenalty if request.RepetitionPenalty != 0 else 1.0,
|
||||
seed=request.Seed if request.Seed > 0 else None,
|
||||
stop=request.StopPrompts if request.StopPrompts else None,
|
||||
stop_token_ids=request.StopTokenIds if request.StopTokenIds else None,
|
||||
ignore_eos=request.IgnoreEOS,
|
||||
)
|
||||
sampling_params_list = [sampling_params]
|
||||
|
||||
# Call omni.generate() (returns generator for LLM mode)
|
||||
omni_generator = self.omni.generate([inputs], sampling_params_list)
|
||||
|
||||
# Extract text from outputs
|
||||
generated_text = ""
|
||||
for stage_outputs in omni_generator:
|
||||
if stage_outputs.final_output_type == "text":
|
||||
for output in stage_outputs.request_output:
|
||||
text_output = output.outputs[0].text
|
||||
if streaming:
|
||||
# vllm returns the full generated text so far, so send only the not-yet-sent suffix
|
||||
delta_text = text_output.removeprefix(generated_text)
|
||||
yield backend_pb2.Reply(message=bytes(delta_text, encoding='utf-8'))
|
||||
generated_text = text_output
|
||||
|
||||
if not streaming:
|
||||
yield backend_pb2.Reply(message=bytes(generated_text, encoding='utf-8'))
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error in Predict: {err}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
yield backend_pb2.Reply(message=bytes(f"Error: {err}", encoding='utf-8'))
|
||||
|
||||
def TTS(self, request, context):
|
||||
try:
|
||||
# Validate model is loaded and is TTS type
|
||||
if not hasattr(self, 'omni'):
|
||||
return backend_pb2.Result(success=False, message="Model not loaded. Call LoadModel first.")
|
||||
if self.model_type not in ["tts"]:
|
||||
return backend_pb2.Result(success=False, message=f"Model type {self.model_type} does not support TTS")
|
||||
|
||||
# Extract parameters
|
||||
text = request.text
|
||||
language = request.language if request.language else "Auto"
|
||||
voice = request.voice if request.voice else None
|
||||
task_type = self._detect_tts_task_type()
|
||||
|
||||
# Build prompt with chat template
|
||||
# TODO: vllm-omni currently supports only qwen3-tts, so the prompt format is hardcoded here;
# other models should be supported in the future, which will likely require applying the model's chat template.
|
||||
prompt = f"<|im_start|>assistant\n{text}<|im_end|>\n<|im_start|>assistant\n"
|
||||
|
||||
# Build inputs dict
|
||||
inputs = {
|
||||
"prompt": prompt,
|
||||
"additional_information": {
|
||||
"task_type": [task_type],
|
||||
"text": [text],
|
||||
"language": [language],
|
||||
"max_new_tokens": [2048],
|
||||
}
|
||||
}
|
||||
|
||||
# Add task-specific fields
|
||||
if task_type == "CustomVoice":
|
||||
if voice:
|
||||
inputs["additional_information"]["speaker"] = [voice]
|
||||
# Add instruct if provided in options
|
||||
if "instruct" in self.options:
|
||||
inputs["additional_information"]["instruct"] = [self.options["instruct"]]
|
||||
elif task_type == "VoiceDesign":
|
||||
if "instruct" in self.options:
|
||||
inputs["additional_information"]["instruct"] = [self.options["instruct"]]
|
||||
inputs["additional_information"]["non_streaming_mode"] = [True]
|
||||
elif task_type == "Base":
|
||||
# Voice cloning requires ref_audio and ref_text
|
||||
if "ref_audio" in self.options:
|
||||
inputs["additional_information"]["ref_audio"] = [self.options["ref_audio"]]
|
||||
if "ref_text" in self.options:
|
||||
inputs["additional_information"]["ref_text"] = [self.options["ref_text"]]
|
||||
if "x_vector_only_mode" in self.options:
|
||||
inputs["additional_information"]["x_vector_only_mode"] = [self.options["x_vector_only_mode"]]
|
||||
|
||||
# Build sampling params
|
||||
sampling_params = SamplingParams(
|
||||
temperature=0.9,
|
||||
top_p=1.0,
|
||||
top_k=50,
|
||||
max_tokens=2048,
|
||||
seed=42,
|
||||
detokenize=False,
|
||||
repetition_penalty=1.05,
|
||||
)
|
||||
sampling_params_list = [sampling_params]
|
||||
|
||||
# Call omni.generate()
|
||||
omni_generator = self.omni.generate(inputs, sampling_params_list)
|
||||
|
||||
# Extract audio (following TTS example)
|
||||
for stage_outputs in omni_generator:
|
||||
for output in stage_outputs.request_output:
|
||||
if "audio" in output.multimodal_output:
|
||||
audio_tensor = output.multimodal_output["audio"]
|
||||
audio_samplerate = output.multimodal_output["sr"].item()
|
||||
|
||||
# Convert to numpy
|
||||
audio_numpy = audio_tensor.float().detach().cpu().numpy()
|
||||
if audio_numpy.ndim > 1:
|
||||
audio_numpy = audio_numpy.flatten()
|
||||
|
||||
# Save audio file
|
||||
sf.write(request.dst, audio_numpy, samplerate=audio_samplerate, format="WAV")
|
||||
return backend_pb2.Result(message="TTS audio generated successfully", success=True)
|
||||
|
||||
return backend_pb2.Result(success=False, message="No audio output generated")
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error generating TTS: {err}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Error generating TTS: {err}")
|
||||
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024),
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024),
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Signal handlers for graceful shutdown
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
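A quick smoke test against a running instance of this server could look like the sketch below; HealthMessage is assumed from the shared LocalAI backend.proto, and the model name mirrors the one used in test.py further down:

import grpc
import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    print(stub.Health(backend_pb2.HealthMessage()).message)  # expected: b"OK"
    res = stub.LoadModel(backend_pb2.ModelOptions(Model="Tongyi-MAI/Z-Image-Turbo"))
    print(res.success, res.message)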
|
||||
62
backend/python/vllm-omni/install.sh
Executable file
@@ -0,0 +1,62 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
PYTHON_VERSION="3.12"
|
||||
PYTHON_PATCH="12"
|
||||
PY_STANDALONE_TAG="20251120"
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
# Handle l4t build profiles (Python 3.12, pip fallback) if needed
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
|
||||
PYTHON_VERSION="3.12"
|
||||
PYTHON_PATCH="12"
|
||||
PY_STANDALONE_TAG="20251120"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
|
||||
USE_PIP=true
|
||||
fi
|
||||
|
||||
# Install base requirements first
|
||||
installRequirements
|
||||
|
||||
# Install vllm based on build type
|
||||
if [ "x${BUILD_TYPE}" == "xhipblas" ]; then
|
||||
# ROCm
|
||||
if [ "x${USE_PIP}" == "xtrue" ]; then
|
||||
pip install vllm==0.14.0 --extra-index-url https://wheels.vllm.ai/rocm/0.14.0/rocm700
|
||||
else
|
||||
uv pip install vllm==0.14.0 --extra-index-url https://wheels.vllm.ai/rocm/0.14.0/rocm700
|
||||
fi
|
||||
elif [ "x${BUILD_TYPE}" == "xcublas" ] || [ "x${BUILD_TYPE}" == "x" ]; then
|
||||
# CUDA (default) or CPU
|
||||
if [ "x${USE_PIP}" == "xtrue" ]; then
|
||||
pip install vllm==0.14.0 --torch-backend=auto
|
||||
else
|
||||
uv pip install vllm==0.14.0 --torch-backend=auto
|
||||
fi
|
||||
else
|
||||
echo "Unsupported build type: ${BUILD_TYPE}" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Clone and install vllm-omni from source
|
||||
if [ ! -d vllm-omni ]; then
|
||||
git clone https://github.com/vllm-project/vllm-omni.git
|
||||
fi
|
||||
|
||||
cd vllm-omni/
|
||||
|
||||
if [ "x${USE_PIP}" == "xtrue" ]; then
|
||||
pip install ${EXTRA_PIP_INSTALL_FLAGS:-} -e .
|
||||
else
|
||||
uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} -e .
|
||||
fi
|
||||
|
||||
cd ..
|
||||
2
backend/python/vllm-omni/requirements-after.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
diffusers
|
||||
librosa
|
||||
1
backend/python/vllm-omni/requirements-cublas12-after.txt
Normal file
@@ -0,0 +1 @@
|
||||
https://github.com/Dao-AILab/flash-attention/releases/download/v2.8.3/flash_attn-2.8.3+cu12torch2.7cxx11abiTRUE-cp310-cp310-linux_x86_64.whl
|
||||
4
backend/python/vllm-omni/requirements-cublas12.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
accelerate
|
||||
torch==2.7.0
|
||||
transformers
|
||||
bitsandbytes
|
||||
5
backend/python/vllm-omni/requirements-hipblas.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/nightly/rocm6.4
|
||||
accelerate
|
||||
torch
|
||||
transformers
|
||||
bitsandbytes
|
||||
7
backend/python/vllm-omni/requirements.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
setuptools
|
||||
pillow
|
||||
numpy
|
||||
soundfile
|
||||
@@ -1,11 +1,11 @@
|
||||
#!/bin/bash
|
||||
LIMIT_TARGETS="cublas"
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
startBackend $@
|
||||
@@ -1,6 +1,3 @@
|
||||
"""
|
||||
A test script to test the gRPC service
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
@@ -9,29 +6,21 @@ import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service
|
||||
TestBackendServicer is the class that tests the gRPC service.
|
||||
|
||||
This class contains methods to test the startup and shutdown of the gRPC service.
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
This method sets up the gRPC service by starting the server
|
||||
"""
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(10)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
"""
|
||||
This method tears down the gRPC service by terminating the server
|
||||
"""
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
"""
|
||||
This method tests if the server starts up successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
@@ -52,7 +41,8 @@ class TestBackendServicer(unittest.TestCase):
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="v2/en_speaker_4"))
|
||||
# Use a small image generation model for testing
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="Tongyi-MAI/Z-Image-Turbo"))
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
@@ -61,21 +51,32 @@ class TestBackendServicer(unittest.TestCase):
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_tts(self):
|
||||
def test_generate_image(self):
|
||||
"""
|
||||
This method tests if the embeddings are generated successfully
|
||||
This method tests if image generation works
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="v2/en_speaker_4"))
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="Tongyi-MAI/Z-Image-Turbo"))
|
||||
self.assertTrue(response.success)
|
||||
tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
|
||||
tts_response = stub.TTS(tts_request)
|
||||
self.assertIsNotNone(tts_response)
|
||||
|
||||
req = backend_pb2.GenerateImageRequest(
|
||||
positive_prompt="a cup of coffee on the table",
|
||||
dst="/tmp/test_output.png",
|
||||
width=512,
|
||||
height=512,
|
||||
step=20,
|
||||
seed=42
|
||||
)
|
||||
resp = stub.GenerateImage(req)
|
||||
self.assertTrue(resp.success)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("TTS service failed")
|
||||
self.fail("GenerateImage service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
self.tearDown()
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
12
backend/python/vllm-omni/test.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
runUnittests
|
||||
@@ -1,18 +1,18 @@
|
||||
.PHONY: ttsbark
|
||||
ttsbark:
|
||||
.PHONY: voxcpm
|
||||
voxcpm:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: ttsbark
|
||||
@echo "Running bark..."
|
||||
run: voxcpm
|
||||
@echo "Running voxcpm..."
|
||||
bash run.sh
|
||||
@echo "bark run."
|
||||
@echo "voxcpm run."
|
||||
|
||||
.PHONY: test
|
||||
test: ttsbark
|
||||
@echo "Testing bark..."
|
||||
test: voxcpm
|
||||
@echo "Testing voxcpm..."
|
||||
bash test.sh
|
||||
@echo "bark tested."
|
||||
@echo "voxcpm tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
@@ -20,4 +20,4 @@ protogen-clean:
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
rm -rf venv __pycache__
|
||||
329
backend/python/voxcpm/backend.py
Normal file
@@ -0,0 +1,329 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for VoxCPM
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import traceback
|
||||
import numpy as np
|
||||
import soundfile as sf
|
||||
from voxcpm import VoxCPM
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import torch
|
||||
|
||||
import grpc
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If PYTHON_GRPC_MAX_WORKERS is specified in the environment, use it; otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
# Get device
|
||||
if torch.cuda.is_available():
|
||||
print("CUDA is available", file=sys.stderr)
|
||||
device = "cuda"
|
||||
else:
|
||||
print("CUDA is not available", file=sys.stderr)
|
||||
device = "cpu"
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
# Normalize potential 'mpx' typo to 'mps'
|
||||
if device == "mpx":
|
||||
print("Note: device 'mpx' detected, treating it as 'mps'.", file=sys.stderr)
|
||||
device = "mps"
|
||||
|
||||
# Validate mps availability if requested
|
||||
if device == "mps" and not torch.backends.mps.is_available():
|
||||
print("Warning: MPS not available. Falling back to CPU.", file=sys.stderr)
|
||||
device = "cpu"
|
||||
|
||||
self.device = device
|
||||
|
||||
options = request.Options
|
||||
|
||||
# empty dict
|
||||
self.options = {}
|
||||
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We are storing all the options in a dict so we can use it later when
|
||||
# generating the audio
|
||||
for opt in options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1) # Split only on first colon
|
||||
# if value is a number, convert it to the appropriate type
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
# Get model path from request
|
||||
model_path = request.Model
|
||||
if not model_path:
|
||||
model_path = "openbmb/VoxCPM1.5"
|
||||
|
||||
try:
|
||||
print(f"Loading model from {model_path}", file=sys.stderr)
|
||||
self.model = VoxCPM.from_pretrained(model_path)
|
||||
print(f"Model loaded successfully on device: {self.device}", file=sys.stderr)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def TTS(self, request, context):
|
||||
try:
|
||||
# Get generation parameters from options with defaults
|
||||
cfg_value = self.options.get("cfg_value", 2.0)
|
||||
inference_timesteps = self.options.get("inference_timesteps", 10)
|
||||
normalize = self.options.get("normalize", False)
|
||||
denoise = self.options.get("denoise", False)
|
||||
retry_badcase = self.options.get("retry_badcase", True)
|
||||
retry_badcase_max_times = self.options.get("retry_badcase_max_times", 3)
|
||||
retry_badcase_ratio_threshold = self.options.get("retry_badcase_ratio_threshold", 6.0)
|
||||
use_streaming = self.options.get("streaming", False)
|
||||
|
||||
# Handle voice cloning via prompt_wav_path and prompt_text
|
||||
prompt_wav_path = None
|
||||
prompt_text = None
|
||||
|
||||
# Priority: request.voice > AudioPath > options
|
||||
if hasattr(request, 'voice') and request.voice:
|
||||
# If voice is provided, try to use it as a path
|
||||
if os.path.exists(request.voice):
|
||||
prompt_wav_path = request.voice
|
||||
elif hasattr(request, 'ModelFile') and request.ModelFile:
|
||||
model_file_base = os.path.dirname(request.ModelFile)
|
||||
potential_path = os.path.join(model_file_base, request.voice)
|
||||
if os.path.exists(potential_path):
|
||||
prompt_wav_path = potential_path
|
||||
elif hasattr(request, 'ModelPath') and request.ModelPath:
|
||||
potential_path = os.path.join(request.ModelPath, request.voice)
|
||||
if os.path.exists(potential_path):
|
||||
prompt_wav_path = potential_path
|
||||
|
||||
if hasattr(request, 'AudioPath') and request.AudioPath:
|
||||
if os.path.isabs(request.AudioPath):
|
||||
prompt_wav_path = request.AudioPath
|
||||
elif hasattr(request, 'ModelFile') and request.ModelFile:
|
||||
model_file_base = os.path.dirname(request.ModelFile)
|
||||
prompt_wav_path = os.path.join(model_file_base, request.AudioPath)
|
||||
elif hasattr(request, 'ModelPath') and request.ModelPath:
|
||||
prompt_wav_path = os.path.join(request.ModelPath, request.AudioPath)
|
||||
else:
|
||||
prompt_wav_path = request.AudioPath
|
||||
|
||||
# Get prompt_text from options if available
|
||||
if "prompt_text" in self.options:
|
||||
prompt_text = self.options["prompt_text"]
|
||||
|
||||
# Prepare text
|
||||
text = request.text.strip()
|
||||
|
||||
print(f"Generating audio with cfg_value: {cfg_value}, inference_timesteps: {inference_timesteps}, streaming: {use_streaming}", file=sys.stderr)
|
||||
|
||||
# Generate audio
|
||||
if use_streaming:
|
||||
# Streaming generation
|
||||
chunks = []
|
||||
for chunk in self.model.generate_streaming(
|
||||
text=text,
|
||||
prompt_wav_path=prompt_wav_path,
|
||||
prompt_text=prompt_text,
|
||||
cfg_value=cfg_value,
|
||||
inference_timesteps=inference_timesteps,
|
||||
normalize=normalize,
|
||||
denoise=denoise,
|
||||
retry_badcase=retry_badcase,
|
||||
retry_badcase_max_times=retry_badcase_max_times,
|
||||
retry_badcase_ratio_threshold=retry_badcase_ratio_threshold,
|
||||
):
|
||||
chunks.append(chunk)
|
||||
wav = np.concatenate(chunks)
|
||||
else:
|
||||
# Non-streaming generation
|
||||
wav = self.model.generate(
|
||||
text=text,
|
||||
prompt_wav_path=prompt_wav_path,
|
||||
prompt_text=prompt_text,
|
||||
cfg_value=cfg_value,
|
||||
inference_timesteps=inference_timesteps,
|
||||
normalize=normalize,
|
||||
denoise=denoise,
|
||||
retry_badcase=retry_badcase,
|
||||
retry_badcase_max_times=retry_badcase_max_times,
|
||||
retry_badcase_ratio_threshold=retry_badcase_ratio_threshold,
|
||||
)
|
||||
|
||||
# Get sample rate from model
|
||||
sample_rate = self.model.tts_model.sample_rate
|
||||
|
||||
# Save output
|
||||
sf.write(request.dst, wav, sample_rate)
|
||||
print(f"Saved output to {request.dst}", file=sys.stderr)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error in TTS: {err}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
return backend_pb2.Result(success=True)
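A hypothetical voice-cloning call from a gRPC client might look as follows (a sketch; the paths and option values are placeholders, and the field names follow the handler above):

import grpc
import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    stub.LoadModel(backend_pb2.ModelOptions(
        Model="openbmb/VoxCPM1.5",
        Options=["cfg_value:2.0", "prompt_text:transcript of the reference clip"],
    ))
    res = stub.TTS(backend_pb2.TTSRequest(
        text="Hello from VoxCPM.",
        voice="/path/to/reference.wav",   # optional reference audio for cloning
        dst="/tmp/voxcpm_out.wav",
    ))
    print(res.success)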
|
||||
|
||||
def TTSStream(self, request, context):
|
||||
try:
|
||||
# Get generation parameters from options with defaults
|
||||
cfg_value = self.options.get("cfg_value", 2.0)
|
||||
inference_timesteps = self.options.get("inference_timesteps", 10)
|
||||
normalize = self.options.get("normalize", False)
|
||||
denoise = self.options.get("denoise", False)
|
||||
retry_badcase = self.options.get("retry_badcase", True)
|
||||
retry_badcase_max_times = self.options.get("retry_badcase_max_times", 3)
|
||||
retry_badcase_ratio_threshold = self.options.get("retry_badcase_ratio_threshold", 6.0)
|
||||
|
||||
# Handle voice cloning via prompt_wav_path and prompt_text
|
||||
prompt_wav_path = None
|
||||
prompt_text = None
|
||||
|
||||
# Priority: request.voice > AudioPath > options
|
||||
if hasattr(request, 'voice') and request.voice:
|
||||
# If voice is provided, try to use it as a path
|
||||
if os.path.exists(request.voice):
|
||||
prompt_wav_path = request.voice
|
||||
elif hasattr(request, 'ModelFile') and request.ModelFile:
|
||||
model_file_base = os.path.dirname(request.ModelFile)
|
||||
potential_path = os.path.join(model_file_base, request.voice)
|
||||
if os.path.exists(potential_path):
|
||||
prompt_wav_path = potential_path
|
||||
elif hasattr(request, 'ModelPath') and request.ModelPath:
|
||||
potential_path = os.path.join(request.ModelPath, request.voice)
|
||||
if os.path.exists(potential_path):
|
||||
prompt_wav_path = potential_path
|
||||
|
||||
if hasattr(request, 'AudioPath') and request.AudioPath:
|
||||
if os.path.isabs(request.AudioPath):
|
||||
prompt_wav_path = request.AudioPath
|
||||
elif hasattr(request, 'ModelFile') and request.ModelFile:
|
||||
model_file_base = os.path.dirname(request.ModelFile)
|
||||
prompt_wav_path = os.path.join(model_file_base, request.AudioPath)
|
||||
elif hasattr(request, 'ModelPath') and request.ModelPath:
|
||||
prompt_wav_path = os.path.join(request.ModelPath, request.AudioPath)
|
||||
else:
|
||||
prompt_wav_path = request.AudioPath
|
||||
|
||||
# Get prompt_text from options if available
|
||||
if "prompt_text" in self.options:
|
||||
prompt_text = self.options["prompt_text"]
|
||||
|
||||
# Prepare text
|
||||
text = request.text.strip()
|
||||
|
||||
# Get sample rate from model (needed for WAV header)
|
||||
sample_rate = self.model.tts_model.sample_rate
|
||||
|
||||
print(f"Streaming audio with cfg_value: {cfg_value}, inference_timesteps: {inference_timesteps}, sample_rate: {sample_rate}", file=sys.stderr)
|
||||
|
||||
# Send the sample rate as the first message, JSON-encoded in the 'message' field,
# e.g. {"sample_rate": 16000}, so the client can configure playback before audio arrives
|
||||
import json
|
||||
sample_rate_info = json.dumps({"sample_rate": int(sample_rate)})
|
||||
yield backend_pb2.Reply(message=bytes(sample_rate_info, 'utf-8'))
|
||||
|
||||
# Stream audio chunks
|
||||
for chunk in self.model.generate_streaming(
|
||||
text=text,
|
||||
prompt_wav_path=prompt_wav_path,
|
||||
prompt_text=prompt_text,
|
||||
cfg_value=cfg_value,
|
||||
inference_timesteps=inference_timesteps,
|
||||
normalize=normalize,
|
||||
denoise=denoise,
|
||||
retry_badcase=retry_badcase,
|
||||
retry_badcase_max_times=retry_badcase_max_times,
|
||||
retry_badcase_ratio_threshold=retry_badcase_ratio_threshold,
|
||||
):
|
||||
# Convert numpy array to int16 PCM and then to bytes
|
||||
# Ensure values are in int16 range
|
||||
chunk_int16 = np.clip(chunk * 32767, -32768, 32767).astype(np.int16)
|
||||
chunk_bytes = chunk_int16.tobytes()
|
||||
yield backend_pb2.Reply(audio=chunk_bytes)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error in TTSStream: {err}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
# Yield an error reply
|
||||
yield backend_pb2.Reply(message=bytes(f"Error: {err}", 'utf-8'))
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
30
backend/python/voxcpm/install.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
# Apply patch to fix PyTorch compatibility issue in voxcpm
|
||||
# This fixes the "Dimension out of range" error in scaled_dot_product_attention
|
||||
# by changing .contiguous() to .unsqueeze(0) in the attention module
|
||||
# The patch is needed because voxcpm's initialization test generation fails with
|
||||
# certain PyTorch versions due to a bug in scaled_dot_product_attention
|
||||
# https://github.com/OpenBMB/VoxCPM/issues/71#issuecomment-3441789452
|
||||
VOXCPM_PATH=$(python -c "import voxcpm; import os; print(os.path.dirname(voxcpm.__file__))" 2>/dev/null || echo "")
|
||||
if [ -n "$VOXCPM_PATH" ] && [ -f "$VOXCPM_PATH/modules/minicpm4/model.py" ]; then
|
||||
echo "Applying patch to voxcpm at $VOXCPM_PATH/modules/minicpm4/model.py"
|
||||
# Replace .contiguous() with .unsqueeze(0) for the three lines in the attention forward_step method
|
||||
# This fixes the dimension error in scaled_dot_product_attention
|
||||
sed -i 's/query_states = query_states\.contiguous()/query_states = query_states.unsqueeze(0)/g' "$VOXCPM_PATH/modules/minicpm4/model.py"
|
||||
sed -i 's/key_cache = key_cache\.contiguous()/key_cache = key_cache.unsqueeze(0)/g' "$VOXCPM_PATH/modules/minicpm4/model.py"
|
||||
sed -i 's/value_cache = value_cache\.contiguous()/value_cache = value_cache.unsqueeze(0)/g' "$VOXCPM_PATH/modules/minicpm4/model.py"
|
||||
echo "Patch applied successfully"
|
||||
else
|
||||
echo "Warning: Could not find voxcpm installation to apply patch (path: ${VOXCPM_PATH:-not found})"
|
||||
fi
|
||||
11
backend/python/voxcpm/protogen.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
runProtogen
|
||||
6
backend/python/voxcpm/requirements-cpu.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
||||
soundfile
|
||||
numpy
|
||||
voxcpm
|
||||
torchcodec
|
||||
5
backend/python/voxcpm/requirements-cublas12.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu121
|
||||
torch
|
||||
soundfile
|
||||
numpy
|
||||
voxcpm
|
||||
5
backend/python/voxcpm/requirements-cublas13.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
soundfile
|
||||
numpy
|
||||
voxcpm
|
||||
5
backend/python/voxcpm/requirements-hipblas.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
soundfile
|
||||
numpy
|
||||
voxcpm
|
||||
@@ -1,7 +1,6 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
transformers
|
||||
accelerate
|
||||
soundfile
|
||||
numpy
|
||||
voxcpm
|
||||
5
backend/python/voxcpm/requirements-l4t12.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
|
||||
torch
|
||||
soundfile
|
||||
numpy
|
||||
voxcpm
|
||||