Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 11:13:31 -05:00)

Compare commits: 88 commits, fix/reason ... master
| SHA1 |
|---|
| 5195062e12 |
| c86edf06f2 |
| 5d14c2fe4d |
| 4601143998 |
| 7913ea2bfb |
| d6409bd2eb |
| 98872791e5 |
| c6d47cb4e5 |
| f754a8edb1 |
| 4c0e70086d |
| f8bd527dfe |
| 08b2b8d755 |
| 10a1e6c74d |
| b7585ca738 |
| 8cae99229c |
| 04e0f444e1 |
| 6f410f4cbe |
| 800f749c7b |
| b6459ddd57 |
| 397f7f0862 |
| 234072769c |
| 3445415b3d |
| b05e110aa6 |
| e69cba2444 |
| f7903597ac |
| ee76a0cd1c |
| 4ca5b737bf |
| 4077aaf978 |
| 68dd9765a0 |
| 2c44b06a67 |
| 7cc90db3e5 |
| 1e08e02598 |
| dd8e74a486 |
| 48e08772f3 |
| c28c0227c6 |
| 856ca2d6b1 |
| 9b973b79f6 |
| cba8ef4e38 |
| f729e300d6 |
| 9916811a79 |
| 2f7c595cd1 |
| 73decac746 |
| ec1598868b |
| 93d7e5d4b8 |
| ff5a54b9d1 |
| 3c1f823c47 |
| 4024220d00 |
| f76958d761 |
| 2bd5ca45de |
| 6804ce1c39 |
| d499071bff |
| 26a374b717 |
| 980de0e25b |
| 4767371aee |
| 131d247b78 |
| b2a8a63899 |
| 05a332cd5f |
| 05904c77f5 |
| 17783fa7d9 |
| 4019094111 |
| ca65fc751a |
| a1e3acc590 |
| a36960e069 |
| 58bb6a29ed |
| 5881c82413 |
| 923ebbb344 |
| ea51567b89 |
| 552c62a19c |
| c0b21a921b |
| b10045adc2 |
| 61b5e3b629 |
| e35d7cb3b3 |
| 0fa0ac4797 |
| be7ed85838 |
| c12b310028 |
| 0447d5564d |
| 22c0eb5421 |
| a0a00fb937 |
| 6dd44742ea |
| 00c72e7d3e |
| d01c335cf6 |
| 5687df4535 |
| f5fade97e6 |
| b88ae31e4e |
| f6daaa7c35 |
| c491c6ca90 |
| 34e054f607 |
| e886bb291a |
434  .github/workflows/backend.yml (vendored)

@@ -91,10 +91,23 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'true'
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
# CUDA 12 builds
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vibevoice'
@@ -107,7 +120,46 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-qwen-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-pocket-tts'
@@ -133,7 +185,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-llama-cpp'
@@ -146,7 +198,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm'
@@ -159,7 +211,20 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm-omni'
runs-on: 'arc-runner-set'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "vllm-omni"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-transformers'
@@ -172,7 +237,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-diffusers'
@@ -185,7 +250,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-kokoro'
@@ -198,7 +263,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-faster-whisper'
@@ -209,6 +274,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
@@ -224,20 +302,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-bark'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-chatterbox'
@@ -250,7 +315,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-moonshine'
@@ -263,7 +328,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-stablediffusion-ggml'
@@ -276,7 +341,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-whisper'
@@ -289,7 +354,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
@@ -302,20 +367,7 @@ jobs:
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-neutts'
@@ -353,6 +405,45 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-qwen-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -431,6 +522,32 @@ jobs:
backend: "vibevoice"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-cuda-13-arm64-qwen-asr'
runs-on: 'ubuntu-24.04-arm'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
ubuntu-version: '2404'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-cuda-13-arm64-qwen-tts'
runs-on: 'ubuntu-24.04-arm'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
ubuntu-version: '2404'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
- build-type: 'l4t'
cuda-major-version: "13"
cuda-minor-version: "0"
@@ -488,11 +605,11 @@ jobs:
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13-bark'
tag-suffix: '-gpu-nvidia-cuda-13-whisperx'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark"
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -627,6 +744,19 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vllm-omni'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "vllm-omni"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
@@ -680,6 +810,45 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-qwen-asr'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-qwen-tts'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-voxcpm'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
@@ -711,11 +880,11 @@ jobs:
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-coqui'
tag-suffix: '-gpu-rocm-hipblas-whisperx'
runs-on: 'bigger-runner'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "coqui"
backend: "whisperx"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -724,11 +893,11 @@ jobs:
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-bark'
runs-on: 'arc-runner-set'
tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'bigger-runner'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
skip-drivers: 'false'
backend: "bark"
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
@@ -824,6 +993,32 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-qwen-asr'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-qwen-tts'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -890,6 +1085,45 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-qwen-asr'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-qwen-tts'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-voxcpm'
runs-on: 'arc-runner-set'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
@@ -916,19 +1150,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-bark'
runs-on: 'ubuntu-latest'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
# piper
- build-type: ''
cuda-major-version: ""
@@ -943,20 +1164,6 @@ jobs:
dockerfile: "./backend/Dockerfile.golang"
context: "./"
ubuntu-version: '2404'
# bark-cpp
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-bark-cpp'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "bark-cpp"
dockerfile: "./backend/Dockerfile.golang"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
@@ -1223,46 +1430,6 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2204'
# exllama2
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-exllama2'
runs-on: 'ubuntu-latest'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-exllama2'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
runs-on: 'ubuntu-latest'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
@@ -1343,6 +1510,45 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-qwen-asr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-asr"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-qwen-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "qwen-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-voxcpm'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
backend: "voxcpm"
dockerfile: "./backend/Dockerfile.python"
context: "./"
ubuntu-version: '2404'
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
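Each matrix entry above lists the build arguments needed to reproduce the corresponding image outside CI, as the AGENTS.md guidance later in this compare describes. A minimal sketch for the new cublas/CUDA 12.8 qwen-tts entry, assuming the `docker-build-qwen-tts` Makefile target added elsewhere in this compare exists:

```bash
# Hypothetical local build matching the cublas CUDA 12.8 qwen-tts matrix entry above.
# BUILD_TYPE, BASE_IMAGE, CUDA_MAJOR_VERSION and CUDA_MINOR_VERSION are the variables
# the Makefile and AGENTS.md reference; values are copied from the matrix entry.
BUILD_TYPE=cublas \
  CUDA_MAJOR_VERSION=12 CUDA_MINOR_VERSION=8 \
  BASE_IMAGE=ubuntu:24.04 \
  make docker-build-qwen-tts
```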
4  .github/workflows/deploy-explorer.yaml (vendored)

@@ -33,7 +33,7 @@ jobs:
run: |
CGO_ENABLED=0 make build
- name: rm
uses: appleboy/ssh-action@v1.2.4
uses: appleboy/ssh-action@v1.2.5
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}
@@ -53,7 +53,7 @@ jobs:
rm: true
target: ./local-ai
- name: restarting
uses: appleboy/ssh-action@v1.2.4
uses: appleboy/ssh-action@v1.2.5
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}
2  .github/workflows/image-pr.yml (vendored)

@@ -37,7 +37,7 @@
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12'
2  .github/workflows/image.yml (vendored)

@@ -88,7 +88,7 @@
ubuntu-codename: 'noble'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
cuda-minor-version: "8"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'
65  .github/workflows/test-extra.yml (vendored)

@@ -238,7 +238,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch espeak espeak-ng python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -257,7 +257,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -276,7 +276,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
@@ -284,4 +284,61 @@ jobs:
- name: Test pocket-tts
run: |
make --jobs=5 --output-sync=target -C backend/python/pocket-tts
make --jobs=5 --output-sync=target -C backend/python/pocket-tts test
make --jobs=5 --output-sync=target -C backend/python/pocket-tts test
tests-qwen-tts:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test qwen-tts
run: |
make --jobs=5 --output-sync=target -C backend/python/qwen-tts
make --jobs=5 --output-sync=target -C backend/python/qwen-tts test
tests-qwen-asr:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg sox
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test qwen-asr
run: |
make --jobs=5 --output-sync=target -C backend/python/qwen-asr
make --jobs=5 --output-sync=target -C backend/python/qwen-asr test
tests-voxcpm:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test voxcpm
run: |
make --jobs=5 --output-sync=target -C backend/python/voxcpm
make --jobs=5 --output-sync=target -C backend/python/voxcpm test
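The new test jobs can plausibly be reproduced outside CI with the same commands; a sketch for qwen-asr, with the package list, pin, and make targets copied from the job above:

```bash
# Mirrors the tests-qwen-asr job above on a local Ubuntu machine.
sudo apt-get update
sudo apt-get install -y build-essential ffmpeg sox
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
# Prepare the backend, then run its test target.
make --jobs=5 --output-sync=target -C backend/python/qwen-asr
make --jobs=5 --output-sync=target -C backend/python/qwen-asr test
```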
56  .github/workflows/tests-e2e.yml (vendored, new file)

@@ -0,0 +1,56 @@
---
name: 'E2E Backend Tests'

on:
pull_request:
push:
branches:
- master
tags:
- '*'

concurrency:
group: ci-tests-e2e-backend-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true

jobs:
tests-e2e-backend:
runs-on: ubuntu-latest
strategy:
matrix:
go-version: ['1.25.x']
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
- name: Display Go version
run: go version
- name: Proto Dependencies
run: |
# Install protoc
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
PATH="$PATH:$HOME/go/bin" make protogen-go
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y build-essential
- name: Test Backend E2E
run: |
PATH="$PATH:$HOME/go/bin" make build-mock-backend test-e2e
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
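The job boils down to generating the Go protobuf stubs and then running the new Makefile targets; a rough local equivalent, assuming protoc v26.1 is already installed on PATH:

```bash
# Rough local equivalent of the E2E job above.
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
# Regenerate gRPC stubs, build the mock backend, and run the e2e suite.
PATH="$PATH:$HOME/go/bin" make protogen-go
PATH="$PATH:$HOME/go/bin" make build-mock-backend test-e2e
```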
2  .gitignore (vendored)

@@ -36,6 +36,8 @@ LocalAI
models/*
test-models/
test-dir/
tests/e2e-aio/backends
tests/e2e-aio/models

release/
16  AGENTS.md

@@ -4,13 +4,13 @@ Building and testing the project depends on the components involved and the plat

## Building a specified backend

Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build bark for ROCM/hipblas
Let's say the user wants to build a particular backend for a given platform. For example let's say they want to build coqui for ROCM/hipblas

- The Makefile has targets like `docker-build-bark` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- The Makefile has targets like `docker-build-coqui` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- At a minimum we need to set the BUILD_TYPE, BASE_IMAGE build-args
- Use .github/workflows/backend.yml as a reference; it lists the needed args in the `include` job strategy matrix
- l4t and cublas also require the CUDA major and minor version
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-bark`
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-coqui`
- Unless the user specifies that they want you to run the command, just print it, because not all agent frontends handle long-running jobs well and the output may overflow your context
- The user may say they want to build AMD or ROCM instead of hipblas, Intel instead of SYCL, or NVIDIA instead of l4t or cublas. Ask for confirmation if there is ambiguity.
- Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly.

@@ -95,7 +95,7 @@ test-extra: prepare-test-extra

Add a backend definition variable in the backend definitions section (around line 428-457). The format depends on the backend type:

**For Python backends with root context** (like `faster-whisper`, `bark`):
**For Python backends with root context** (like `faster-whisper`, `coqui`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|.|false|true
```

@@ -280,3 +280,11 @@ Always check `llama.cpp` for new model configuration options that should be supp
- `llama.cpp/common/chat-parser.cpp` - Format presets and model-specific handlers
- `llama.cpp/common/chat.h` - Format enums and parameter structures
- `llama.cpp/tools/server/server-context.cpp` - Server configuration options

# Documentation

The project documentation is located in `docs/content`. When adding new features or changing existing functionality, it is crucial to update the documentation to reflect these changes. This helps users understand how to use the new capabilities and ensures the documentation stays relevant.

- **Feature Documentation**: If you add a new feature (like a new backend or API endpoint), create a new markdown file in `docs/content/features/` explaining what it is, how to configure it, and how to use it.
- **Configuration**: If you modify configuration options, update the relevant sections in `docs/content/`.
- **Examples**: providing concrete examples (like YAML configuration blocks) is highly encouraged to help users get started quickly.
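The pretty-printed command pattern documented above extends naturally to the backends introduced in this compare. A minimal sketch, assuming `generate-docker-build-target` has produced a `docker-build-whisperx` target as the Makefile changes elsewhere in this compare suggest:

```bash
# Hypothetical ROCm/hipblas build of the newly added whisperx backend,
# following the same pattern as the docker-build-coqui example above.
DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) \
  BUILD_TYPE=hipblas \
  BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 \
  make docker-build-whisperx
```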
@@ -10,7 +10,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas0 libopenblas-dev && \
ffmpeg libopenblas0 libopenblas-dev sox && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
59  Makefile

@@ -1,5 +1,5 @@
# Disable parallel execution for backend builds
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine backends/pocket-tts
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/vllm-omni backends/moonshine backends/pocket-tts backends/qwen-tts backends/qwen-asr backends/voxcpm backends/whisperx

GOCMD=go
GOTEST=$(GOCMD) test
@@ -7,16 +7,14 @@ GOVET=$(GOCMD) vet
BINARY_NAME=local-ai
LAUNCHER_BINARY_NAME=local-ai-launcher

CUDA_MAJOR_VERSION?=13
CUDA_MINOR_VERSION?=0
UBUNTU_VERSION?=2404
UBUNTU_CODENAME?=noble

GORELEASER?=

export BUILD_TYPE?=
export CUDA_MAJOR_VERSION?=12
export CUDA_MINOR_VERSION?=9
export CUDA_MAJOR_VERSION?=13
export CUDA_MINOR_VERSION?=0

GO_TAGS?=
BUILD_ID?=
@@ -191,9 +189,6 @@ run-e2e-aio: protogen-go
########################################################

prepare-e2e:
mkdir -p $(TEST_DIR)
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
docker build \
--build-arg IMAGE_TYPE=core \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
@@ -207,14 +202,16 @@ prepare-e2e:
-t localai-tests .

run-e2e-image:
ls -liah $(abspath ./tests/e2e-fixtures)
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --name e2e-tests-$(RANDOM) localai-tests

test-e2e:
test-e2e: build-mock-backend prepare-e2e run-e2e-image
@echo 'Running e2e tests'
BUILD_TYPE=$(BUILD_TYPE) \
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390 \
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e
$(MAKE) clean-mock-backend
$(MAKE) teardown-e2e
docker rmi localai-tests

teardown-e2e:
rm -rf $(TEST_DIR) || true
@@ -314,18 +311,28 @@ prepare-test-extra: protogen-python
$(MAKE) -C backend/python/diffusers
$(MAKE) -C backend/python/chatterbox
$(MAKE) -C backend/python/vllm
$(MAKE) -C backend/python/vllm-omni
$(MAKE) -C backend/python/vibevoice
$(MAKE) -C backend/python/moonshine
$(MAKE) -C backend/python/pocket-tts
$(MAKE) -C backend/python/qwen-tts
$(MAKE) -C backend/python/qwen-asr
$(MAKE) -C backend/python/voxcpm
$(MAKE) -C backend/python/whisperx

test-extra: prepare-test-extra
$(MAKE) -C backend/python/transformers test
$(MAKE) -C backend/python/diffusers test
$(MAKE) -C backend/python/chatterbox test
$(MAKE) -C backend/python/vllm test
$(MAKE) -C backend/python/vllm-omni test
$(MAKE) -C backend/python/vibevoice test
$(MAKE) -C backend/python/moonshine test
$(MAKE) -C backend/python/pocket-tts test
$(MAKE) -C backend/python/qwen-tts test
$(MAKE) -C backend/python/qwen-asr test
$(MAKE) -C backend/python/voxcpm test
$(MAKE) -C backend/python/whisperx test

DOCKER_IMAGE?=local-ai
DOCKER_AIO_IMAGE?=local-ai-aio
@@ -434,7 +441,6 @@ backend-images:
BACKEND_LLAMA_CPP = llama-cpp|llama-cpp|.|false|false

# Golang backends
BACKEND_BARK_CPP = bark-cpp|golang|.|false|true
BACKEND_PIPER = piper|golang|.|false|true
BACKEND_LOCAL_STORE = local-store|golang|.|false|true
BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
@@ -447,18 +453,21 @@ BACKEND_RERANKERS = rerankers|python|.|false|true
BACKEND_TRANSFORMERS = transformers|python|.|false|true
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
BACKEND_COQUI = coqui|python|.|false|true
BACKEND_BARK = bark|python|.|false|true
BACKEND_EXLLAMA2 = exllama2|python|.|false|true
BACKEND_RFDETR = rfdetr|python|.|false|true
BACKEND_KITTEN_TTS = kitten-tts|python|.|false|true
BACKEND_NEUTTS = neutts|python|.|false|true
BACKEND_KOKORO = kokoro|python|.|false|true
BACKEND_VLLM = vllm|python|.|false|true
BACKEND_VLLM_OMNI = vllm-omni|python|.|false|true
BACKEND_DIFFUSERS = diffusers|python|.|--progress=plain|true
BACKEND_CHATTERBOX = chatterbox|python|.|false|true
BACKEND_VIBEVOICE = vibevoice|python|.|--progress=plain|true
BACKEND_MOONSHINE = moonshine|python|.|false|true
BACKEND_POCKET_TTS = pocket-tts|python|.|false|true
BACKEND_QWEN_TTS = qwen-tts|python|.|false|true
BACKEND_QWEN_ASR = qwen-asr|python|.|false|true
BACKEND_VOXCPM = voxcpm|python|.|false|true
BACKEND_WHISPERX = whisperx|python|.|false|true

# Helper function to build docker image for a backend
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
@@ -482,7 +491,6 @@ endef

# Generate all docker-build targets
$(eval $(call generate-docker-build-target,$(BACKEND_LLAMA_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_PIPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_LOCAL_STORE)))
$(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
@@ -493,24 +501,37 @@ $(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK)))
$(eval $(call generate-docker-build-target,$(BACKEND_EXLLAMA2)))
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
$(eval $(call generate-docker-build-target,$(BACKEND_KITTEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_NEUTTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_KOKORO)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM_OMNI)))
$(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
$(eval $(call generate-docker-build-target,$(BACKEND_POCKET_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_QWEN_ASR)))
$(eval $(call generate-docker-build-target,$(BACKEND_VOXCPM)))
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPERX)))

# Pattern rule for docker-save targets
docker-save-%: backend-images
docker save local-ai-backend:$* -o backend-images/$*.tar

docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine docker-build-pocket-tts
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-vllm-omni docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-chatterbox docker-build-vibevoice docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts docker-build-qwen-asr docker-build-voxcpm docker-build-whisperx

########################################################
### Mock Backend for E2E Tests
########################################################

build-mock-backend: protogen-go
$(GOCMD) build -o tests/e2e/mock-backend/mock-backend ./tests/e2e/mock-backend

clean-mock-backend:
rm -f tests/e2e/mock-backend/mock-backend

########################################################
### END Backends
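The docker-save pattern rule shown above pairs with the generated docker-build targets; a sketch for one of the newly added backends, assuming the build tags the image as local-ai-backend:voxcpm (the tag the pattern rule expects):

```bash
# Build the CPU voxcpm backend image, then export it with the docker-save-% rule.
BUILD_TYPE= BASE_IMAGE=ubuntu:24.04 make docker-build-voxcpm
make docker-save-voxcpm   # writes backend-images/voxcpm.tar
```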
49  README.md

@@ -51,34 +51,16 @@
**LocalAI** is the free, Open Source OpenAI alternative. LocalAI acts as a drop-in replacement REST API that's compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. It does not require a GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).

## 📚🆕 Local Stack Family
## Local Stack Family

🆕 LocalAI is now part of a comprehensive suite of AI tools designed to work together:
Liking LocalAI? LocalAI is part of an integrated suite of AI infrastructure tools; you might also like:

- **[LocalAGI](https://github.com/mudler/LocalAGI)** - AI agent orchestration platform with OpenAI Responses API compatibility and advanced agentic capabilities
- **[LocalRecall](https://github.com/mudler/LocalRecall)** - MCP/REST API knowledge base system providing persistent memory and storage for AI agents
- 🆕 **[Cogito](https://github.com/mudler/cogito)** - Go library for building intelligent, co-operative agentic software and LLM-powered workflows, focusing on improving results for small, open source language models that scales to any LLM. Powers LocalAGI and LocalAI MCP/Agentic capabilities
- 🆕 **[Wiz](https://github.com/mudler/wiz)** - Terminal-based AI agent accessible via Ctrl+Space keybinding. Portable, local-LLM friendly shell assistant with TUI/CLI modes, tool execution with approval, MCP protocol support, and multi-shell compatibility (zsh, bash, fish)
- 🆕 **[SkillServer](https://github.com/mudler/skillserver)** - Simple, centralized skills database for AI agents via MCP. Manages skills as Markdown files with MCP server integration, web UI for editing, Git synchronization, and full-text search capabilities

<table>
<tr>
<td width="50%" valign="top">
<a href="https://github.com/mudler/LocalAGI">
<img src="https://raw.githubusercontent.com/mudler/LocalAGI/refs/heads/main/webui/react-ui/public/logo_2.png" width="300" alt="LocalAGI Logo">
</a>
</td>
<td width="50%" valign="top">
<h3><a href="https://github.com/mudler/LocalAGI">LocalAGI</a></h3>
<p>A powerful Local AI agent management platform that serves as a drop-in replacement for OpenAI's Responses API, enhanced with advanced agentic capabilities.</p>
</td>
</tr>
<tr>
<td width="50%" valign="top">
<a href="https://github.com/mudler/LocalRecall">
<img src="https://raw.githubusercontent.com/mudler/LocalRecall/refs/heads/main/static/localrecall_horizontal.png" width="300" alt="LocalRecall Logo">
</a>
</td>
<td width="50%" valign="top">
<h3><a href="https://github.com/mudler/LocalRecall">LocalRecall</a></h3>
<p>A REST-ful API and knowledge base management system that provides persistent memory and storage capabilities for AI agents.</p>
</td>
</tr>
</table>

## Screenshots / Video

@@ -257,6 +239,7 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- 🔈 [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
- 🎨 [Image generation](https://localai.io/features/image-generation)
- 🔥 [OpenAI-alike tools API](https://localai.io/features/openai-functions/)
- ⚡ [Realtime API](https://localai.io/features/openai-realtime/) (Speech-to-speech)
- 🧠 [Embeddings generation for vector databases](https://localai.io/features/embeddings/)
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface](https://localai.io/models/)
@@ -278,7 +261,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
| **llama.cpp** | LLM inference in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12/13, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 12/13, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12/13 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
@@ -287,8 +269,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12/13, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
@@ -298,6 +278,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
| **neutts** | Text-to-speech with voice cloning | CUDA 12/13, ROCm, CPU |
| **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
| **pocket-tts** | Lightweight CPU-based TTS | CUDA 12/13, ROCm, Intel, CPU |
| **qwen-tts** | High-quality TTS with custom voice, voice design, and voice cloning | CUDA 12/13, ROCm, Intel, CPU |

### Image & Video Generation
| Backend | Description | Acceleration Support |
@@ -319,9 +300,9 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice, pocket-tts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice, pocket-tts | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, neutts, vibevoice, pocket-tts, qwen-tts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, coqui, kokoro, vibevoice, pocket-tts, qwen-tts | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
| **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
@@ -343,6 +324,10 @@ Agentic Libraries:
MCPs:
- https://github.com/mudler/MCPs

OS Assistant:

- https://github.com/mudler/Keygeist - Keygeist is an AI-powered keyboard operator that listens for key combinations and responds with AI-generated text typed directly into your Linux box.

Model galleries
- https://github.com/go-skynet/model-gallery
@@ -46,7 +46,7 @@ The backend system provides language-specific Dockerfiles that handle the build
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Audio**: coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro
@@ -55,7 +55,6 @@ The backend system provides language-specific Dockerfiles that handle the build
- **stablediffusion-ggml**: Stable Diffusion in Go with GGML Cpp backend
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Golang with C bindings using rhasspy/piper
- **bark-cpp**: Bark TTS models in Golang with Cpp bindings
- **local-store**: Vector storage backend

#### C++ Backends (`cpp/`)
@@ -17,6 +17,7 @@ service Backend {
rpc GenerateVideo(GenerateVideoRequest) returns (Result) {}
rpc AudioTranscription(TranscriptRequest) returns (TranscriptResult) {}
rpc TTS(TTSRequest) returns (Result) {}
rpc TTSStream(TTSRequest) returns (stream Reply) {}
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {}
@@ -32,6 +33,8 @@ service Backend {
rpc GetMetrics(MetricsRequest) returns (MetricsResponse);

rpc VAD(VADRequest) returns (VADResponse) {}

rpc ModelMetadata(ModelOptions) returns (ModelMetadataResponse) {}
}

// Define the empty request
@@ -296,6 +299,7 @@ message TranscriptSegment {
int64 end = 3;
string text = 4;
repeated int32 tokens = 5;
string speaker = 6;
}

message GenerateImageRequest {
@@ -410,3 +414,8 @@ message Detection {
message DetectResponse {
repeated Detection Detections = 1;
}

message ModelMetadataResponse {
bool supports_thinking = 1;
string rendered_template = 2; // The rendered chat template with enable_thinking=true (empty if not applicable)
}
@@ -1,5 +1,5 @@

LLAMA_VERSION?=959ecf7f234dc0bc0cd6829b25cb0ee1481aa78a
LLAMA_VERSION?=2634ed207a17db1a54bd8df0555bd8499a6ab691
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp

CMAKE_ARGS?=
@@ -83,8 +83,8 @@ static void start_llama_server(server_context& ctx_server) {

// print sample chat example to make it clear which template is used
// LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
// common_chat_templates_source(ctx_server.impl->chat_templates.get()),
// common_chat_format_example(ctx_server.impl->chat_templates.get(), ctx_server.impl->params_base.use_jinja).c_str(), ctx_server.impl->params_base.default_template_kwargs);
// common_chat_templates_source(ctx_server.impl->chat_params.tmpls.get()),
// common_chat_format_example(ctx_server.impl->chat_params.tmpls.get(), ctx_server.impl->params_base.use_jinja).c_str(), ctx_server.impl->params_base.default_template_kwargs);

// Keep the chat templates initialized in load_model() so they can be used when UseTokenizerTemplate is enabled
// Templates will only be used conditionally in Predict/PredictStream when UseTokenizerTemplate is true and Messages are provided
@@ -778,8 +778,8 @@ public:
if (!params.mmproj.path.empty()) {
error_msg += " (with mmproj: " + params.mmproj.path + ")";
}
if (params.has_speculative() && !params.speculative.model.path.empty()) {
error_msg += " (with draft model: " + params.speculative.model.path + ")";
if (params.speculative.has_dft() && !params.speculative.mparams_dft.path.empty()) {
error_msg += " (with draft model: " + params.speculative.mparams_dft.path + ")";
}

// Add captured error details if available
@@ -882,7 +882,7 @@ public:
std::string prompt_str;
std::vector<raw_buffer> files; // Declare files early so it's accessible in both branches
// Handle chat templates when UseTokenizerTemplate is enabled and Messages are provided
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_templates != nullptr) {
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_params.tmpls != nullptr) {
// Convert proto Messages to JSON format compatible with oaicompat_chat_params_parse
json body_json;
json messages_json = json::array();
@@ -1261,12 +1261,7 @@ public:
// Use the same approach as server.cpp: call oaicompat_chat_params_parse
// This handles all template application, grammar merging, etc. automatically
// Files extracted from multimodal content in messages will be added to the files vector
// Create parser options with current chat_templates to ensure tmpls is not null
oaicompat_parser_options parser_opt = ctx_server.impl->oai_parser_opt;
parser_opt.tmpls = ctx_server.impl->chat_templates.get(); // Ensure tmpls is set to current chat_templates
// Update allow_image and allow_audio based on current mctx state
parser_opt.allow_image = ctx_server.impl->mctx ? mtmd_support_vision(ctx_server.impl->mctx) : false;
parser_opt.allow_audio = ctx_server.impl->mctx ? mtmd_support_audio(ctx_server.impl->mctx) : false;
// chat_params already contains tmpls, allow_image, and allow_audio set during model loading

// Debug: Log tools before template processing
if (body_json.contains("tools")) {
@@ -1312,7 +1307,7 @@ public:
}
}

json parsed_data = oaicompat_chat_params_parse(body_json, parser_opt, files);
json parsed_data = oaicompat_chat_params_parse(body_json, ctx_server.impl->chat_params, files);

// Debug: Log tools after template processing
if (parsed_data.contains("tools")) {
@@ -1365,7 +1360,7 @@ public:

// If not using chat templates, extract files from image_data/audio_data fields
// (If using chat templates, files were already extracted by oaicompat_chat_params_parse)
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_templates == nullptr) {
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_params.tmpls == nullptr) {
const auto &images_data = data.find("image_data");
if (images_data != data.end() && images_data->is_array())
{
@@ -1593,7 +1588,7 @@ public:
std::string prompt_str;
std::vector<raw_buffer> files; // Declare files early so it's accessible in both branches
// Handle chat templates when UseTokenizerTemplate is enabled and Messages are provided
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_templates != nullptr) {
if (request->usetokenizertemplate() && request->messages_size() > 0 && ctx_server.impl->chat_params.tmpls != nullptr) {
// Convert proto Messages to JSON format compatible with oaicompat_chat_params_parse
json body_json;
json messages_json = json::array();
@@ -1997,12 +1992,7 @@ public:
// Use the same approach as server.cpp: call oaicompat_chat_params_parse
// This handles all template application, grammar merging, etc. automatically
// Files extracted from multimodal content in messages will be added to the files vector
// Create parser options with current chat_templates to ensure tmpls is not null
oaicompat_parser_options parser_opt = ctx_server.impl->oai_parser_opt;
parser_opt.tmpls = ctx_server.impl->chat_templates.get(); // Ensure tmpls is set to current chat_templates
// Update allow_image and allow_audio based on current mctx state
parser_opt.allow_image = ctx_server.impl->mctx ? mtmd_support_vision(ctx_server.impl->mctx) : false;
parser_opt.allow_audio = ctx_server.impl->mctx ? mtmd_support_audio(ctx_server.impl->mctx) : false;
// chat_params already contains tmpls, allow_image, and allow_audio set during model loading

// Debug: Log tools before template processing
if (body_json.contains("tools")) {
@@ -2048,7 +2038,7 @@ public:
}
}

json parsed_data = oaicompat_chat_params_parse(body_json, parser_opt, files);
json parsed_data = oaicompat_chat_params_parse(body_json, ctx_server.impl->chat_params, files);
|
||||
|
||||
// Debug: Log tools after template processing
|
||||
if (parsed_data.contains("tools")) {
|
||||
@@ -2101,7 +2091,7 @@ public:
|
||||
|
||||
// If not using chat templates, extract files from image_data/audio_data fields
|
||||
// (If using chat templates, files were already extracted by oaicompat_chat_params_parse)
|
||||
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_templates == nullptr) {
|
||||
if (!request->usetokenizertemplate() || request->messages_size() == 0 || ctx_server.impl->chat_params.tmpls == nullptr) {
|
||||
const auto &images_data = data.find("image_data");
|
||||
if (images_data != data.end() && images_data->is_array())
|
||||
{
|
||||
@@ -2486,6 +2476,47 @@ public:
response->set_prompt_tokens_processed(res_metrics->n_prompt_tokens_processed_total);


return grpc::Status::OK;
}

grpc::Status ModelMetadata(ServerContext* /*context*/, const backend::ModelOptions* /*request*/, backend::ModelMetadataResponse* response) override {
// Check if model is loaded
if (params_base.model.path.empty()) {
return grpc::Status(grpc::StatusCode::FAILED_PRECONDITION, "Model not loaded");
}

// Check if chat templates are initialized
if (ctx_server.impl->chat_params.tmpls == nullptr) {
// If templates are not initialized, we can't detect thinking support
// Return false as default
response->set_supports_thinking(false);
response->set_rendered_template("");
return grpc::Status::OK;
}

// Detect thinking support using llama.cpp's function
bool supports_thinking = common_chat_templates_support_enable_thinking(ctx_server.impl->chat_params.tmpls.get());
response->set_supports_thinking(supports_thinking);

// Render the template with enable_thinking=true so Go code can detect thinking tokens
// This allows reusing existing detection functions in Go
std::string rendered_template = "";
if (params_base.use_jinja) {
// Render the template with enable_thinking=true to see what the actual prompt looks like
common_chat_templates_inputs dummy_inputs;
common_chat_msg msg;
msg.role = "user";
msg.content = "test";
dummy_inputs.messages = {msg};
dummy_inputs.enable_thinking = true;
dummy_inputs.use_jinja = params_base.use_jinja;

const auto rendered = common_chat_templates_apply(ctx_server.impl->chat_params.tmpls.get(), dummy_inputs);
rendered_template = rendered.prompt;
}

response->set_rendered_template(rendered_template);

return grpc::Status::OK;
}
};

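The rendered template returned by ModelMetadata lets the Go side detect thinking support by plain string inspection instead of re-implementing template logic. Below is a minimal Go sketch of such a detector; the tag list and function names are illustrative assumptions, not LocalAI's actual detection code.

```go
package main

import (
	"fmt"
	"strings"
)

// detectThinkingTag scans a chat template rendered with enable_thinking=true
// for common reasoning markers. The candidate tags are assumptions chosen for
// illustration; real templates may use other markers.
func detectThinkingTag(renderedTemplate string) (string, bool) {
	candidates := []string{"<think>", "<thinking>", "<|thinking|>", "<reasoning>"}
	for _, tag := range candidates {
		if strings.Contains(renderedTemplate, tag) {
			return tag, true
		}
	}
	return "", false
}

func main() {
	// Example fragment of a rendered template (illustrative only).
	rendered := "<|im_start|>assistant\n<think>\n"
	if tag, ok := detectThinkingTag(rendered); ok {
		fmt.Println("model emits thinking tag:", tag)
	}
}
```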
@@ -1,51 +0,0 @@
|
||||
INCLUDE_PATH := $(abspath ./)
|
||||
LIBRARY_PATH := $(abspath ./)
|
||||
|
||||
AR?=ar
|
||||
|
||||
CMAKE_ARGS?=-DGGML_NATIVE=OFF
|
||||
BUILD_TYPE?=
|
||||
GOCMD=go
|
||||
# keep standard at C11 and C++11
|
||||
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/bark.cpp/examples -I$(INCLUDE_PATH)/sources/bark.cpp/encodec.cpp/ggml/include -I$(INCLUDE_PATH)/sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
|
||||
LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/sources/bark.cpp/build/examples -lbark -lstdc++ -lm
|
||||
|
||||
# bark.cpp
|
||||
BARKCPP_REPO?=https://github.com/PABannier/bark.cpp.git
|
||||
BARKCPP_VERSION?=5d5be84f089ab9ea53b7a793f088d3fbf7247495
|
||||
|
||||
# warnings
|
||||
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
|
||||
|
||||
## bark.cpp
|
||||
sources/bark.cpp:
|
||||
git clone --recursive $(BARKCPP_REPO) sources/bark.cpp && \
|
||||
cd sources/bark.cpp && \
|
||||
git checkout $(BARKCPP_VERSION) && \
|
||||
git submodule update --init --recursive --depth 1 --single-branch
|
||||
|
||||
sources/bark.cpp/build/libbark.a: sources/bark.cpp
|
||||
cd sources/bark.cpp && \
|
||||
mkdir -p build && \
|
||||
cd build && \
|
||||
cmake $(CMAKE_ARGS) .. && \
|
||||
cmake --build . --config Release
|
||||
|
||||
gobark.o:
|
||||
$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)
|
||||
|
||||
libbark.a: sources/bark.cpp/build/libbark.a gobark.o
|
||||
cp $(INCLUDE_PATH)/sources/bark.cpp/build/libbark.a ./
|
||||
$(AR) rcs libbark.a gobark.o
|
||||
|
||||
bark-cpp: libbark.a
|
||||
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH="$(CURDIR)" LIBRARY_PATH=$(CURDIR) \
|
||||
$(GOCMD) build -v -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o bark-cpp ./
|
||||
|
||||
package:
|
||||
bash package.sh
|
||||
|
||||
build: bark-cpp package
|
||||
|
||||
clean:
|
||||
rm -f gobark.o libbark.a
|
||||
@@ -1,85 +0,0 @@
|
||||
#include <iostream>
|
||||
#include <tuple>
|
||||
|
||||
#include "bark.h"
|
||||
#include "gobark.h"
|
||||
#include "common.h"
|
||||
#include "ggml.h"
|
||||
|
||||
struct bark_context *c;
|
||||
|
||||
void bark_print_progress_callback(struct bark_context *bctx, enum bark_encoding_step step, int progress, void *user_data) {
|
||||
if (step == bark_encoding_step::SEMANTIC) {
|
||||
printf("\rGenerating semantic tokens... %d%%", progress);
|
||||
} else if (step == bark_encoding_step::COARSE) {
|
||||
printf("\rGenerating coarse tokens... %d%%", progress);
|
||||
} else if (step == bark_encoding_step::FINE) {
|
||||
printf("\rGenerating fine tokens... %d%%", progress);
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int load_model(char *model) {
|
||||
// initialize bark context
|
||||
struct bark_context_params ctx_params = bark_context_default_params();
|
||||
bark_params params;
|
||||
|
||||
params.model_path = model;
|
||||
|
||||
// ctx_params.verbosity = verbosity;
|
||||
ctx_params.progress_callback = bark_print_progress_callback;
|
||||
ctx_params.progress_callback_user_data = nullptr;
|
||||
|
||||
struct bark_context *bctx = bark_load_model(params.model_path.c_str(), ctx_params, params.seed);
|
||||
if (!bctx) {
|
||||
fprintf(stderr, "%s: Could not load model\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
c = bctx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tts(char *text,int threads, char *dst ) {
|
||||
|
||||
ggml_time_init();
|
||||
const int64_t t_main_start_us = ggml_time_us();
|
||||
|
||||
// generate audio
|
||||
if (!bark_generate_audio(c, text, threads)) {
|
||||
fprintf(stderr, "%s: An error occurred. If the problem persists, feel free to open an issue to report it.\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
const float *audio_data = bark_get_audio_data(c);
|
||||
if (audio_data == NULL) {
|
||||
fprintf(stderr, "%s: Could not get audio data\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
const int audio_arr_size = bark_get_audio_data_size(c);
|
||||
|
||||
std::vector<float> audio_arr(audio_data, audio_data + audio_arr_size);
|
||||
|
||||
write_wav_on_disk(audio_arr, dst);
|
||||
|
||||
// report timing
|
||||
{
|
||||
const int64_t t_main_end_us = ggml_time_us();
|
||||
const int64_t t_load_us = bark_get_load_time(c);
|
||||
const int64_t t_eval_us = bark_get_eval_time(c);
|
||||
|
||||
printf("\n\n");
|
||||
printf("%s: load time = %8.2f ms\n", __func__, t_load_us / 1000.0f);
|
||||
printf("%s: eval time = %8.2f ms\n", __func__, t_eval_us / 1000.0f);
|
||||
printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int unload() {
|
||||
bark_free(c);
|
||||
}
|
||||
|
||||
@@ -1,52 +0,0 @@
|
||||
package main
|
||||
|
||||
// #cgo CXXFLAGS: -I${SRCDIR}/sources/bark.cpp/ -I${SRCDIR}/sources/bark.cpp/encodec.cpp -I${SRCDIR}/sources/bark.cpp/encodec.cpp/ggml/include -I${SRCDIR}/sources/bark.cpp/examples -I${SRCDIR}/sources/bark.cpp/spm-headers
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/sources/bark.cpp/build/examples -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ggml/src/ -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon -lggml -lgomp
|
||||
// #include <gobark.h>
|
||||
// #include <stdlib.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
)
|
||||
|
||||
type Bark struct {
|
||||
base.SingleThread
|
||||
threads int
|
||||
}
|
||||
|
||||
func (sd *Bark) Load(opts *pb.ModelOptions) error {
|
||||
|
||||
sd.threads = int(opts.Threads)
|
||||
|
||||
modelFile := C.CString(opts.ModelFile)
|
||||
defer C.free(unsafe.Pointer(modelFile))
|
||||
|
||||
ret := C.load_model(modelFile)
|
||||
if ret != 0 {
|
||||
return fmt.Errorf("inference failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sd *Bark) TTS(opts *pb.TTSRequest) error {
|
||||
t := C.CString(opts.Text)
|
||||
defer C.free(unsafe.Pointer(t))
|
||||
|
||||
dst := C.CString(opts.Dst)
|
||||
defer C.free(unsafe.Pointer(dst))
|
||||
|
||||
threads := C.int(sd.threads)
|
||||
|
||||
ret := C.tts(t, threads, dst)
|
||||
if ret != 0 {
|
||||
return fmt.Errorf("inference failed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
int load_model(char *model);
|
||||
int tts(char *text,int threads, char *dst );
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
@@ -1,20 +0,0 @@
|
||||
package main
|
||||
|
||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("addr", "localhost:50051", "the address to connect to")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if err := grpc.StartServer(*addr, &Bark{}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
@@ -1,41 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Script to copy the appropriate libraries based on architecture
|
||||
# This script is used in the final stage of the Dockerfile
|
||||
|
||||
set -e
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
cp -avrf $CURDIR/bark-cpp $CURDIR/package/
|
||||
cp -rfv $CURDIR/run.sh $CURDIR/package/
|
||||
|
||||
# Detect architecture and copy appropriate libraries
|
||||
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
|
||||
# x86_64 architecture
|
||||
echo "Detected x86_64 architecture, copying x86_64 libraries..."
|
||||
cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
|
||||
cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
|
||||
cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
|
||||
cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
|
||||
cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
|
||||
cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
|
||||
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
|
||||
# ARM64 architecture
|
||||
echo "Detected ARM64 architecture, copying ARM64 libraries..."
|
||||
cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
|
||||
cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
|
||||
cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
|
||||
cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
|
||||
cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
|
||||
cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
|
||||
else
|
||||
echo "Error: Could not detect architecture"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
@@ -1,13 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
|
||||
|
||||
# If there is a lib/ld.so, use it
|
||||
if [ -f $CURDIR/lib/ld.so ]; then
|
||||
echo "Using lib/ld.so"
|
||||
exec $CURDIR/lib/ld.so $CURDIR/bark-cpp "$@"
|
||||
fi
|
||||
|
||||
exec $CURDIR/bark-cpp "$@"
|
||||
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)

# stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=a48b4a3ade9972faf0adcad47e51c6fc03f0e46d
STABLEDIFFUSION_GGML_VERSION?=e411520407663e1ddf8ff2e5ed4ff3a116fbbc97

CMAKE_ARGS+=-DGGML_MAX_NAME=128


@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)

# whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=f53dc74843e97f19f94a79241357f74ad5b691a6
WHISPER_CPP_VERSION?=aa1bc0d1a6dfd70dbb9f60c11df12441e03a9075
SO_TARGET?=libgowhisper.so

CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF

@@ -130,8 +130,9 @@ func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptR
segments := []*pb.TranscriptSegment{}
text := ""
for i := range int(segsLen) {
s := CppGetSegmentStart(i)
t := CppGetSegmentEnd(i)
// segment start/end conversion factor taken from https://github.com/ggml-org/whisper.cpp/blob/master/examples/cli/cli.cpp#L895
s := CppGetSegmentStart(i) * (10000000)
t := CppGetSegmentEnd(i) * (10000000)
txt := strings.Clone(CppGetSegmentText(i))
tokens := make([]int32, CppNTokens(i))


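The factor of 10000000 above follows from whisper.cpp reporting segment bounds in 10 ms ticks; assuming the TranscriptSegment start/end fields carry nanoseconds, one tick is 10 ms = 10,000,000 ns. A minimal sketch of that conversion, with hypothetical helper names:

```go
package main

import "fmt"

// One whisper.cpp timestamp tick is 10 ms; expressed in nanoseconds that is
// 10,000,000 ns. Treating the proto start/end fields as nanoseconds is an
// assumption inferred from the conversion factor in the hunk above.
const ticksToNanoseconds int64 = 10_000_000

// segmentBoundsNs converts a segment's start/end from ticks to nanoseconds.
func segmentBoundsNs(startTicks, endTicks int64) (int64, int64) {
	return startTicks * ticksToNanoseconds, endTicks * ticksToNanoseconds
}

func main() {
	// Ticks 150..480 correspond to 1.5 s .. 4.8 s.
	start, end := segmentBoundsNs(150, 480)
	fmt.Println(start, end) // 1500000000 4800000000
}
```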
@@ -142,6 +142,31 @@
|
||||
amd: "rocm-vllm"
|
||||
intel: "intel-vllm"
|
||||
nvidia-cuda-12: "cuda12-vllm"
|
||||
- &vllm-omni
|
||||
name: "vllm-omni"
|
||||
license: apache-2.0
|
||||
urls:
|
||||
- https://github.com/vllm-project/vllm-omni
|
||||
tags:
|
||||
- text-to-image
|
||||
- image-generation
|
||||
- text-to-video
|
||||
- video-generation
|
||||
- text-to-speech
|
||||
- TTS
|
||||
- multimodal
|
||||
- LLM
|
||||
icon: https://raw.githubusercontent.com/vllm-project/vllm/main/docs/assets/logos/vllm-logo-text-dark.png
|
||||
description: |
|
||||
vLLM-Omni is a unified interface for multimodal generation with vLLM.
|
||||
It supports image generation (text-to-image, image editing), video generation
|
||||
(text-to-video, image-to-video), text generation with multimodal inputs, and
|
||||
text-to-speech generation. Only supports NVIDIA (CUDA) and ROCm platforms.
|
||||
alias: "vllm-omni"
|
||||
capabilities:
|
||||
nvidia: "cuda12-vllm-omni"
|
||||
amd: "rocm-vllm-omni"
|
||||
nvidia-cuda-12: "cuda12-vllm-omni"
|
||||
- &mlx
|
||||
name: "mlx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx"
|
||||
@@ -200,7 +225,7 @@
|
||||
amd: "rocm-rerankers"
|
||||
- &transformers
|
||||
name: "transformers"
|
||||
icon: https://camo.githubusercontent.com/26569a27b8a30a488dd345024b71dbc05da7ff1b2ba97bb6080c9f1ee0f26cc7/68747470733a2f2f68756767696e67666163652e636f2f64617461736574732f68756767696e67666163652f646f63756d656e746174696f6e2d696d616765732f7265736f6c76652f6d61696e2f7472616e73666f726d6572732f7472616e73666f726d6572735f61735f615f6d6f64656c5f646566696e6974696f6e2e706e67
|
||||
icon: https://avatars.githubusercontent.com/u/25720743?s=200&v=4
|
||||
alias: "transformers"
|
||||
license: apache-2.0
|
||||
description: |
|
||||
@@ -241,22 +266,6 @@
|
||||
nvidia-cuda-12: "cuda12-diffusers"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-diffusers"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-diffusers"
|
||||
- &exllama2
|
||||
name: "exllama2"
|
||||
urls:
|
||||
- https://github.com/turboderp-org/exllamav2
|
||||
tags:
|
||||
- text-to-text
|
||||
- LLM
|
||||
- EXL2
|
||||
license: MIT
|
||||
description: |
|
||||
ExLlamaV2 is an inference library for running local LLMs on modern consumer GPUs.
|
||||
alias: "exllama2"
|
||||
capabilities:
|
||||
nvidia: "cuda12-exllama2"
|
||||
intel: "intel-exllama2"
|
||||
nvidia-cuda-12: "cuda12-exllama2"
|
||||
- &faster-whisper
|
||||
icon: https://avatars.githubusercontent.com/u/1520500?s=200&v=4
|
||||
description: |
|
||||
@@ -293,6 +302,25 @@
|
||||
default: "cpu-moonshine"
|
||||
nvidia-cuda-13: "cuda13-moonshine"
|
||||
nvidia-cuda-12: "cuda12-moonshine"
|
||||
- &whisperx
|
||||
description: |
|
||||
WhisperX provides fast automatic speech recognition with word-level timestamps, speaker diarization,
|
||||
and forced alignment. Built on faster-whisper and pyannote-audio for high-accuracy transcription
|
||||
with speaker identification.
|
||||
urls:
|
||||
- https://github.com/m-bain/whisperX
|
||||
tags:
|
||||
- speech-to-text
|
||||
- diarization
|
||||
- whisperx
|
||||
license: BSD-4-Clause
|
||||
name: "whisperx"
|
||||
capabilities:
|
||||
nvidia: "cuda12-whisperx"
|
||||
amd: "rocm-whisperx"
|
||||
default: "cpu-whisperx"
|
||||
nvidia-cuda-13: "cuda13-whisperx"
|
||||
nvidia-cuda-12: "cuda12-whisperx"
|
||||
- &kokoro
|
||||
icon: https://avatars.githubusercontent.com/u/166769057?v=4
|
||||
description: |
|
||||
@@ -339,51 +367,6 @@
|
||||
nvidia-cuda-13: "cuda13-coqui"
|
||||
nvidia-cuda-12: "cuda12-coqui"
|
||||
icon: https://avatars.githubusercontent.com/u/1338804?s=200&v=4
|
||||
- &bark
|
||||
urls:
|
||||
- https://github.com/suno-ai/bark
|
||||
description: |
|
||||
Bark is a transformer-based text-to-audio model created by Suno. Bark can generate highly realistic, multilingual speech as well as other audio - including music, background noise and simple sound effects. The model can also produce nonverbal communications like laughing, sighing and crying. To support the research community, we are providing access to pretrained model checkpoints, which are ready for inference and available for commercial use.
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: MIT
|
||||
name: "bark"
|
||||
alias: "bark"
|
||||
capabilities:
|
||||
cuda: "cuda12-bark"
|
||||
intel: "intel-bark"
|
||||
rocm: "rocm-bark"
|
||||
nvidia-cuda-13: "cuda13-bark"
|
||||
nvidia-cuda-12: "cuda12-bark"
|
||||
icon: https://avatars.githubusercontent.com/u/99442120?s=200&v=4
|
||||
- &barkcpp
|
||||
urls:
|
||||
- https://github.com/PABannier/bark.cpp
|
||||
description: |
|
||||
With bark.cpp, our goal is to bring real-time realistic multilingual text-to-speech generation to the community.
|
||||
|
||||
Plain C/C++ implementation without dependencies
|
||||
AVX, AVX2 and AVX512 for x86 architectures
|
||||
CPU and GPU compatible backends
|
||||
Mixed F16 / F32 precision
|
||||
4-bit, 5-bit and 8-bit integer quantization
|
||||
Metal and CUDA backends
|
||||
|
||||
Models supported
|
||||
|
||||
Bark Small
|
||||
Bark Large
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: MIT
|
||||
icon: https://github.com/PABannier/bark.cpp/raw/main/assets/banner.png
|
||||
name: "bark-cpp"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-bark-cpp"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-bark-cpp
|
||||
alias: "bark-cpp"
|
||||
- &chatterbox
|
||||
urls:
|
||||
- https://github.com/resemble-ai/chatterbox
|
||||
@@ -394,7 +377,7 @@
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: MIT
|
||||
icon: https://private-user-images.githubusercontent.com/660224/448166653-bd8c5f03-e91d-4ee5-b680-57355da204d1.png?jwt=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJnaXRodWIuY29tIiwiYXVkIjoicmF3LmdpdGh1YnVzZXJjb250ZW50LmNvbSIsImtleSI6ImtleTUiLCJleHAiOjE3NTAxOTE0MDAsIm5iZiI6MTc1MDE5MTEwMCwicGF0aCI6Ii82NjAyMjQvNDQ4MTY2NjUzLWJkOGM1ZjAzLWU5MWQtNGVlNS1iNjgwLTU3MzU1ZGEyMDRkMS5wbmc_WC1BbXotQWxnb3JpdGhtPUFXUzQtSE1BQy1TSEEyNTYmWC1BbXotQ3JlZGVudGlhbD1BS0lBVkNPRFlMU0E1M1BRSzRaQSUyRjIwMjUwNjE3JTJGdXMtZWFzdC0xJTJGczMlMkZhd3M0X3JlcXVlc3QmWC1BbXotRGF0ZT0yMDI1MDYxN1QyMDExNDBaJlgtQW16LUV4cGlyZXM9MzAwJlgtQW16LVNpZ25hdHVyZT1hMmI1NGY3OGFiZTlhNGFkNTVlYTY4NTIwMWEzODRiZGE4YzdhNGQ5MGNhNzE3MDYyYTA2NDIxYTkyYzhiODkwJlgtQW16LVNpZ25lZEhlYWRlcnM9aG9zdCJ9.mR9kM9xX0TdzPuSpuspCllHYQiq79dFQ2rtuNvjrl6w
|
||||
icon: https://avatars.githubusercontent.com/u/49844015?s=200&v=4
|
||||
name: "chatterbox"
|
||||
alias: "chatterbox"
|
||||
capabilities:
|
||||
@@ -428,6 +411,69 @@
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-vibevoice"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-vibevoice"
|
||||
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
|
||||
- &qwen-tts
|
||||
urls:
|
||||
- https://github.com/QwenLM/Qwen3-TTS
|
||||
description: |
|
||||
Qwen3-TTS is a high-quality text-to-speech model supporting custom voice, voice design, and voice cloning.
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: apache-2.0
|
||||
name: "qwen-tts"
|
||||
alias: "qwen-tts"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-tts"
|
||||
intel: "intel-qwen-tts"
|
||||
amd: "rocm-qwen-tts"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-tts"
|
||||
default: "cpu-qwen-tts"
|
||||
nvidia-cuda-13: "cuda13-qwen-tts"
|
||||
nvidia-cuda-12: "cuda12-qwen-tts"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-tts"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-tts"
|
||||
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
|
||||
- &qwen-asr
|
||||
urls:
|
||||
- https://github.com/QwenLM/Qwen3-ASR
|
||||
description: |
|
||||
Qwen3-ASR is an automatic speech recognition model supporting multiple languages and batch inference.
|
||||
tags:
|
||||
- speech-recognition
|
||||
- ASR
|
||||
license: apache-2.0
|
||||
name: "qwen-asr"
|
||||
alias: "qwen-asr"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-asr"
|
||||
intel: "intel-qwen-asr"
|
||||
amd: "rocm-qwen-asr"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-asr"
|
||||
default: "cpu-qwen-asr"
|
||||
nvidia-cuda-13: "cuda13-qwen-asr"
|
||||
nvidia-cuda-12: "cuda12-qwen-asr"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-asr"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-asr"
|
||||
icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
|
||||
- &voxcpm
|
||||
urls:
|
||||
- https://github.com/ModelBest/VoxCPM
|
||||
description: |
|
||||
VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly expressive speech.
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: mit
|
||||
name: "voxcpm"
|
||||
alias: "voxcpm"
|
||||
capabilities:
|
||||
nvidia: "cuda12-voxcpm"
|
||||
intel: "intel-voxcpm"
|
||||
amd: "rocm-voxcpm"
|
||||
default: "cpu-voxcpm"
|
||||
nvidia-cuda-13: "cuda13-voxcpm"
|
||||
nvidia-cuda-12: "cuda12-voxcpm"
|
||||
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
|
||||
- &pocket-tts
|
||||
urls:
|
||||
- https://github.com/kyutai-labs/pocket-tts
|
||||
@@ -449,7 +495,7 @@
|
||||
nvidia-cuda-12: "cuda12-pocket-tts"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-pocket-tts"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-pocket-tts"
|
||||
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
|
||||
icon: https://avatars.githubusercontent.com/u/151010778?s=200&v=4
|
||||
- &piper
|
||||
name: "piper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-piper"
|
||||
@@ -967,6 +1013,33 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-vllm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-vllm
|
||||
# vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "vllm-omni-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-vllm-omni-development"
|
||||
amd: "rocm-vllm-omni-development"
|
||||
nvidia-cuda-12: "cuda12-vllm-omni-development"
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "cuda12-vllm-omni"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "rocm-vllm-omni"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "cuda12-vllm-omni-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-vllm-omni
|
||||
- !!merge <<: *vllm-omni
|
||||
name: "rocm-vllm-omni-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-vllm-omni"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-vllm-omni
|
||||
# rfdetr
|
||||
- !!merge <<: *rfdetr
|
||||
name: "rfdetr-development"
|
||||
@@ -1229,22 +1302,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-metal-darwin-arm64-diffusers
|
||||
## exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "exllama2-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-exllama2-development"
|
||||
intel: "intel-exllama2-development"
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-exllama2
|
||||
- !!merge <<: *exllama2
|
||||
name: "cuda12-exllama2-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-exllama2
|
||||
## kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "kokoro-development"
|
||||
@@ -1379,6 +1436,55 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
|
||||
## whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "whisperx-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-whisperx-development"
|
||||
amd: "rocm-whisperx-development"
|
||||
default: "cpu-whisperx-development"
|
||||
nvidia-cuda-13: "cuda13-whisperx-development"
|
||||
nvidia-cuda-12: "cuda12-whisperx-development"
|
||||
- !!merge <<: *whisperx
|
||||
name: "cpu-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cpu-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda12-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda12-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "rocm-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "rocm-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda13-whisperx"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-whisperx
|
||||
- !!merge <<: *whisperx
|
||||
name: "cuda13-whisperx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-whisperx"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-whisperx
|
||||
## coqui
|
||||
|
||||
- !!merge <<: *coqui
|
||||
@@ -1417,47 +1523,6 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-coqui"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-coqui
|
||||
## bark
|
||||
- !!merge <<: *bark
|
||||
name: "bark-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-bark-development"
|
||||
intel: "intel-bark-development"
|
||||
amd: "rocm-bark-development"
|
||||
- !!merge <<: *bark
|
||||
name: "rocm-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-bark
|
||||
- !!merge <<: *bark
|
||||
name: "intel-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-bark
|
||||
- !!merge <<: *bark
|
||||
name: "intel-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-bark
|
||||
- !!merge <<: *bark
|
||||
name: "cuda12-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-bark
|
||||
- !!merge <<: *bark
|
||||
name: "rocm-bark"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-bark
|
||||
- !!merge <<: *bark
|
||||
name: "cuda12-bark-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-bark"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-bark
|
||||
- !!merge <<: *barkcpp
|
||||
name: "bark-cpp-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-bark-cpp"
|
||||
alias: "bark-cpp"
|
||||
## chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "chatterbox-development"
|
||||
@@ -1613,6 +1678,232 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice
|
||||
## qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "qwen-tts-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-tts-development"
|
||||
intel: "intel-qwen-tts-development"
|
||||
amd: "rocm-qwen-tts-development"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-tts-development"
|
||||
default: "cpu-qwen-tts-development"
|
||||
nvidia-cuda-13: "cuda13-qwen-tts-development"
|
||||
nvidia-cuda-12: "cuda12-qwen-tts-development"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-tts-development"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-tts-development"
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cpu-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cpu-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda12-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda12-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "intel-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "intel-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "rocm-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "rocm-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "nvidia-l4t-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "nvidia-l4t-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-tts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-tts
|
||||
- !!merge <<: *qwen-tts
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-tts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts
|
||||
## qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "qwen-asr-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-qwen-asr-development"
|
||||
intel: "intel-qwen-asr-development"
|
||||
amd: "rocm-qwen-asr-development"
|
||||
nvidia-l4t: "nvidia-l4t-qwen-asr-development"
|
||||
default: "cpu-qwen-asr-development"
|
||||
nvidia-cuda-13: "cuda13-qwen-asr-development"
|
||||
nvidia-cuda-12: "cuda12-qwen-asr-development"
|
||||
nvidia-l4t-cuda-12: "nvidia-l4t-qwen-asr-development"
|
||||
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-asr-development"
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cpu-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cpu-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda12-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda12-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "intel-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "intel-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "rocm-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "rocm-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "nvidia-l4t-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "nvidia-l4t-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-asr"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-qwen-asr
|
||||
- !!merge <<: *qwen-asr
|
||||
name: "cuda13-nvidia-l4t-arm64-qwen-asr-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr
|
||||
## voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "voxcpm-development"
|
||||
capabilities:
|
||||
nvidia: "cuda12-voxcpm-development"
|
||||
intel: "intel-voxcpm-development"
|
||||
amd: "rocm-voxcpm-development"
|
||||
default: "cpu-voxcpm-development"
|
||||
nvidia-cuda-13: "cuda13-voxcpm-development"
|
||||
nvidia-cuda-12: "cuda12-voxcpm-development"
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cpu-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cpu-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda12-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda12-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda13-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "cuda13-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "intel-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-intel-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "intel-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "rocm-voxcpm"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-voxcpm
|
||||
- !!merge <<: *voxcpm
|
||||
name: "rocm-voxcpm-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-voxcpm"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-voxcpm
|
||||
## pocket-tts
|
||||
- !!merge <<: *pocket-tts
|
||||
name: "pocket-tts-development"
|
||||
|
||||
@@ -16,10 +16,8 @@ The Python backends use a unified build system based on `libbackend.sh` that pro
- **transformers** - Hugging Face Transformers framework (PyTorch-based)
- **vllm** - High-performance LLM inference engine
- **mlx** - Apple Silicon optimized ML framework
- **exllama2** - ExLlama2 quantized models

### Audio & Speech
- **bark** - Text-to-speech synthesis
- **coqui** - Coqui TTS models
- **faster-whisper** - Fast Whisper speech recognition
- **kitten-tts** - Lightweight TTS

@@ -1,16 +0,0 @@
|
||||
# Creating a separate environment for ttsbark project
|
||||
|
||||
```
|
||||
make ttsbark
|
||||
```
|
||||
|
||||
# Testing the gRPC server
|
||||
|
||||
```
|
||||
<The path of your python interpreter> -m unittest test_ttsbark.py
|
||||
```
|
||||
|
||||
For example
|
||||
```
|
||||
/opt/conda/envs/bark/bin/python -m unittest extra/grpc/bark/test_ttsbark.py
|
||||
``````
|
||||
@@ -1,98 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Bark TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
from scipy.io.wavfile import write as write_wav
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
from bark import SAMPLE_RATE, generate_audio, preload_models
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
def LoadModel(self, request, context):
|
||||
model_name = request.Model
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
# download and load all models
|
||||
preload_models()
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
# Implement your logic here for the LoadModel service
|
||||
# Replace this with your desired response
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def TTS(self, request, context):
|
||||
model = request.model
|
||||
print(request, file=sys.stderr)
|
||||
try:
|
||||
audio_array = None
|
||||
if model != "":
|
||||
audio_array = generate_audio(request.text, history_prompt=model)
|
||||
else:
|
||||
audio_array = generate_audio(request.text)
|
||||
print("saving to", request.dst, file=sys.stderr)
|
||||
# save audio to disk
|
||||
write_wav(request.dst, SAMPLE_RATE, audio_array)
|
||||
print("saved to", request.dst, file=sys.stderr)
|
||||
print("tts for", file=sys.stderr)
|
||||
print(request, file=sys.stderr)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
@@ -1,19 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
|
||||
# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
|
||||
# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
|
||||
# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
|
||||
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
@@ -1,4 +0,0 @@
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1
|
||||
torchaudio==2.4.1
|
||||
@@ -1,4 +0,0 @@
|
||||
torch==2.4.1
|
||||
torchaudio==2.4.1
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,5 +0,0 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.4
|
||||
torch==2.8.0+rocm6.4
|
||||
torchaudio==2.8.0+rocm6.4
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,9 +0,0 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchaudio==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,4 +0,0 @@
|
||||
bark==0.1.5
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
@@ -1,7 +1,6 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi
--extra-index-url https://download.pytorch.org/whl/xpu
torch
torchaudio
transformers
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289

@@ -398,7 +398,7 @@ function runProtogen() {
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
# you may want to add the following line to a requirements-intel.txt if you use one:
#
# --index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
# --index-url https://download.pytorch.org/whl/xpu
#
# If you need to add extra flags into the pip install command you can do so by setting the variable EXTRA_PIP_INSTALL_FLAGS
# before calling installRequirements. For example:

@@ -1,5 +1,4 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu
--extra-index-url https://download.pytorch.org/whl/xpu
torch==2.8.0
oneccl_bind_pt==2.8.0+xpu
optimum[openvino]
@@ -1,4 +1,4 @@
# Creating a separate environment for ttsbark project
# Creating a separate environment for coqui project

```
make coqui

@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for Bark TTS
This is an extra gRPC server of LocalAI for Coqui TTS
"""
from concurrent import futures
import time

@@ -1,8 +1,6 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu
--extra-index-url https://download.pytorch.org/whl/xpu
torch==2.8.0+xpu
torchaudio==2.8.0+xpu
optimum[openvino]
setuptools
transformers==4.48.3

@@ -42,12 +42,8 @@ from transformers import T5EncoderModel
from safetensors.torch import load_file

# Import LTX-2 specific utilities
try:
from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
LTX2_AVAILABLE = True
except ImportError:
LTX2_AVAILABLE = False
ltx2_encode_video = None
from diffusers.pipelines.ltx2.export_utils import encode_video as ltx2_encode_video
from diffusers import LTX2VideoTransformer3DModel, GGUFQuantizationConfig

_ONE_DAY_IN_SECONDS = 60 * 60 * 24
COMPEL = os.environ.get("COMPEL", "0") == "1"
@@ -302,12 +298,96 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
if pipeline_type == "LTX2ImageToVideoPipeline":
|
||||
self.img2vid = True
|
||||
self.ltx2_pipeline = True
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
# Check if loading from single file (GGUF)
|
||||
if fromSingleFile and LTX2VideoTransformer3DModel is not None:
|
||||
_, single_file_ext = os.path.splitext(modelFile)
|
||||
if single_file_ext == ".gguf":
|
||||
# Load transformer from single GGUF file with quantization
|
||||
transformer_kwargs = {}
|
||||
quantization_config = GGUFQuantizationConfig(compute_dtype=torchType)
|
||||
transformer_kwargs["quantization_config"] = quantization_config
|
||||
|
||||
transformer = LTX2VideoTransformer3DModel.from_single_file(
|
||||
modelFile,
|
||||
config=request.Model, # Use request.Model as the config/model_id
|
||||
subfolder="transformer",
|
||||
**transformer_kwargs,
|
||||
)
|
||||
|
||||
# Load pipeline with custom transformer
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
transformer=transformer,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Single file but not GGUF - use standard single file loading
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=modelFile,
|
||||
from_single_file=True,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Standard loading from pretrained
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2ImageToVideoPipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
if not DISABLE_CPU_OFFLOAD:
|
||||
pipe.enable_model_cpu_offload()
|
||||
return pipe
|
||||
|
||||
# LTX2Pipeline - text-to-video pipeline, needs txt2vid flag, CPU offload, and special handling
|
||||
if pipeline_type == "LTX2Pipeline":
|
||||
self.txt2vid = True
|
||||
self.ltx2_pipeline = True
|
||||
|
||||
# Check if loading from single file (GGUF)
|
||||
if fromSingleFile and LTX2VideoTransformer3DModel is not None:
|
||||
_, single_file_ext = os.path.splitext(modelFile)
|
||||
if single_file_ext == ".gguf":
|
||||
# Load transformer from single GGUF file with quantization
|
||||
transformer_kwargs = {}
|
||||
quantization_config = GGUFQuantizationConfig(compute_dtype=torchType)
|
||||
transformer_kwargs["quantization_config"] = quantization_config
|
||||
|
||||
transformer = LTX2VideoTransformer3DModel.from_single_file(
|
||||
modelFile,
|
||||
config=request.Model, # Use request.Model as the config/model_id
|
||||
subfolder="transformer",
|
||||
**transformer_kwargs,
|
||||
)
|
||||
|
||||
# Load pipeline with custom transformer
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2Pipeline",
|
||||
model_id=request.Model,
|
||||
transformer=transformer,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Single file but not GGUF - use standard single file loading
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2Pipeline",
|
||||
model_id=modelFile,
|
||||
from_single_file=True,
|
||||
torch_dtype=torchType,
|
||||
)
|
||||
else:
|
||||
# Standard loading from pretrained
|
||||
pipe = load_diffusers_pipeline(
|
||||
class_name="LTX2Pipeline",
|
||||
model_id=request.Model,
|
||||
torch_dtype=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
if not DISABLE_CPU_OFFLOAD:
|
||||
pipe.enable_model_cpu_offload()
|
||||
return pipe
|
||||
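A minimal sketch of the GGUF single-file path the two hunks above add, assuming a diffusers build that ships LTX2Pipeline, LTX2VideoTransformer3DModel and GGUF quantization support; the repository id and .gguf path below are placeholders, not values taken from this change.

    import torch
    from diffusers import GGUFQuantizationConfig, LTX2Pipeline, LTX2VideoTransformer3DModel

    def load_ltx2_from_gguf(model_id: str, gguf_path: str, dtype=torch.bfloat16):
        # Quantized transformer loaded from the single .gguf file; the config is
        # resolved from the repository id, mirroring the backend code above.
        transformer = LTX2VideoTransformer3DModel.from_single_file(
            gguf_path,
            config=model_id,
            subfolder="transformer",
            quantization_config=GGUFQuantizationConfig(compute_dtype=dtype),
        )
        # The remaining components (text encoder, VAE, vocoder, scheduler) come
        # from the pretrained repository.
        pipe = LTX2Pipeline.from_pretrained(model_id, transformer=transformer, torch_dtype=dtype)
        pipe.enable_model_cpu_offload()  # matches the default (CPU offload enabled) path above
        return pipe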
@@ -428,6 +508,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
self.txt2vid = False
|
||||
self.ltx2_pipeline = False
|
||||
|
||||
print(f"LoadModel: PipelineType from request: {request.PipelineType}", file=sys.stderr)
|
||||
|
||||
# Load pipeline using dynamic loader
|
||||
# Special cases that require custom initialization are handled first
|
||||
self.pipe = self._load_pipeline(
|
||||
@@ -437,6 +519,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
torchType=torchType,
|
||||
variant=variant
|
||||
)
|
||||
|
||||
print(f"LoadModel: After loading - ltx2_pipeline: {self.ltx2_pipeline}, img2vid: {self.img2vid}, txt2vid: {self.txt2vid}, PipelineType: {self.PipelineType}", file=sys.stderr)
|
||||
|
||||
if CLIPSKIP and request.CLIPSkip != 0:
|
||||
self.clip_skip = request.CLIPSkip
|
||||
@@ -674,14 +758,20 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
try:
|
||||
prompt = request.prompt
|
||||
if not prompt:
|
||||
print(f"GenerateVideo: No prompt provided for video generation.", file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message="No prompt provided for video generation")
|
||||
|
||||
# Debug: Print raw request values
|
||||
print(f"GenerateVideo: Raw request values - num_frames: {request.num_frames}, fps: {request.fps}, cfg_scale: {request.cfg_scale}, step: {request.step}", file=sys.stderr)
|
||||
|
||||
# Set default values from request or use defaults
|
||||
num_frames = request.num_frames if request.num_frames > 0 else 81
|
||||
fps = request.fps if request.fps > 0 else 16
|
||||
cfg_scale = request.cfg_scale if request.cfg_scale > 0 else 4.0
|
||||
num_inference_steps = request.step if request.step > 0 else 40
|
||||
|
||||
print(f"GenerateVideo: Using values - num_frames: {num_frames}, fps: {fps}, cfg_scale: {cfg_scale}, num_inference_steps: {num_inference_steps}", file=sys.stderr)
|
||||
|
||||
# Prepare generation parameters
|
||||
kwargs = {
|
||||
"prompt": prompt,
|
||||
@@ -707,19 +797,34 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
kwargs["end_image"] = load_image(request.end_image)
|
||||
|
||||
print(f"Generating video with {kwargs=}", file=sys.stderr)
|
||||
print(f"GenerateVideo: Pipeline type: {self.PipelineType}, ltx2_pipeline flag: {self.ltx2_pipeline}", file=sys.stderr)
|
||||
|
||||
# Generate video frames based on pipeline type
|
||||
if self.ltx2_pipeline or self.PipelineType == "LTX2ImageToVideoPipeline":
|
||||
# LTX-2 image-to-video generation with audio
|
||||
if not LTX2_AVAILABLE:
|
||||
return backend_pb2.Result(success=False, message="LTX-2 pipeline requires diffusers.pipelines.ltx2.export_utils")
|
||||
if self.ltx2_pipeline or self.PipelineType in ["LTX2Pipeline", "LTX2ImageToVideoPipeline"]:
|
||||
# LTX-2 generation with audio (supports both text-to-video and image-to-video)
|
||||
# Determine if this is text-to-video (no image) or image-to-video (has image)
|
||||
has_image = bool(request.start_image)
|
||||
|
||||
# LTX-2 uses 'image' parameter instead of 'start_image'
|
||||
if request.start_image:
|
||||
image = load_image(request.start_image)
|
||||
kwargs["image"] = image
|
||||
# Remove start_image if it was added
|
||||
kwargs.pop("start_image", None)
|
||||
# Remove image-related parameters that might have been added earlier
|
||||
kwargs.pop("start_image", None)
|
||||
kwargs.pop("end_image", None)
|
||||
|
||||
# LTX2ImageToVideoPipeline uses 'image' parameter for image-to-video
|
||||
# LTX2Pipeline (text-to-video) doesn't need an image parameter
|
||||
if has_image:
|
||||
# Image-to-video: use 'image' parameter
|
||||
if self.PipelineType == "LTX2ImageToVideoPipeline":
|
||||
image = load_image(request.start_image)
|
||||
kwargs["image"] = image
|
||||
print(f"LTX-2: Using image-to-video mode with image", file=sys.stderr)
|
||||
else:
|
||||
# If pipeline type is LTX2Pipeline but we have an image, we can't do image-to-video
|
||||
return backend_pb2.Result(success=False, message="LTX2Pipeline does not support image-to-video. Use LTX2ImageToVideoPipeline for image-to-video generation.")
|
||||
else:
|
||||
# Text-to-video: no image parameter needed
|
||||
# Ensure no image-related kwargs are present
|
||||
kwargs.pop("image", None)
|
||||
print(f"LTX-2: Using text-to-video mode (no image)", file=sys.stderr)
|
||||
|
||||
# LTX-2 uses 'frame_rate' instead of 'fps'
|
||||
frame_rate = float(fps)
|
||||
@@ -730,20 +835,45 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
kwargs["return_dict"] = False
|
||||
|
||||
# Generate video and audio
|
||||
video, audio = self.pipe(**kwargs)
|
||||
print(f"LTX-2: Generating with kwargs: {kwargs}", file=sys.stderr)
|
||||
try:
|
||||
video, audio = self.pipe(**kwargs)
|
||||
print(f"LTX-2: Generated video shape: {video.shape}, audio shape: {audio.shape}", file=sys.stderr)
|
||||
except Exception as e:
|
||||
print(f"LTX-2: Error during pipe() call: {e}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Error generating video with LTX-2 pipeline: {e}")
|
||||
|
||||
# Convert video to uint8 format
|
||||
video = (video * 255).round().astype("uint8")
|
||||
video = torch.from_numpy(video)
|
||||
|
||||
print(f"LTX-2: Converting video, shape after conversion: {video.shape}", file=sys.stderr)
|
||||
print(f"LTX-2: Audio sample rate: {self.pipe.vocoder.config.output_sampling_rate}", file=sys.stderr)
|
||||
print(f"LTX-2: Output path: {request.dst}", file=sys.stderr)
|
||||
|
||||
# Use LTX-2's encode_video function which handles audio
|
||||
ltx2_encode_video(
|
||||
video[0],
|
||||
fps=frame_rate,
|
||||
audio=audio[0].float().cpu(),
|
||||
audio_sample_rate=self.pipe.vocoder.config.output_sampling_rate,
|
||||
output_path=request.dst,
|
||||
)
|
||||
try:
|
||||
ltx2_encode_video(
|
||||
video[0],
|
||||
fps=frame_rate,
|
||||
audio=audio[0].float().cpu(),
|
||||
audio_sample_rate=self.pipe.vocoder.config.output_sampling_rate,
|
||||
output_path=request.dst,
|
||||
)
|
||||
# Verify file was created and has content
|
||||
import os
|
||||
if os.path.exists(request.dst):
|
||||
file_size = os.path.getsize(request.dst)
|
||||
print(f"LTX-2: Video file created successfully, size: {file_size} bytes", file=sys.stderr)
|
||||
if file_size == 0:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was created but is empty (0 bytes). Check LTX-2 encode_video function.")
|
||||
else:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was not created at {request.dst}")
|
||||
except Exception as e:
|
||||
print(f"LTX-2: Error encoding video: {e}", file=sys.stderr)
|
||||
traceback.print_exc()
|
||||
return backend_pb2.Result(success=False, message=f"Error encoding video: {e}")
|
||||
|
||||
return backend_pb2.Result(message="Video generated successfully", success=True)
|
||||
elif self.PipelineType == "WanPipeline":
|
||||
@@ -785,11 +915,23 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
output = self.pipe(**kwargs)
|
||||
frames = output.frames[0]
|
||||
else:
|
||||
print(f"GenerateVideo: Pipeline {self.PipelineType} does not match any known video pipeline handler", file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"Pipeline {self.PipelineType} does not support video generation")
|
||||
|
||||
# Export video (for non-LTX-2 pipelines)
|
||||
print(f"GenerateVideo: Exporting video to {request.dst} with fps={fps}", file=sys.stderr)
|
||||
export_to_video(frames, request.dst, fps=fps)
|
||||
|
||||
# Verify file was created
|
||||
import os
|
||||
if os.path.exists(request.dst):
|
||||
file_size = os.path.getsize(request.dst)
|
||||
print(f"GenerateVideo: Video file created, size: {file_size} bytes", file=sys.stderr)
|
||||
if file_size == 0:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was created but is empty (0 bytes)")
|
||||
else:
|
||||
return backend_pb2.Result(success=False, message=f"Video file was not created at {request.dst}")
|
||||
|
||||
return backend_pb2.Result(message="Video generated successfully", success=True)
|
||||
|
||||
except Exception as err:
|
||||
|
||||
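The GenerateVideo changes above use the same call pattern for both LTX-2 pipelines: with return_dict=False the pipeline returns a (video, audio) pair, and the ltx2 export helper muxes both into the destination file. A hedged sketch of that flow, assuming the export helper lives at the import path the backend probes and that a loaded pipe is available; keyword names not visible in the hunks (num_frames, num_inference_steps, guidance_scale) are assumptions.

    import torch
    from diffusers.pipelines.ltx2.export_utils import encode_video

    def generate_ltx2(pipe, prompt: str, dst: str, num_frames=81, frame_rate=16.0, steps=40, cfg=4.0):
        # LTX-2 takes frame_rate instead of fps; return_dict=False yields raw outputs.
        video, audio = pipe(
            prompt=prompt,
            num_frames=num_frames,
            frame_rate=frame_rate,
            num_inference_steps=steps,
            guidance_scale=cfg,
            return_dict=False,
        )
        # Pipeline output is float video data in [0, 1]; convert to uint8 frames.
        video = torch.from_numpy((video * 255).round().astype("uint8"))
        encode_video(
            video[0],
            fps=frame_rate,
            audio=audio[0].float().cpu(),
            audio_sample_rate=pipe.vocoder.config.output_sampling_rate,
            output_path=dst,
        )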
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
torchvision==0.20.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchvision
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
git+https://github.com/huggingface/diffusers
|
||||
|
||||
@@ -3,3 +3,4 @@ grpcio==1.76.0
|
||||
pillow
|
||||
protobuf
|
||||
certifi
|
||||
av
|
||||
|
||||
backend/python/exllama2/.gitignore (vendored, 1 line)
@@ -1 +0,0 @@
|
||||
source
|
||||
@@ -1,17 +0,0 @@
|
||||
.PHONY: exllama2
|
||||
exllama2:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: exllama2
|
||||
@echo "Running exllama2..."
|
||||
bash run.sh
|
||||
@echo "exllama2 run."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
$(RM) -r venv source __pycache__
|
||||
@@ -1,143 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import grpc
|
||||
from concurrent import futures
|
||||
import time
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import glob
|
||||
|
||||
from pathlib import Path
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from torch import version as torch_version
|
||||
|
||||
|
||||
from exllamav2.generator import (
|
||||
ExLlamaV2BaseGenerator,
|
||||
ExLlamaV2Sampler
|
||||
)
|
||||
|
||||
|
||||
from exllamav2 import (
|
||||
ExLlamaV2,
|
||||
ExLlamaV2Config,
|
||||
ExLlamaV2Cache,
|
||||
ExLlamaV2Cache_8bit,
|
||||
ExLlamaV2Tokenizer,
|
||||
model_init,
|
||||
)
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
try:
|
||||
model_directory = request.ModelFile
|
||||
|
||||
config = ExLlamaV2Config()
|
||||
config.model_dir = model_directory
|
||||
config.prepare()
|
||||
|
||||
model = ExLlamaV2(config)
|
||||
|
||||
cache = ExLlamaV2Cache(model, lazy=True)
|
||||
model.load_autosplit(cache)
|
||||
|
||||
tokenizer = ExLlamaV2Tokenizer(config)
|
||||
|
||||
# Initialize generator
|
||||
|
||||
generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)
|
||||
|
||||
self.generator = generator
|
||||
|
||||
generator.warmup()
|
||||
self.model = model
|
||||
self.tokenizer = tokenizer
|
||||
self.cache = cache
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def Predict(self, request, context):
|
||||
|
||||
penalty = 1.15
|
||||
if request.Penalty != 0.0:
|
||||
penalty = request.Penalty
|
||||
|
||||
settings = ExLlamaV2Sampler.Settings()
|
||||
settings.temperature = request.Temperature
|
||||
settings.top_k = request.TopK
|
||||
settings.top_p = request.TopP
|
||||
settings.token_repetition_penalty = penalty
|
||||
settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])
|
||||
tokens = 512
|
||||
|
||||
if request.Tokens != 0:
|
||||
tokens = request.Tokens
|
||||
output = self.generator.generate_simple(
|
||||
request.Prompt, settings, tokens)
|
||||
|
||||
# Remove prompt from response if present
|
||||
if request.Prompt in output:
|
||||
output = output.replace(request.Prompt, "")
|
||||
|
||||
return backend_pb2.Result(message=bytes(output, encoding='utf-8'))
|
||||
|
||||
def PredictStream(self, request, context):
|
||||
# Implement PredictStream RPC
|
||||
# for reply in some_data_generator():
|
||||
# yield reply
|
||||
# Not implemented yet
|
||||
return self.Predict(request, context)
|
||||
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
@@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
LIMIT_TARGETS="cublas"
|
||||
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
|
||||
EXLLAMA2_VERSION=c0ddebaaaf8ffd1b3529c2bb654e650bce2f790f
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
|
||||
git clone https://github.com/turboderp/exllamav2 $MY_DIR/source
|
||||
pushd ${MY_DIR}/source && git checkout -b build ${EXLLAMA2_VERSION} && popd
|
||||
|
||||
# This installs exllamav2 in JIT mode so it will compile the appropriate torch extension at runtime
|
||||
EXLLAMA_NOCOMPILE= uv pip install ${EXTRA_PIP_INSTALL_FLAGS} ${MY_DIR}/source/
|
||||
@@ -1,3 +0,0 @@
|
||||
transformers
|
||||
accelerate
|
||||
torch==2.4.1
|
||||
@@ -1,3 +0,0 @@
|
||||
torch==2.4.1
|
||||
transformers
|
||||
accelerate
|
||||
@@ -1,4 +0,0 @@
|
||||
# This is here to trigger the install script to add --no-build-isolation to the uv pip install commands
|
||||
# exllama2 does not specify it's build requirements per PEP517, so we need to provide some things ourselves
|
||||
wheel
|
||||
setuptools
|
||||
@@ -1,5 +0,0 @@
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
wheel
|
||||
setuptools
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Bark TTS
|
||||
This is an extra gRPC server of LocalAI for Faster Whisper TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
@@ -40,7 +40,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
device = "mps"
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
self.model = WhisperModel(request.Model, device=device, compute_type="float16")
|
||||
self.model = WhisperModel(request.Model, device=device, compute_type="default")
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
# Implement your logic here for the LoadModel service
|
||||
@@ -55,11 +55,12 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
id = 0
|
||||
for segment in segments:
|
||||
print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))
|
||||
resultSegments.append(backend_pb2.TranscriptSegment(id=id, start=segment.start, end=segment.end, text=segment.text))
|
||||
resultSegments.append(backend_pb2.TranscriptSegment(id=id, start=int(segment.start)*1e9, end=int(segment.end)*1e9, text=segment.text))
|
||||
text += segment.text
|
||||
id += 1
|
||||
id += 1
|
||||
except Exception as err:
|
||||
print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
|
||||
raise err
|
||||
|
||||
return backend_pb2.TranscriptResult(segments=resultSegments, text=text)
|
||||
|
||||
|
||||
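The segment hunk above scales faster-whisper's float start/end seconds into integer timestamps. Assuming the intent is nanoseconds, multiplying before truncating preserves sub-second precision, as in this small sketch.

    def seconds_to_ns(t: float) -> int:
        # Multiply first, then truncate: 1.25 s -> 1_250_000_000 ns.
        # int(t) * 1e9 would instead drop the fractional part (1.25 s -> 1e9 ns).
        return int(t * 1e9)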
@@ -1,6 +1,4 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
optimum[openvino]
|
||||
faster-whisper
|
||||
@@ -1,8 +1,6 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
torchaudio==2.5.1+cxx11.abi
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
transformers==4.48.3
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
pocket-tts
|
||||
scipy
|
||||
torch==2.5.1+cxx11.abi
|
||||
torch
|
||||
backend/python/qwen-asr/Makefile (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||
.DEFAULT_GOAL := install
|
||||
|
||||
.PHONY: qwen-asr
|
||||
qwen-asr:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: qwen-asr
|
||||
@echo "Running qwen-asr..."
|
||||
bash run.sh
|
||||
@echo "qwen-asr run."
|
||||
|
||||
.PHONY: test
|
||||
test: qwen-asr
|
||||
@echo "Testing qwen-asr..."
|
||||
bash test.sh
|
||||
@echo "qwen-asr tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
backend/python/qwen-asr/backend.py (new file, 212 lines)
@@ -0,0 +1,212 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
gRPC server of LocalAI for Qwen3-ASR (transformers backend, non-vLLM).
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import torch
|
||||
from qwen_asr import Qwen3ASRModel
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
def is_float(s):
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def is_int(s):
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
if torch.cuda.is_available():
|
||||
device = "cuda"
|
||||
else:
|
||||
device = "cpu"
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
self.device = device
|
||||
self.options = {}
|
||||
|
||||
for opt in request.Options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1)
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
model_path = request.Model or "Qwen/Qwen3-ASR-1.7B"
|
||||
default_dtype = torch.bfloat16 if self.device == "cuda" else torch.float32
|
||||
load_dtype = default_dtype
|
||||
if "torch_dtype" in self.options:
|
||||
d = str(self.options["torch_dtype"]).lower()
|
||||
if d == "fp16":
|
||||
load_dtype = torch.float16
|
||||
elif d == "bf16":
|
||||
load_dtype = torch.bfloat16
|
||||
elif d == "fp32":
|
||||
load_dtype = torch.float32
|
||||
del self.options["torch_dtype"]
|
||||
|
||||
self.max_inference_batch_size = self.options.get("max_inference_batch_size", 32)
|
||||
self.max_new_tokens = self.options.get("max_new_tokens", 256)
|
||||
|
||||
forced_aligner = self.options.get("forced_aligner")
|
||||
if forced_aligner is not None and isinstance(forced_aligner, str):
|
||||
forced_aligner = forced_aligner.strip() or None
|
||||
attn_implementation = self.options.get("attn_implementation")
|
||||
if attn_implementation is not None and isinstance(attn_implementation, str):
|
||||
attn_implementation = attn_implementation.strip() or None
|
||||
|
||||
if self.device == "mps":
|
||||
device_map = None
|
||||
elif self.device == "cuda":
|
||||
device_map = "cuda:0"
|
||||
else:
|
||||
device_map = "cpu"
|
||||
|
||||
load_kwargs = dict(
|
||||
dtype=load_dtype,
|
||||
device_map=device_map,
|
||||
max_inference_batch_size=self.max_inference_batch_size,
|
||||
max_new_tokens=self.max_new_tokens,
|
||||
)
|
||||
if attn_implementation:
|
||||
load_kwargs["attn_implementation"] = attn_implementation
|
||||
if forced_aligner:
|
||||
load_kwargs["forced_aligner"] = forced_aligner
|
||||
forced_aligner_kwargs = dict(
|
||||
dtype=load_dtype,
|
||||
device_map=device_map,
|
||||
)
|
||||
if attn_implementation:
|
||||
forced_aligner_kwargs["attn_implementation"] = attn_implementation
|
||||
load_kwargs["forced_aligner_kwargs"] = forced_aligner_kwargs
|
||||
|
||||
try:
|
||||
print(f"Loading Qwen3-ASR from {model_path}", file=sys.stderr)
|
||||
if attn_implementation:
|
||||
print(f"Using attn_implementation: {attn_implementation}", file=sys.stderr)
|
||||
if forced_aligner:
|
||||
print(f"Loading with forced_aligner: {forced_aligner}", file=sys.stderr)
|
||||
self.model = Qwen3ASRModel.from_pretrained(model_path, **load_kwargs)
|
||||
print("Qwen3-ASR model loaded successfully", file=sys.stderr)
|
||||
except Exception as err:
|
||||
print(f"[ERROR] LoadModel failed: {err}", file=sys.stderr)
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=str(err))
|
||||
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def AudioTranscription(self, request, context):
|
||||
result_segments = []
|
||||
text = ""
|
||||
try:
|
||||
audio_path = request.dst
|
||||
if not audio_path or not os.path.exists(audio_path):
|
||||
print(f"Error: Audio file not found: {audio_path}", file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
language = None
|
||||
if request.language and request.language.strip():
|
||||
language = request.language.strip()
|
||||
|
||||
results = self.model.transcribe(audio=audio_path, language=language)
|
||||
|
||||
if not results:
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
r = results[0]
|
||||
text = r.text or ""
|
||||
|
||||
if getattr(r, 'time_stamps', None) and len(r.time_stamps) > 0:
|
||||
for idx, ts in enumerate(r.time_stamps):
|
||||
start_ms = 0
|
||||
end_ms = 0
|
||||
seg_text = text
|
||||
if isinstance(ts, (list, tuple)) and len(ts) >= 3:
|
||||
start_ms = int(float(ts[0]) * 1000) if ts[0] is not None else 0
|
||||
end_ms = int(float(ts[1]) * 1000) if ts[1] is not None else 0
|
||||
seg_text = ts[2] if len(ts) > 2 and ts[2] is not None else ""
|
||||
result_segments.append(backend_pb2.TranscriptSegment(
|
||||
id=idx, start=start_ms, end=end_ms, text=seg_text
|
||||
))
|
||||
else:
|
||||
if text:
|
||||
result_segments.append(backend_pb2.TranscriptSegment(
|
||||
id=0, start=0, end=0, text=text
|
||||
))
|
||||
except Exception as err:
|
||||
print(f"Error in AudioTranscription: {err}", file=sys.stderr)
|
||||
import traceback
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
return backend_pb2.TranscriptResult(segments=[], text="")
|
||||
|
||||
return backend_pb2.TranscriptResult(segments=result_segments, text=text)
|
||||
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(
|
||||
futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024),
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024),
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024),
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument("--addr", default="localhost:50051", help="The address to bind the server to.")
|
||||
args = parser.parse_args()
|
||||
serve(args.addr)
|
||||
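A hedged client sketch for the qwen-asr backend above, using the same generated stubs the bundled test exercises. Field names mirror the server code; the model id, option values and audio path are placeholders.

    import grpc
    import backend_pb2
    import backend_pb2_grpc

    with grpc.insecure_channel("localhost:50051") as channel:
        stub = backend_pb2_grpc.BackendStub(channel)
        # Options use the backend's "key:value" form and are coerced server-side
        # into floats, ints or booleans (e.g. torch_dtype, attn_implementation).
        stub.LoadModel(backend_pb2.ModelOptions(
            Model="Qwen/Qwen3-ASR-1.7B",
            Options=["torch_dtype:bf16", "max_new_tokens:256"],
        ))
        # The server reads the audio from request.dst and returns segments plus full text.
        result = stub.AudioTranscription(backend_pb2.TranscriptRequest(dst="/tmp/audio.wav"))
        print(result.text)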
backend/python/qwen-asr/install.sh (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
PYTHON_VERSION="3.12"
|
||||
PYTHON_PATCH="12"
|
||||
PY_STANDALONE_TAG="20251120"
|
||||
|
||||
installRequirements
|
||||
backend/python/qwen-asr/requirements-cpu.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements-cublas12-after.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
backend/python/qwen-asr/requirements-cublas12.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu121
|
||||
torch
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements-cublas13.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements-hipblas.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements-intel-after.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
backend/python/qwen-asr/requirements-intel.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements-l4t12.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
|
||||
torch
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements-l4t13.txt (new file, 3 lines)
@@ -0,0 +1,3 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements-mps.txt (new file, 2 lines)
@@ -0,0 +1,2 @@
|
||||
torch==2.7.1
|
||||
qwen-asr
|
||||
backend/python/qwen-asr/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
setuptools
|
||||
backend/python/bark/run.sh → backend/python/qwen-asr/run.sh (2 lines, Executable file → Normal file)
@@ -6,4 +6,4 @@ else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
startBackend $@
|
||||
backend/python/qwen-asr/test.py (new file, 94 lines)
@@ -0,0 +1,94 @@
|
||||
"""
|
||||
Tests for the Qwen3-ASR gRPC backend.
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
import tempfile
|
||||
import shutil
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
# Skip heavy transcription test in CI (model download + inference)
|
||||
SKIP_ASR_TESTS = os.environ.get("SKIP_ASR_TESTS", "false").lower() == "true"
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(15)
|
||||
|
||||
def tearDown(self):
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-1.7B"))
|
||||
self.assertTrue(response.success, response.message)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
@unittest.skipIf(SKIP_ASR_TESTS, "ASR transcription test skipped (SKIP_ASR_TESTS=true)")
|
||||
def test_audio_transcription(self):
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
audio_file = os.path.join(temp_dir, 'audio.wav')
|
||||
try:
|
||||
url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-ASR-Repo/asr_en.wav"
|
||||
result = subprocess.run(
|
||||
["wget", "-q", url, "-O", audio_file],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
self.skipTest(f"Could not download sample audio: {result.stderr}")
|
||||
if not os.path.exists(audio_file):
|
||||
self.skipTest("Sample audio file not found after download")
|
||||
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-ASR-0.6B"))
|
||||
self.assertTrue(load_response.success, load_response.message)
|
||||
|
||||
transcript_response = stub.AudioTranscription(
|
||||
backend_pb2.TranscriptRequest(dst=audio_file)
|
||||
)
|
||||
self.assertIsNotNone(transcript_response)
|
||||
self.assertIsNotNone(transcript_response.text)
|
||||
self.assertGreaterEqual(len(transcript_response.segments), 0)
|
||||
all_text = ""
|
||||
for segment in transcript_response.segments:
|
||||
all_text += segment.text
|
||||
print(f"All text: {all_text}")
|
||||
self.assertIn("big", all_text)
|
||||
if transcript_response.segments:
|
||||
self.assertIsNotNone(transcript_response.segments[0].text)
|
||||
finally:
|
||||
self.tearDown()
|
||||
if os.path.exists(temp_dir):
|
||||
shutil.rmtree(temp_dir)
|
||||
backend/python/bark/test.sh → backend/python/qwen-asr/test.sh (0 lines changed, Executable file → Normal file)
backend/python/qwen-tts/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
.PHONY: qwen-tts
|
||||
qwen-tts:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: qwen-tts
|
||||
@echo "Running qwen-tts..."
|
||||
bash run.sh
|
||||
@echo "qwen-tts run."
|
||||
|
||||
.PHONY: test
|
||||
test: qwen-tts
|
||||
@echo "Testing qwen-tts..."
|
||||
bash test.sh
|
||||
@echo "qwen-tts tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
backend/python/qwen-tts/backend.py (new file, 475 lines)
@@ -0,0 +1,475 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Qwen3-TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import copy
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import torch
|
||||
import soundfile as sf
|
||||
from qwen_tts import Qwen3TTSModel
|
||||
|
||||
import grpc
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
# Get device
|
||||
if torch.cuda.is_available():
|
||||
print("CUDA is available", file=sys.stderr)
|
||||
device = "cuda"
|
||||
else:
|
||||
print("CUDA is not available", file=sys.stderr)
|
||||
device = "cpu"
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
# Normalize potential 'mpx' typo to 'mps'
|
||||
if device == "mpx":
|
||||
print("Note: device 'mpx' detected, treating it as 'mps'.", file=sys.stderr)
|
||||
device = "mps"
|
||||
|
||||
# Validate mps availability if requested
|
||||
if device == "mps" and not torch.backends.mps.is_available():
|
||||
print("Warning: MPS not available. Falling back to CPU.", file=sys.stderr)
|
||||
device = "cpu"
|
||||
|
||||
self.device = device
|
||||
self._torch_device = torch.device(device)
|
||||
|
||||
options = request.Options
|
||||
|
||||
# empty dict
|
||||
self.options = {}
|
||||
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We are storing all the options in a dict so we can use it later when
|
||||
# generating the audio
|
||||
for opt in options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1) # Split only on first colon
|
||||
# if value is a number, convert it to the appropriate type
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
# Get model path from request
|
||||
model_path = request.Model
|
||||
if not model_path:
|
||||
model_path = "Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice"
|
||||
|
||||
# Determine model type from model path or options
|
||||
self.model_type = self.options.get("model_type", None)
|
||||
if not self.model_type:
|
||||
if "CustomVoice" in model_path:
|
||||
self.model_type = "CustomVoice"
|
||||
elif "VoiceDesign" in model_path:
|
||||
self.model_type = "VoiceDesign"
|
||||
elif "Base" in model_path or "0.6B" in model_path or "1.7B" in model_path:
|
||||
self.model_type = "Base" # VoiceClone model
|
||||
else:
|
||||
# Default to CustomVoice
|
||||
self.model_type = "CustomVoice"
|
||||
|
||||
# Cache for voice clone prompts
|
||||
self._voice_clone_cache = {}
|
||||
|
||||
# Store AudioPath, ModelFile, and ModelPath from LoadModel request
|
||||
# These are used later in TTS for VoiceClone mode
|
||||
self.audio_path = request.AudioPath if hasattr(request, 'AudioPath') and request.AudioPath else None
|
||||
self.model_file = request.ModelFile if hasattr(request, 'ModelFile') and request.ModelFile else None
|
||||
self.model_path = request.ModelPath if hasattr(request, 'ModelPath') and request.ModelPath else None
|
||||
|
||||
# Decide dtype & attention implementation
|
||||
if self.device == "mps":
|
||||
load_dtype = torch.float32 # MPS requires float32
|
||||
device_map = None
|
||||
attn_impl_primary = "sdpa" # flash_attention_2 not supported on MPS
|
||||
elif self.device == "cuda":
|
||||
load_dtype = torch.bfloat16
|
||||
device_map = "cuda"
|
||||
attn_impl_primary = "flash_attention_2"
|
||||
else: # cpu
|
||||
load_dtype = torch.float32
|
||||
device_map = "cpu"
|
||||
attn_impl_primary = "sdpa"
|
||||
|
||||
print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}, model_type: {self.model_type}", file=sys.stderr)
|
||||
print(f"Loading model from: {model_path}", file=sys.stderr)
|
||||
|
||||
# Load model with device-specific logic
|
||||
# Common parameters for all devices
|
||||
load_kwargs = {
|
||||
"dtype": load_dtype,
|
||||
"attn_implementation": attn_impl_primary,
|
||||
"trust_remote_code": True, # Required for qwen-tts models
|
||||
}
|
||||
|
||||
try:
|
||||
if self.device == "mps":
|
||||
load_kwargs["device_map"] = None # load then move
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
self.model.to("mps")
|
||||
elif self.device == "cuda":
|
||||
load_kwargs["device_map"] = device_map
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
else: # cpu
|
||||
load_kwargs["device_map"] = device_map
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
except Exception as e:
|
||||
error_msg = str(e)
|
||||
print(f"[ERROR] Loading model: {type(e).__name__}: {error_msg}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
|
||||
# Check if it's a missing feature extractor/tokenizer error
|
||||
if "speech_tokenizer" in error_msg or "preprocessor_config.json" in error_msg or "feature extractor" in error_msg.lower():
|
||||
print("\n[ERROR] Model files appear to be incomplete. This usually means:", file=sys.stderr)
|
||||
print(" 1. The model download was interrupted or incomplete", file=sys.stderr)
|
||||
print(" 2. The model cache is corrupted", file=sys.stderr)
|
||||
print("\nTo fix this, try:", file=sys.stderr)
|
||||
print(f" rm -rf ~/.cache/huggingface/hub/models--Qwen--Qwen3-TTS-*", file=sys.stderr)
|
||||
print(" Then re-run to trigger a fresh download.", file=sys.stderr)
|
||||
print("\nAlternatively, try using a different model variant:", file=sys.stderr)
|
||||
print(" - Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice", file=sys.stderr)
|
||||
print(" - Qwen/Qwen3-TTS-12Hz-1.7B-VoiceDesign", file=sys.stderr)
|
||||
print(" - Qwen/Qwen3-TTS-12Hz-1.7B-Base", file=sys.stderr)
|
||||
|
||||
if attn_impl_primary == 'flash_attention_2':
|
||||
print("\nTrying to use SDPA instead of flash_attention_2...", file=sys.stderr)
|
||||
load_kwargs["attn_implementation"] = 'sdpa'
|
||||
try:
|
||||
if self.device == "mps":
|
||||
load_kwargs["device_map"] = None
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
self.model.to("mps")
|
||||
else:
|
||||
load_kwargs["device_map"] = (self.device if self.device in ("cuda", "cpu") else None)
|
||||
self.model = Qwen3TTSModel.from_pretrained(model_path, **load_kwargs)
|
||||
except Exception as e2:
|
||||
print(f"[ERROR] Failed to load with SDPA: {type(e2).__name__}: {e2}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
raise e2
|
||||
else:
|
||||
raise e
|
||||
|
||||
print(f"Model loaded successfully: {model_path}", file=sys.stderr)
|
||||
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def _detect_mode(self, request):
|
||||
"""Detect which mode to use based on request parameters."""
|
||||
# Priority: VoiceClone > VoiceDesign > CustomVoice
|
||||
|
||||
# model_type explicitly set
|
||||
if self.model_type == "CustomVoice":
|
||||
return "CustomVoice"
|
||||
if self.model_type == "VoiceClone":
|
||||
return "VoiceClone"
|
||||
if self.model_type == "VoiceDesign":
|
||||
return "VoiceDesign"
|
||||
|
||||
# VoiceClone: AudioPath is provided (from LoadModel, stored in self.audio_path)
|
||||
if self.audio_path:
|
||||
return "VoiceClone"
|
||||
|
||||
# VoiceDesign: instruct option is provided
|
||||
if "instruct" in self.options and self.options["instruct"]:
|
||||
return "VoiceDesign"
|
||||
|
||||
# Default to CustomVoice
|
||||
return "CustomVoice"
|
||||
|
||||
def _get_ref_audio_path(self, request):
|
||||
"""Get reference audio path from stored AudioPath (from LoadModel)."""
|
||||
if not self.audio_path:
|
||||
return None
|
||||
|
||||
# If absolute path, use as-is
|
||||
if os.path.isabs(self.audio_path):
|
||||
return self.audio_path
|
||||
|
||||
# Try relative to ModelFile
|
||||
if self.model_file:
|
||||
model_file_base = os.path.dirname(self.model_file)
|
||||
ref_path = os.path.join(model_file_base, self.audio_path)
|
||||
if os.path.exists(ref_path):
|
||||
return ref_path
|
||||
|
||||
# Try relative to ModelPath
|
||||
if self.model_path:
|
||||
ref_path = os.path.join(self.model_path, self.audio_path)
|
||||
if os.path.exists(ref_path):
|
||||
return ref_path
|
||||
|
||||
# Return as-is (might be URL or base64)
|
||||
return self.audio_path
|
||||
|
||||
def _get_voice_clone_prompt(self, request, ref_audio, ref_text):
|
||||
"""Get or create voice clone prompt, with caching."""
|
||||
cache_key = f"{ref_audio}:{ref_text}"
|
||||
|
||||
if cache_key not in self._voice_clone_cache:
|
||||
print(f"Creating voice clone prompt from {ref_audio}", file=sys.stderr)
|
||||
try:
|
||||
prompt_items = self.model.create_voice_clone_prompt(
|
||||
ref_audio=ref_audio,
|
||||
ref_text=ref_text,
|
||||
x_vector_only_mode=self.options.get("x_vector_only_mode", False),
|
||||
)
|
||||
self._voice_clone_cache[cache_key] = prompt_items
|
||||
except Exception as e:
|
||||
print(f"Error creating voice clone prompt: {e}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
return None
|
||||
|
||||
return self._voice_clone_cache[cache_key]
|
||||
|
||||
def TTS(self, request, context):
|
||||
try:
|
||||
# Check if dst is provided
|
||||
if not request.dst:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="dst (output path) is required"
|
||||
)
|
||||
|
||||
# Prepare text
|
||||
text = request.text.strip()
|
||||
if not text:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="Text is empty"
|
||||
)
|
||||
|
||||
# Get language (auto-detect if not provided)
|
||||
language = request.language if hasattr(request, 'language') and request.language else None
|
||||
if not language or language == "":
|
||||
language = "Auto" # Auto-detect language
|
||||
|
||||
# Detect mode
|
||||
mode = self._detect_mode(request)
|
||||
print(f"Detected mode: {mode}", file=sys.stderr)
|
||||
|
||||
# Get generation parameters from options
|
||||
max_new_tokens = self.options.get("max_new_tokens", None)
|
||||
top_p = self.options.get("top_p", None)
|
||||
temperature = self.options.get("temperature", None)
|
||||
do_sample = self.options.get("do_sample", None)
|
||||
|
||||
# Prepare generation kwargs
|
||||
generation_kwargs = {}
|
||||
if max_new_tokens is not None:
|
||||
generation_kwargs["max_new_tokens"] = max_new_tokens
|
||||
if top_p is not None:
|
||||
generation_kwargs["top_p"] = top_p
|
||||
if temperature is not None:
|
||||
generation_kwargs["temperature"] = temperature
|
||||
if do_sample is not None:
|
||||
generation_kwargs["do_sample"] = do_sample
|
||||
|
||||
instruct = self.options.get("instruct", "")
|
||||
if instruct is not None and instruct != "":
|
||||
generation_kwargs["instruct"] = instruct
|
||||
|
||||
# Generate audio based on mode
|
||||
if mode == "VoiceClone":
|
||||
# VoiceClone mode
|
||||
ref_audio = self._get_ref_audio_path(request)
|
||||
if not ref_audio:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="AudioPath is required for VoiceClone mode"
|
||||
)
|
||||
|
||||
ref_text = self.options.get("ref_text", None)
|
||||
if not ref_text:
|
||||
# Try to get from request if available
|
||||
if hasattr(request, 'ref_text') and request.ref_text:
|
||||
ref_text = request.ref_text
|
||||
else:
|
||||
# x_vector_only_mode doesn't require ref_text
|
||||
if not self.options.get("x_vector_only_mode", False):
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="ref_text is required for VoiceClone mode (or set x_vector_only_mode=true)"
|
||||
)
|
||||
|
||||
# Check if we should use cached prompt
|
||||
use_cached_prompt = self.options.get("use_cached_prompt", True)
|
||||
voice_clone_prompt = None
|
||||
|
||||
if use_cached_prompt:
|
||||
voice_clone_prompt = self._get_voice_clone_prompt(request, ref_audio, ref_text)
|
||||
if voice_clone_prompt is None:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="Failed to create voice clone prompt"
|
||||
)
|
||||
|
||||
if voice_clone_prompt:
|
||||
# Use cached prompt
|
||||
wavs, sr = self.model.generate_voice_clone(
|
||||
text=text,
|
||||
language=language,
|
||||
voice_clone_prompt=voice_clone_prompt,
|
||||
**generation_kwargs
|
||||
)
|
||||
else:
|
||||
# Create prompt on-the-fly
|
||||
wavs, sr = self.model.generate_voice_clone(
|
||||
text=text,
|
||||
language=language,
|
||||
ref_audio=ref_audio,
|
||||
ref_text=ref_text,
|
||||
x_vector_only_mode=self.options.get("x_vector_only_mode", False),
|
||||
**generation_kwargs
|
||||
)
|
||||
|
||||
elif mode == "VoiceDesign":
|
||||
# VoiceDesign mode
|
||||
if not instruct:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="instruct option is required for VoiceDesign mode"
|
||||
)
|
||||
|
||||
wavs, sr = self.model.generate_voice_design(
|
||||
text=text,
|
||||
language=language,
|
||||
instruct=instruct,
|
||||
**generation_kwargs
|
||||
)
|
||||
|
||||
else:
|
||||
# CustomVoice mode (default)
|
||||
speaker = request.voice if request.voice else None
|
||||
if not speaker:
|
||||
# Try to get from options
|
||||
speaker = self.options.get("speaker", None)
|
||||
if not speaker:
|
||||
# Use default speaker
|
||||
speaker = "Vivian"
|
||||
print(f"No speaker specified, using default: {speaker}", file=sys.stderr)
|
||||
|
||||
# Validate speaker if model supports it
|
||||
if hasattr(self.model, 'get_supported_speakers'):
|
||||
try:
|
||||
supported_speakers = self.model.get_supported_speakers()
|
||||
if speaker not in supported_speakers:
|
||||
print(f"Warning: Speaker '{speaker}' not in supported list. Available: {supported_speakers}", file=sys.stderr)
|
||||
# Try to find a close match (case-insensitive)
|
||||
speaker_lower = speaker.lower()
|
||||
for sup_speaker in supported_speakers:
|
||||
if sup_speaker.lower() == speaker_lower:
|
||||
speaker = sup_speaker
|
||||
print(f"Using matched speaker: {speaker}", file=sys.stderr)
|
||||
break
|
||||
except Exception as e:
|
||||
print(f"Warning: Could not get supported speakers: {e}", file=sys.stderr)
|
||||
|
||||
wavs, sr = self.model.generate_custom_voice(
|
||||
text=text,
|
||||
language=language,
|
||||
speaker=speaker,
|
||||
**generation_kwargs
|
||||
)
|
||||
|
||||
# Save output
|
||||
if wavs is not None and len(wavs) > 0:
|
||||
# wavs is a list, take first element
|
||||
audio_data = wavs[0] if isinstance(wavs, list) else wavs
|
||||
sf.write(request.dst, audio_data, sr)
|
||||
print(f"Saved output to {request.dst}", file=sys.stderr)
|
||||
else:
|
||||
return backend_pb2.Result(
|
||||
success=False,
|
||||
message="No audio output generated"
|
||||
)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error in TTS: {err}", file=sys.stderr)
|
||||
print(traceback.format_exc(), file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
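A hedged client sketch for the qwen-tts backend above, mirroring its mode detection: a plain voice name drives CustomVoice, an instruct option drives VoiceDesign, and an AudioPath supplied at LoadModel time (plus ref_text) drives VoiceClone. Model ids, speaker names, paths and option values are placeholders.

    import grpc
    import backend_pb2
    import backend_pb2_grpc

    with grpc.insecure_channel("localhost:50051") as channel:
        stub = backend_pb2_grpc.BackendStub(channel)

        # CustomVoice: choose a built-in speaker via TTSRequest.voice.
        stub.LoadModel(backend_pb2.ModelOptions(Model="Qwen/Qwen3-TTS-12Hz-1.7B-CustomVoice"))
        stub.TTS(backend_pb2.TTSRequest(text="Hello there", voice="Vivian", dst="/tmp/custom.wav"))

        # VoiceDesign: describe the target voice through the "instruct" option.
        stub.LoadModel(backend_pb2.ModelOptions(
            Model="Qwen/Qwen3-TTS-12Hz-1.7B-VoiceDesign",
            Options=["instruct:a calm, low-pitched narrator"],
        ))
        stub.TTS(backend_pb2.TTSRequest(text="Hello there", dst="/tmp/design.wav"))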
backend/python/qwen-tts/install.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
backend/python/qwen-tts/requirements-cpu.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements-cublas12-after.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
backend/python/qwen-tts/requirements-cublas12.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu121
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements-cublas13.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements-hipblas.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchaudio==2.7.1+rocm6.3
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements-intel-after.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
flash-attn
|
||||
backend/python/qwen-tts/requirements-intel.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/xpu
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements-l4t12.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements-l4t13.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu130
|
||||
torch
|
||||
torchaudio
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements-mps.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
torch==2.7.1
|
||||
torchaudio==0.22.1
|
||||
qwen-tts
|
||||
sox
|
||||
backend/python/qwen-tts/requirements.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
soundfile
|
||||
setuptools
|
||||
backend/python/qwen-tts/run.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
backend/python/qwen-tts/test.py (new file, 98 lines)
@@ -0,0 +1,98 @@
"""
A test script to test the gRPC service
"""
import unittest
import subprocess
import time
import os
import sys
import tempfile
import threading
import backend_pb2
import backend_pb2_grpc

import grpc


class TestBackendServicer(unittest.TestCase):
    """
    TestBackendServicer is the class that tests the gRPC service
    """
    def setUp(self):
        """
        This method sets up the gRPC service by starting the server
        """
        self.service = subprocess.Popen(
            ["python3", "backend.py", "--addr", "localhost:50051"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True
        )
        time.sleep(5)

    def tearDown(self) -> None:
        """
        This method tears down the gRPC service by terminating the server
        """
        self.service.terminate()
        try:
            stdout, stderr = self.service.communicate(timeout=5)
            # Output should already be printed by threads, but print any remaining
            if stdout:
                print("=== REMAINING STDOUT ===")
                print(stdout)
            if stderr:
                print("=== REMAINING STDERR ===")
                print(stderr)
        except subprocess.TimeoutExpired:
            self.service.kill()
            stdout, stderr = self.service.communicate()
            if stdout:
                print("=== REMAINING STDOUT ===")
                print(stdout)
            if stderr:
                print("=== REMAINING STDERR ===")
                print(stderr)

    def test_tts(self):
        """
        This method tests if the TTS generation works successfully
        """
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                # Allow up to 10 minutes for model download on first run
                response = stub.LoadModel(
                    backend_pb2.ModelOptions(Model="Qwen/Qwen3-TTS-12Hz-0.6B-CustomVoice"),
                    timeout=600.0
                )
                self.assertTrue(response.success)

                # Create temporary output file
                with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
                    output_path = tmp_file.name

                tts_request = backend_pb2.TTSRequest(
                    text="Hello, this is a test of the qwen-tts backend.",
                    voice="Vivian",
                    dst=output_path
                )
                # Allow up to 2 minutes for TTS generation
                tts_response = stub.TTS(tts_request, timeout=120.0)
                self.assertIsNotNone(tts_response)
                self.assertTrue(tts_response.success)

                # Verify output file exists and is not empty
                self.assertTrue(os.path.exists(output_path))
                self.assertGreater(os.path.getsize(output_path), 0)

                # Cleanup
                os.unlink(output_path)
        except Exception as err:
            print(f"Exception: {err}", file=sys.stderr)
            # Give threads a moment to flush any remaining output
            time.sleep(1)
            self.fail("TTS service failed")
        finally:
            self.tearDown()
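For reference, the test above can also be exercised by hand against a running backend. The sketch below is not part of the diff; it simply replays the same LoadModel/TTS calls outside of unittest and assumes backend.py from this PR is already listening on localhost:50051 with the generated backend_pb2/backend_pb2_grpc stubs importable.

# Manual smoke test for the qwen-tts backend (sketch, not part of the PR).
import grpc

import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    # Same model and voice as the unit test; the first call may download the model.
    load = stub.LoadModel(
        backend_pb2.ModelOptions(Model="Qwen/Qwen3-TTS-12Hz-0.6B-CustomVoice"),
        timeout=600.0,
    )
    assert load.success, load.message
    tts = stub.TTS(
        backend_pb2.TTSRequest(text="Hello from qwen-tts.", voice="Vivian", dst="/tmp/qwen-tts.wav"),
        timeout=120.0,
    )
    print("TTS ok:", tts.success)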
@@ -1,9 +1,7 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
--extra-index-url https://download.pytorch.org/whl/xpu
transformers
accelerate
torch==2.3.1+cxx11.abi
oneccl_bind_pt==2.8.0+xpu
torch
rerankers[transformers]
optimum[openvino]
setuptools
@@ -1,8 +1,6 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
torchvision==0.18.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu
--extra-index-url https://download.pytorch.org/whl/xpu
torch
torchvision
optimum[openvino]
setuptools
rfdetr
@@ -5,5 +5,5 @@ accelerate
transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5
@@ -5,5 +5,5 @@ numba==0.60.0
transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5
@@ -5,5 +5,5 @@ numba==0.60.0
transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5
@@ -7,5 +7,5 @@ numba==0.60.0
bitsandbytes
outetts
bitsandbytes
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5
@@ -1,13 +1,10 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.3.110+xpu
torch==2.5.1+cxx11.abi
oneccl_bind_pt==2.8.0+xpu
--extra-index-url https://download.pytorch.org/whl/xpu
torch
optimum[openvino]
llvmlite==0.43.0
numba==0.60.0
transformers
intel-extension-for-transformers
bitsandbytes
outetts
sentence-transformers==5.2.0
protobuf==6.33.4
sentence-transformers==5.2.2
protobuf==6.33.5
@@ -1,5 +1,5 @@
grpcio==1.76.0
protobuf==6.33.4
protobuf==6.33.5
certifi
setuptools
scipy==1.15.1
@@ -2,6 +2,43 @@
vibevoice:
	bash install.sh

.PHONY: download-voices
download-voices:
	@echo "Downloading voice preset files..."
	@mkdir -p voices/streaming_model
	@if command -v wget >/dev/null 2>&1; then \
		wget -q -O voices/streaming_model/en-Frank_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Frank_man.pt && \
		wget -q -O voices/streaming_model/en-Grace_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Grace_woman.pt && \
		wget -q -O voices/streaming_model/en-Mike_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Mike_man.pt && \
		wget -q -O voices/streaming_model/en-Emma_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Emma_woman.pt && \
		wget -q -O voices/streaming_model/en-Carter_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Carter_man.pt && \
		wget -q -O voices/streaming_model/en-Davis_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Davis_man.pt && \
		echo "Voice files downloaded successfully"; \
	elif command -v curl >/dev/null 2>&1; then \
		curl -sL -o voices/streaming_model/en-Frank_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Frank_man.pt && \
		curl -sL -o voices/streaming_model/en-Grace_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Grace_woman.pt && \
		curl -sL -o voices/streaming_model/en-Mike_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Mike_man.pt && \
		curl -sL -o voices/streaming_model/en-Emma_woman.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Emma_woman.pt && \
		curl -sL -o voices/streaming_model/en-Carter_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Carter_man.pt && \
		curl -sL -o voices/streaming_model/en-Davis_man.pt \
			https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model/en-Davis_man.pt && \
		echo "Voice files downloaded successfully"; \
	else \
		echo "Error: Neither wget nor curl found. Cannot download voice files."; \
		exit 1; \
	fi

.PHONY: run
run: vibevoice
	@echo "Running vibevoice..."
@@ -9,7 +46,7 @@ run: vibevoice
	@echo "vibevoice run."

.PHONY: test
test: vibevoice
test: vibevoice download-voices
	@echo "Testing vibevoice..."
	bash test.sh
	@echo "vibevoice tested."
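The download-voices target above only fetches the upstream demo voice presets. Where make is not available, the same files can be fetched directly; the following is a rough Python sketch of the equivalent download (same URLs and destination directory as the Makefile target), not part of the diff.

# Sketch of the download-voices step in plain Python (same URLs and paths as the Makefile).
import os
import urllib.request

BASE = "https://raw.githubusercontent.com/microsoft/VibeVoice/main/demo/voices/streaming_model"
VOICES = ["en-Frank_man", "en-Grace_woman", "en-Mike_man", "en-Emma_woman", "en-Carter_man", "en-Davis_man"]

os.makedirs("voices/streaming_model", exist_ok=True)
for name in VOICES:
    dst = os.path.join("voices", "streaming_model", f"{name}.pt")
    urllib.request.urlretrieve(f"{BASE}/{name}.pt", dst)  # download each voice preset
    print("downloaded", dst)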
@@ -16,6 +16,8 @@ import backend_pb2_grpc
import torch
from vibevoice.modular.modeling_vibevoice_streaming_inference import VibeVoiceStreamingForConditionalGenerationInference
from vibevoice.processor.vibevoice_streaming_processor import VibeVoiceStreamingProcessor
from vibevoice.modular.modeling_vibevoice_asr import VibeVoiceASRForConditionalGeneration
from vibevoice.processor.vibevoice_asr_processor import VibeVoiceASRProcessor

import grpc
@@ -95,21 +97,72 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
value = value.lower() == "true"
self.options[key] = value

# Check if ASR mode is enabled
self.asr_mode = self.options.get("asr_mode", False)
if not isinstance(self.asr_mode, bool):
# Handle string "true"/"false" case
self.asr_mode = str(self.asr_mode).lower() == "true"

# Get model path from request
model_path = request.Model
if not model_path:
model_path = "microsoft/VibeVoice-Realtime-0.5B"
if self.asr_mode:
model_path = "microsoft/VibeVoice-ASR" # Default ASR model
else:
model_path = "microsoft/VibeVoice-Realtime-0.5B" # Default TTS model

# Get inference steps from options, default to 5
default_dtype = torch.bfloat16 if self.device == "cuda" else torch.float32

load_dtype = default_dtype
if "torch_dtype" in self.options:
torch_dtype_str = str(self.options["torch_dtype"]).lower()
if torch_dtype_str == "fp16":
load_dtype = torch.float16
elif torch_dtype_str == "bf16":
load_dtype = torch.bfloat16
elif torch_dtype_str == "fp32":
load_dtype = torch.float32
# remove it from options after reading
del self.options["torch_dtype"]

# Get inference steps from options, default to 5 (TTS only)
self.inference_steps = self.options.get("inference_steps", 5)
if not isinstance(self.inference_steps, int) or self.inference_steps <= 0:
self.inference_steps = 5

# Get cfg_scale from options, default to 1.5
# Get cfg_scale from options, default to 1.5 (TTS only)
self.cfg_scale = self.options.get("cfg_scale", 1.5)
if not isinstance(self.cfg_scale, (int, float)) or self.cfg_scale <= 0:
self.cfg_scale = 1.5

# Get ASR generation parameters from options
self.max_new_tokens = self.options.get("max_new_tokens", 512)
if not isinstance(self.max_new_tokens, int) or self.max_new_tokens <= 0:
self.max_new_tokens = 512

self.temperature = self.options.get("temperature", 0.0)
if not isinstance(self.temperature, (int, float)) or self.temperature < 0:
self.temperature = 0.0

self.top_p = self.options.get("top_p", 1.0)
if not isinstance(self.top_p, (int, float)) or self.top_p <= 0:
self.top_p = 1.0

self.do_sample = self.options.get("do_sample", None)
if self.do_sample is None:
# Default: use sampling if temperature > 0
self.do_sample = self.temperature > 0
elif not isinstance(self.do_sample, bool):
self.do_sample = str(self.do_sample).lower() == "true"

self.num_beams = self.options.get("num_beams", 1)
if not isinstance(self.num_beams, int) or self.num_beams < 1:
self.num_beams = 1

self.repetition_penalty = self.options.get("repetition_penalty", 1.0)
if not isinstance(self.repetition_penalty, (int, float)) or self.repetition_penalty <= 0:
self.repetition_penalty = 1.0

# Determine voices directory
# Priority order:
# 1. voices_dir option (explicitly set by user - highest priority)
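The hunk above reads every ASR/TTS knob (asr_mode, torch_dtype, inference_steps, cfg_scale, max_new_tokens, temperature, top_p, do_sample, num_beams, repetition_penalty) from the backend options supplied at load time. As a rough illustration only, and assuming the generated ModelOptions message exposes an Options list of "key:value" strings as the parsing loop above expects (that field layout comes from LocalAI's backend.proto, not this diff), enabling ASR mode could look like this:

# Illustrative sketch: enabling ASR mode via backend options at LoadModel time.
# The Options field layout is assumed from LocalAI's backend.proto, not defined in this diff.
import grpc

import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    response = stub.LoadModel(
        backend_pb2.ModelOptions(
            Model="microsoft/VibeVoice-ASR",
            Options=["asr_mode:true", "torch_dtype:bf16", "max_new_tokens:512"],
        ),
        timeout=600.0,
    )
    print(response.success, response.message)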
@@ -163,91 +216,151 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
else:
voices_dir = None

# Initialize voice-related attributes (TTS only)
self.voices_dir = voices_dir
self.voice_presets = {}
self._voice_cache = {}
self.default_voice_key = None

# Load voice presets if directory exists
if self.voices_dir and os.path.exists(self.voices_dir):
self._load_voice_presets()
else:
print(f"Warning: Voices directory not found. Voice presets will not be available.", file=sys.stderr)

# Store AudioPath, ModelFile, and ModelPath from LoadModel request for use in TTS
self.audio_path = request.AudioPath if hasattr(request, 'AudioPath') and request.AudioPath else None
self.model_file = request.ModelFile if hasattr(request, 'ModelFile') and request.ModelFile else None
self.model_path = request.ModelPath if hasattr(request, 'ModelPath') and request.ModelPath else None

# Decide attention implementation and device_map (matching upstream example)
if self.device == "mps":
device_map = None
attn_impl_primary = "sdpa" # flash_attention_2 not supported on MPS
elif self.device == "cuda":
device_map = "cuda"
attn_impl_primary = "flash_attention_2"
else: # cpu
device_map = "cpu" # Match upstream example: use "cpu" for CPU device_map
attn_impl_primary = "sdpa"

try:
print(f"Loading processor & model from {model_path}", file=sys.stderr)
self.processor = VibeVoiceStreamingProcessor.from_pretrained(model_path)
if self.asr_mode:
# Load ASR model and processor
print(f"Loading ASR processor & model from {model_path}", file=sys.stderr)

# Load ASR processor
self.processor = VibeVoiceASRProcessor.from_pretrained(
model_path,
language_model_pretrained_name="Qwen/Qwen2.5-7B"
)

# Decide dtype & attention implementation
if self.device == "mps":
load_dtype = torch.float32 # MPS requires float32
device_map = None
attn_impl_primary = "sdpa" # flash_attention_2 not supported on MPS
elif self.device == "cuda":
load_dtype = torch.bfloat16
device_map = "cuda"
attn_impl_primary = "flash_attention_2"
else: # cpu
load_dtype = torch.float32
device_map = "cpu"
attn_impl_primary = "sdpa"
print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}", file=sys.stderr)

print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}", file=sys.stderr)

# Load model with device-specific logic
try:
if self.device == "mps":
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
# Load ASR model - use device_map=None and move manually to avoid JSON serialization issues
# Load with dtype to ensure all components are in correct dtype from the start
try:
print(f"Using attention implementation: {attn_impl_primary}", file=sys.stderr)
# Load model with dtype to ensure all components are in correct dtype
self.model = VibeVoiceASRForConditionalGeneration.from_pretrained(
model_path,
torch_dtype=load_dtype,
dtype=load_dtype,
device_map=None, # Always use None, move manually to avoid JSON serialization issues
attn_implementation=attn_impl_primary,
device_map=None, # load then move
trust_remote_code=True
)
self.model.to("mps")
elif self.device == "cuda":
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
model_path,
torch_dtype=load_dtype,
device_map="cuda",
attn_implementation=attn_impl_primary,
)
else: # cpu
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
model_path,
torch_dtype=load_dtype,
device_map="cpu",
attn_implementation=attn_impl_primary,
)
except Exception as e:
if attn_impl_primary == 'flash_attention_2':
print(f"[ERROR] : {type(e).__name__}: {e}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
print("Error loading the model. Trying to use SDPA. However, note that only flash_attention_2 has been fully tested, and using SDPA may result in lower audio quality.", file=sys.stderr)
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
model_path,
torch_dtype=load_dtype,
device_map=(self.device if self.device in ("cuda", "cpu") else None),
attn_implementation='sdpa'
)
if self.device == "mps":
self.model.to("mps")
else:
raise e
# Move to device manually
self.model = self.model.to(self.device)
except Exception as e:
if attn_impl_primary == 'flash_attention_2':
print(f"[ERROR] : {type(e).__name__}: {e}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
print("Error loading the ASR model. Trying to use SDPA.", file=sys.stderr)
self.model = VibeVoiceASRForConditionalGeneration.from_pretrained(
model_path,
dtype=load_dtype,
device_map=None,
attn_implementation='sdpa',
trust_remote_code=True
)
# Move to device manually
self.model = self.model.to(self.device)
else:
raise e

self.model.eval()
self.model.set_ddpm_inference_steps(num_steps=self.inference_steps)

# Set default voice key
if self.voice_presets:
# Try to get default from environment or use first available
preset_name = os.environ.get("VOICE_PRESET")
self.default_voice_key = self._determine_voice_key(preset_name)
print(f"Default voice preset: {self.default_voice_key}", file=sys.stderr)
self.model.eval()
print(f"ASR model loaded successfully", file=sys.stderr)
else:
print("Warning: No voice presets available. Voice selection will not work.", file=sys.stderr)
# Load TTS model and processor (existing logic)
# Load voice presets if directory exists
if self.voices_dir and os.path.exists(self.voices_dir):
self._load_voice_presets()
else:
print(f"Warning: Voices directory not found. Voice presets will not be available.", file=sys.stderr)

print(f"Loading TTS processor & model from {model_path}", file=sys.stderr)
self.processor = VibeVoiceStreamingProcessor.from_pretrained(model_path)


print(f"Using device: {self.device}, torch_dtype: {load_dtype}, attn_implementation: {attn_impl_primary}", file=sys.stderr)

# Load model with device-specific logic (matching upstream example exactly)
try:
if self.device == "mps":
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
model_path,
torch_dtype=load_dtype,
attn_implementation=attn_impl_primary,
device_map=None, # load then move
)
self.model.to("mps")
elif self.device == "cuda":
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
model_path,
torch_dtype=load_dtype,
device_map=device_map,
attn_implementation=attn_impl_primary,
)
else: # cpu
# Match upstream example: use device_map="cpu" for CPU
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
model_path,
torch_dtype=load_dtype,
device_map="cpu",
attn_implementation=attn_impl_primary,
)
except Exception as e:
if attn_impl_primary == 'flash_attention_2':
print(f"[ERROR] : {type(e).__name__}: {e}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
print("Error loading the model. Trying to use SDPA. However, note that only flash_attention_2 has been fully tested, and using SDPA may result in lower audio quality.", file=sys.stderr)
# Match upstream example fallback pattern
self.model = VibeVoiceStreamingForConditionalGenerationInference.from_pretrained(
model_path,
torch_dtype=load_dtype,
device_map=(self.device if self.device in ("cuda", "cpu") else None),
attn_implementation='sdpa'
)
if self.device == "mps":
self.model.to("mps")
else:
raise e

self.model.eval()
self.model.set_ddpm_inference_steps(num_steps=self.inference_steps)

# Set default voice key
if self.voice_presets:
# Try to get default from environment or use first available
preset_name = os.environ.get("VOICE_PRESET")
self.default_voice_key = self._determine_voice_key(preset_name)
print(f"Default voice preset: {self.default_voice_key}", file=sys.stderr)
else:
print("Warning: No voice presets available. Voice selection will not work.", file=sys.stderr)

except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
# Format error message safely, avoiding JSON serialization issues
error_msg = str(err)
error_type = type(err).__name__
# Include traceback for debugging
tb_str = traceback.format_exc()
print(f"[ERROR] LoadModel failed: {error_type}: {error_msg}", file=sys.stderr)
print(tb_str, file=sys.stderr)
return backend_pb2.Result(success=False, message=f"{error_type}: {error_msg}")

return backend_pb2.Result(message="Model loaded successfully", success=True)
@@ -327,14 +440,30 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if not voice_path or not os.path.exists(voice_path):
return None

# Ensure cache exists (should be initialized in LoadModel)
if not hasattr(self, '_voice_cache'):
self._voice_cache = {}

# Use path as cache key
if voice_path not in self._voice_cache:
print(f"Loading prefilled prompt from {voice_path}", file=sys.stderr)
prefilled_outputs = torch.load(
voice_path,
map_location=self._torch_device,
weights_only=False,
)
# Match self-test.py: use string device name for map_location
# Ensure self.device exists (should be set in LoadModel)
try:
if not hasattr(self, 'device'):
# Fallback to CPU if device not set
device_str = "cpu"
else:
device_str = str(self.device)
except AttributeError as e:
print(f"Error accessing self.device: {e}, falling back to CPU", file=sys.stderr)
device_str = "cpu"
if device_str != "cpu":
map_loc = device_str
else:
map_loc = "cpu"
# Call torch.load with explicit arguments
prefilled_outputs = torch.load(voice_path, map_location=map_loc, weights_only=False)
self._voice_cache[voice_path] = prefilled_outputs

return self._voice_cache[voice_path]
@@ -351,17 +480,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
voice_path = self._get_voice_path(request.voice)
if voice_path:
voice_key = request.voice
elif request.AudioPath:
# Use AudioPath as voice file
if os.path.isabs(request.AudioPath):
voice_path = request.AudioPath
elif request.ModelFile:
model_file_base = os.path.dirname(request.ModelFile)
voice_path = os.path.join(model_file_base, request.AudioPath)
elif hasattr(request, 'ModelPath') and request.ModelPath:
voice_path = os.path.join(request.ModelPath, request.AudioPath)
elif self.audio_path:
# Use AudioPath from LoadModel as voice file
if os.path.isabs(self.audio_path):
voice_path = self.audio_path
elif self.model_file:
model_file_base = os.path.dirname(self.model_file)
voice_path = os.path.join(model_file_base, self.audio_path)
elif self.model_path:
voice_path = os.path.join(self.model_path, self.audio_path)
else:
voice_path = request.AudioPath
voice_path = self.audio_path
elif self.default_voice_key:
voice_path = self._get_voice_path(self.default_voice_key)
voice_key = self.default_voice_key
@@ -404,8 +533,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
return_attention_mask=True,
)

# Move tensors to target device
target_device = self._torch_device
# Move tensors to target device (matching self-test.py exactly)
# Explicitly ensure it's a string to avoid any variable name collisions
target_device = str(self.device) if str(self.device) != "cpu" else "cpu"
for k, v in inputs.items():
if torch.is_tensor(v):
inputs[k] = v.to(target_device)
@@ -447,6 +577,147 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):

return backend_pb2.Result(success=True)

def AudioTranscription(self, request, context):
"""Transcribe audio file to text using ASR model."""
try:
# Validate ASR mode is active
if not self.asr_mode:
return backend_pb2.TranscriptResult(
segments=[],
text="",
)
# Note: We return empty result instead of error to match faster-whisper behavior

# Get audio file path
audio_path = request.dst
if not audio_path or not os.path.exists(audio_path):
print(f"Error: Audio file not found: {audio_path}", file=sys.stderr)
return backend_pb2.TranscriptResult(
segments=[],
text="",
)

print(f"Transcribing audio file: {audio_path}", file=sys.stderr)

# Get context_info from options if available
context_info = self.options.get("context_info", None)
if context_info and isinstance(context_info, str) and context_info.strip():
context_info = context_info.strip()
else:
context_info = None

# Process audio with ASR processor (matching gradio example)
inputs = self.processor(
audio=audio_path,
sampling_rate=None,
return_tensors="pt",
add_generation_prompt=True,
context_info=context_info
)

# Move to device (matching gradio example)
inputs = {k: v.to(self.device) if isinstance(v, torch.Tensor) else v
for k, v in inputs.items()}

# Prepare generation config (matching gradio example)
generation_config = {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature if self.temperature > 0 else None,
"top_p": self.top_p if self.do_sample else None,
"do_sample": self.do_sample,
"num_beams": self.num_beams,
"repetition_penalty": self.repetition_penalty,
"pad_token_id": self.processor.pad_id,
"eos_token_id": self.processor.tokenizer.eos_token_id,
}

# Remove None values (matching gradio example)
generation_config = {k: v for k, v in generation_config.items() if v is not None}

print(f"Generating transcription with max_new_tokens: {self.max_new_tokens}, temperature: {self.temperature}, do_sample: {self.do_sample}, num_beams: {self.num_beams}, repetition_penalty: {self.repetition_penalty}", file=sys.stderr)

# Generate transcription (matching gradio example)
with torch.no_grad():
output_ids = self.model.generate(
**inputs,
**generation_config
)

# Decode output (matching gradio example)
generated_ids = output_ids[0, inputs['input_ids'].shape[1]:]
generated_text = self.processor.decode(generated_ids, skip_special_tokens=True)

# Parse structured output to get segments
result_segments = []
try:
transcription_segments = self.processor.post_process_transcription(generated_text)

if transcription_segments:
# Map segments to TranscriptSegment format
for idx, seg in enumerate(transcription_segments):
# Extract timing information (if available)
# Handle both dict and object with attributes
if isinstance(seg, dict):
start_time = seg.get('start_time', 0)
end_time = seg.get('end_time', 0)
text = seg.get('text', '')
speaker_id = seg.get('speaker_id', None)
else:
# Handle object with attributes
start_time = getattr(seg, 'start_time', 0)
end_time = getattr(seg, 'end_time', 0)
text = getattr(seg, 'text', '')
speaker_id = getattr(seg, 'speaker_id', None)

# Convert time to milliseconds (assuming seconds)
start_ms = int(start_time * 1000) if isinstance(start_time, (int, float)) else 0
end_ms = int(end_time * 1000) if isinstance(end_time, (int, float)) else 0

# Add speaker info to text if available
if speaker_id is not None:
text = f"[Speaker {speaker_id}] {text}"

result_segments.append(backend_pb2.TranscriptSegment(
id=idx,
start=start_ms,
end=end_ms,
text=text,
tokens=[] # Token IDs not extracted for now
))
except Exception as e:
print(f"Warning: Failed to parse structured output: {e}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
# Fallback: create a single segment with the full text
if generated_text:
result_segments.append(backend_pb2.TranscriptSegment(
id=0,
start=0,
end=0,
text=generated_text,
tokens=[]
))

# Combine all segment texts into full transcription
if result_segments:
full_text = " ".join([seg.text for seg in result_segments])
else:
full_text = generated_text if generated_text else ""

print(f"Transcription completed: {len(result_segments)} segments", file=sys.stderr)

return backend_pb2.TranscriptResult(
segments=result_segments,
text=full_text
)

except Exception as err:
print(f"Error in AudioTranscription: {err}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
return backend_pb2.TranscriptResult(
segments=[],
text="",
)

def serve(address):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
Some files were not shown because too many files have changed in this diff
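As a closing note on the AudioTranscription handler added above: it reads the audio path from request.dst and returns a TranscriptResult whose segments carry millisecond start/end times and optional speaker tags. A minimal client sketch follows; the TranscriptRequest message name and its dst field are assumed from LocalAI's backend.proto (only dst is read by the handler), and ASR mode must have been enabled at LoadModel time.

# Sketch of calling the new AudioTranscription RPC (assumes ASR mode is active).
import grpc

import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    result = stub.AudioTranscription(
        backend_pb2.TranscriptRequest(dst="/tmp/sample.wav"),  # handler reads request.dst
        timeout=300.0,
    )
    for seg in result.segments:
        print(seg.id, seg.start, seg.end, seg.text)
    print(result.text)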