Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-07 21:22:58 -05:00)

Compare commits: feat/voxtr ... copilot/fi (1 commit)

Commit SHA1: 5041294265
.github/workflows/backend.yml (434 changes, vendored)
@@ -14,7 +14,6 @@ concurrency:
 
 jobs:
   backend-jobs:
-    if: github.repository == 'mudler/LocalAI'
     uses: ./.github/workflows/backend_build.yml
     with:
      tag-latest: ${{ matrix.tag-latest }}

@@ -105,58 +104,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: ''
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-cpu-ace-step'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'true'
-            backend: "ace-step"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
-          - build-type: ''
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-cpu-mlx'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'true'
-            backend: "mlx"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
-          - build-type: ''
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-cpu-mlx-vlm'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'true'
-            backend: "mlx-vlm"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
-          - build-type: ''
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-cpu-mlx-audio'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'true'
-            backend: "mlx-audio"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           # CUDA 12 builds
           - build-type: 'cublas'
             cuda-major-version: "12"

@@ -184,19 +131,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "8"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-12-nemo'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "nemo"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'cublas'
             cuda-major-version: "12"
             cuda-minor-version: "8"

@@ -314,19 +248,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "8"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-12-ace-step'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "ace-step"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'cublas'
             cuda-major-version: "12"
             cuda-minor-version: "8"

@@ -379,19 +300,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "8"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-12-outetts'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "outetts"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'cublas'
             cuda-major-version: "12"
             cuda-minor-version: "8"

@@ -418,45 +326,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "8"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-12-mlx'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "mlx"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "8"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-12-mlx-vlm'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "mlx-vlm"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "8"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-12-mlx-audio'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "mlx-audio"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'cublas'
             cuda-major-version: "12"
             cuda-minor-version: "8"

@@ -549,19 +418,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-13-nemo'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "nemo"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'cublas'
             cuda-major-version: "13"
             cuda-minor-version: "0"

@@ -653,19 +509,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-13-ace-step'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "ace-step"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'l4t'
             cuda-major-version: "13"
             cuda-minor-version: "0"

@@ -731,45 +574,6 @@ jobs:
             backend: "diffusers"
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
-          - build-type: 'l4t'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-nvidia-l4t-cuda-13-arm64-mlx'
-            runs-on: 'ubuntu-24.04-arm'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            ubuntu-version: '2404'
-            backend: "mlx"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-          - build-type: 'l4t'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-nvidia-l4t-cuda-13-arm64-mlx-vlm'
-            runs-on: 'ubuntu-24.04-arm'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            ubuntu-version: '2404'
-            backend: "mlx-vlm"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-          - build-type: 'l4t'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-nvidia-l4t-cuda-13-arm64-mlx-audio'
-            runs-on: 'ubuntu-24.04-arm'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            ubuntu-version: '2404'
-            backend: "mlx-audio"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
           - build-type: 'cublas'
             cuda-major-version: "13"
             cuda-minor-version: "0"

@@ -835,45 +639,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-13-mlx'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "mlx"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-13-mlx-vlm'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "mlx-vlm"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
-          - build-type: 'cublas'
-            cuda-major-version: "13"
-            cuda-minor-version: "0"
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-nvidia-cuda-13-mlx-audio'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "mlx-audio"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'cublas'
             cuda-major-version: "13"
             cuda-minor-version: "0"

@@ -1018,19 +783,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'hipblas'
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-rocm-hipblas-ace-step'
-            runs-on: 'arc-runner-set'
-            base-image: "rocm/dev-ubuntu-24.04:6.4.4"
-            skip-drivers: 'false'
-            backend: "ace-step"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           # ROCm additional backends
           - build-type: 'hipblas'
             cuda-major-version: ""

@@ -1071,19 +823,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'hipblas'
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-rocm-hipblas-nemo'
-            runs-on: 'arc-runner-set'
-            base-image: "rocm/dev-ubuntu-24.04:6.4.4"
-            skip-drivers: 'false'
-            backend: "nemo"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'hipblas'
             cuda-major-version: ""
             cuda-minor-version: ""

@@ -1241,19 +980,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'intel'
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-intel-ace-step'
-            runs-on: 'ubuntu-latest'
-            base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
-            skip-drivers: 'false'
-            backend: "ace-step"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'l4t'
             cuda-major-version: "12"
             cuda-minor-version: "0"

@@ -1319,45 +1045,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2204'
-          - build-type: 'l4t'
-            cuda-major-version: "12"
-            cuda-minor-version: "0"
-            platforms: 'linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-nvidia-l4t-mlx'
-            runs-on: 'ubuntu-24.04-arm'
-            base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
-            skip-drivers: 'true'
-            backend: "mlx"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2204'
-          - build-type: 'l4t'
-            cuda-major-version: "12"
-            cuda-minor-version: "0"
-            platforms: 'linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-nvidia-l4t-mlx-vlm'
-            runs-on: 'ubuntu-24.04-arm'
-            base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
-            skip-drivers: 'true'
-            backend: "mlx-vlm"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2204'
-          - build-type: 'l4t'
-            cuda-major-version: "12"
-            cuda-minor-version: "0"
-            platforms: 'linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-nvidia-l4t-mlx-audio'
-            runs-on: 'ubuntu-24.04-arm'
-            base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
-            skip-drivers: 'true'
-            backend: "mlx-audio"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2204'
           # SYCL additional backends
           - build-type: 'intel'
             cuda-major-version: ""

@@ -1411,19 +1098,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: 'intel'
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-gpu-intel-nemo'
-            runs-on: 'arc-runner-set'
-            base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
-            skip-drivers: 'false'
-            backend: "nemo"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: 'intel'
             cuda-major-version: ""
             cuda-minor-version: ""

@@ -1674,20 +1348,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.golang"
             context: "./"
             ubuntu-version: '2404'
-          # voxtral
-          - build-type: ''
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64,linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-cpu-voxtral'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "voxtral"
-            dockerfile: "./backend/Dockerfile.golang"
-            context: "./"
-            ubuntu-version: '2404'
           #silero-vad
           - build-type: ''
             cuda-major-version: ""

@@ -1863,19 +1523,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: ''
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64,linux/arm64'
-            tag-latest: 'auto'
-            tag-suffix: '-cpu-nemo'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'false'
-            backend: "nemo"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
           - build-type: ''
             cuda-major-version: ""
             cuda-minor-version: ""

@@ -1915,19 +1562,6 @@ jobs:
             dockerfile: "./backend/Dockerfile.python"
             context: "./"
             ubuntu-version: '2404'
-          - build-type: ''
-            cuda-major-version: ""
-            cuda-minor-version: ""
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-cpu-outetts'
-            runs-on: 'ubuntu-latest'
-            base-image: "ubuntu:24.04"
-            skip-drivers: 'true'
-            backend: "outetts"
-            dockerfile: "./backend/Dockerfile.python"
-            context: "./"
-            ubuntu-version: '2404'
   backend-jobs-darwin:
     uses: ./.github/workflows/backend_build_darwin.yml
     strategy:

@@ -1936,9 +1570,6 @@ jobs:
         - backend: "diffusers"
           tag-suffix: "-metal-darwin-arm64-diffusers"
           build-type: "mps"
-        - backend: "ace-step"
-          tag-suffix: "-metal-darwin-arm64-ace-step"
-          build-type: "mps"
         - backend: "mlx"
           tag-suffix: "-metal-darwin-arm64-mlx"
           build-type: "mps"

@@ -1959,71 +1590,6 @@ jobs:
           tag-suffix: "-metal-darwin-arm64-whisper"
           build-type: "metal"
           lang: "go"
-        - backend: "voxtral"
-          tag-suffix: "-metal-darwin-arm64-voxtral"
-          build-type: "metal"
-          lang: "go"
-        - backend: "vibevoice"
-          tag-suffix: "-metal-darwin-arm64-vibevoice"
-          build-type: "mps"
-        - backend: "qwen-asr"
-          tag-suffix: "-metal-darwin-arm64-qwen-asr"
-          build-type: "mps"
-        - backend: "nemo"
-          tag-suffix: "-metal-darwin-arm64-nemo"
-          build-type: "mps"
-        - backend: "qwen-tts"
-          tag-suffix: "-metal-darwin-arm64-qwen-tts"
-          build-type: "mps"
-        - backend: "voxcpm"
-          tag-suffix: "-metal-darwin-arm64-voxcpm"
-          build-type: "mps"
-        - backend: "pocket-tts"
-          tag-suffix: "-metal-darwin-arm64-pocket-tts"
-          build-type: "mps"
-        - backend: "moonshine"
-          tag-suffix: "-metal-darwin-arm64-moonshine"
-          build-type: "mps"
-        - backend: "whisperx"
-          tag-suffix: "-metal-darwin-arm64-whisperx"
-          build-type: "mps"
-        - backend: "rerankers"
-          tag-suffix: "-metal-darwin-arm64-rerankers"
-          build-type: "mps"
-        - backend: "transformers"
-          tag-suffix: "-metal-darwin-arm64-transformers"
-          build-type: "mps"
-        - backend: "kokoro"
-          tag-suffix: "-metal-darwin-arm64-kokoro"
-          build-type: "mps"
-        - backend: "faster-whisper"
-          tag-suffix: "-metal-darwin-arm64-faster-whisper"
-          build-type: "mps"
-        - backend: "coqui"
-          tag-suffix: "-metal-darwin-arm64-coqui"
-          build-type: "mps"
-        - backend: "rfdetr"
-          tag-suffix: "-metal-darwin-arm64-rfdetr"
-          build-type: "mps"
-        - backend: "kitten-tts"
-          tag-suffix: "-metal-darwin-arm64-kitten-tts"
-          build-type: "mps"
-        - backend: "piper"
-          tag-suffix: "-metal-darwin-arm64-piper"
-          build-type: "metal"
-          lang: "go"
-        - backend: "silero-vad"
-          tag-suffix: "-metal-darwin-arm64-silero-vad"
-          build-type: "metal"
-          lang: "go"
-        - backend: "local-store"
-          tag-suffix: "-metal-darwin-arm64-local-store"
-          build-type: "metal"
-          lang: "go"
-        - backend: "huggingface"
-          tag-suffix: "-metal-darwin-arm64-huggingface"
-          build-type: "metal"
-          lang: "go"
     with:
       backend: ${{ matrix.backend }}
       build-type: ${{ matrix.build-type }}
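Note: every entry removed above is one include item of this job's build matrix, and all of them share the shape of the entries that survive. For reference, a representative entry with values taken from the removed cpu-ace-step block (the per-field comments are our reading of the workflow, not part of the diff):

    - build-type: ''                            # '', 'cublas', 'hipblas', 'intel', or 'l4t'
      cuda-major-version: ""                    # set only for CUDA builds, e.g. "12" or "13"
      cuda-minor-version: ""
      platforms: 'linux/amd64'                  # docker buildx target platform(s)
      tag-latest: 'auto'
      tag-suffix: '-cpu-ace-step'               # suffix appended to the published image tag
      runs-on: 'ubuntu-latest'                  # runner label; also 'arc-runner-set', 'ubuntu-24.04-arm'
      base-image: "ubuntu:24.04"                # or a ROCm/oneAPI/l4t-jetpack base for GPU variants
      skip-drivers: 'true'                      # 'true' skips GPU driver installation on CPU-only images
      backend: "ace-step"                       # backend directory to build
      dockerfile: "./backend/Dockerfile.python" # "./backend/Dockerfile.golang" for Go backends
      context: "./"
      ubuntu-version: '2404'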
.github/workflows/bump_deps.yaml (5 changes, vendored)
@@ -5,7 +5,6 @@ on:
   workflow_dispatch:
 jobs:
   bump-backends:
-    if: github.repository == 'mudler/LocalAI'
     strategy:
       fail-fast: false
       matrix:

@@ -30,10 +29,6 @@ jobs:
           variable: "PIPER_VERSION"
           branch: "master"
           file: "backend/go/piper/Makefile"
-        - repository: "antirez/voxtral.c"
-          variable: "VOXTRAL_VERSION"
-          branch: "main"
-          file: "backend/go/voxtral/Makefile"
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v6
.github/workflows/bump_docs.yaml (1 change, vendored)
@@ -5,7 +5,6 @@ on:
   workflow_dispatch:
 jobs:
   bump-docs:
-    if: github.repository == 'mudler/LocalAI'
     strategy:
       fail-fast: false
       matrix:
.github/workflows/checksum_checker.yaml (1 change, vendored)
@@ -5,7 +5,6 @@ on:
   workflow_dispatch:
 jobs:
   checksum_check:
-    if: github.repository == 'mudler/LocalAI'
     runs-on: ubuntu-latest
     steps:
       - name: Force Install GIT latest
.github/workflows/dependabot_auto.yml (2 changes, vendored)
@@ -9,8 +9,8 @@ permissions:
 
 jobs:
   dependabot:
-    if: github.repository == 'mudler/LocalAI' && github.actor == 'dependabot[bot]'
     runs-on: ubuntu-latest
+    if: ${{ github.actor == 'dependabot[bot]' }}
     steps:
       - name: Dependabot metadata
         id: metadata
.github/workflows/deploy-explorer.yaml (5 changes, vendored)
@@ -12,7 +12,6 @@ concurrency:
 
 jobs:
   build-linux:
-    if: github.repository == 'mudler/LocalAI'
     runs-on: ubuntu-latest
     steps:
       - name: Clone

@@ -34,7 +33,7 @@ jobs:
         run: |
           CGO_ENABLED=0 make build
       - name: rm
-        uses: appleboy/ssh-action@v1.2.5
+        uses: appleboy/ssh-action@v1.2.4
         with:
           host: ${{ secrets.EXPLORER_SSH_HOST }}
           username: ${{ secrets.EXPLORER_SSH_USERNAME }}

@@ -54,7 +53,7 @@ jobs:
           rm: true
           target: ./local-ai
       - name: restarting
-        uses: appleboy/ssh-action@v1.2.5
+        uses: appleboy/ssh-action@v1.2.4
         with:
           host: ${{ secrets.EXPLORER_SSH_HOST }}
           username: ${{ secrets.EXPLORER_SSH_USERNAME }}
.github/workflows/gallery-agent.yaml (1 change, vendored)
@@ -27,7 +27,6 @@ on:
         type: string
 jobs:
   gallery-agent:
-    if: github.repository == 'mudler/LocalAI'
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
.github/workflows/generate_grpc_cache.yaml (1 change, vendored)
@@ -13,7 +13,6 @@ concurrency:
 
 jobs:
   generate_caches:
-    if: github.repository == 'mudler/LocalAI'
     strategy:
       matrix:
         include:
.github/workflows/generate_intel_image.yaml (1 change, vendored)
@@ -12,7 +12,6 @@ concurrency:
 
 jobs:
   generate_caches:
-    if: github.repository == 'mudler/LocalAI'
     strategy:
       matrix:
         include:
.github/workflows/image.yml (3 changes, vendored)
@@ -14,7 +14,6 @@
 
 jobs:
   hipblas-jobs:
-    if: github.repository == 'mudler/LocalAI'
     uses: ./.github/workflows/image_build.yml
     with:
       tag-latest: ${{ matrix.tag-latest }}

@@ -51,7 +50,6 @@
             ubuntu-codename: 'noble'
 
   core-image-build:
-    if: github.repository == 'mudler/LocalAI'
     uses: ./.github/workflows/image_build.yml
     with:
       tag-latest: ${{ matrix.tag-latest }}

@@ -138,7 +136,6 @@
             ubuntu-codename: 'noble'
 
   gh-runner:
-    if: github.repository == 'mudler/LocalAI'
     uses: ./.github/workflows/image_build.yml
     with:
       tag-latest: ${{ matrix.tag-latest }}
.github/workflows/localaibot_automerge.yml (2 changes, vendored)
@@ -10,8 +10,8 @@ permissions:
   actions: write # to dispatch publish workflow
 jobs:
   dependabot:
-    if: github.repository == 'mudler/LocalAI' && github.actor == 'localai-bot' && !contains(github.event.pull_request.title, 'chore(model gallery):')
     runs-on: ubuntu-latest
+    if: ${{ github.actor == 'localai-bot' && !contains(github.event.pull_request.title, 'chore(model gallery):') }}
     steps:
       - name: Checkout repository
         uses: actions/checkout@v6
.github/workflows/notify-models.yaml (4 changes, vendored)
@@ -10,7 +10,7 @@ permissions:
 
 jobs:
   notify-discord:
-    if: github.repository == 'mudler/LocalAI' && (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model'))
+    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
     env:
       MODEL_NAME: gemma-3-12b-it-qat
     runs-on: ubuntu-latest

@@ -90,7 +90,7 @@ jobs:
         connect-timeout-seconds: 180
         limit-access-to-actor: true
   notify-twitter:
-    if: github.repository == 'mudler/LocalAI' && (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model'))
+    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
     env:
       MODEL_NAME: gemma-3-12b-it-qat
     runs-on: ubuntu-latest
.github/workflows/notify-releases.yaml (1 change, vendored)
@@ -6,7 +6,6 @@ on:
 
 jobs:
   notify-discord:
-    if: github.repository == 'mudler/LocalAI'
     runs-on: ubuntu-latest
     env:
       RELEASE_BODY: ${{ github.event.release.body }}
.github/workflows/stalebot.yml (1 change, vendored)
@@ -8,7 +8,6 @@ on:
 
 jobs:
   stale:
-    if: github.repository == 'mudler/LocalAI'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v9
.github/workflows/test-extra.yml (50 changes, vendored)
@@ -323,25 +323,6 @@ jobs:
         run: |
           make --jobs=5 --output-sync=target -C backend/python/qwen-asr
           make --jobs=5 --output-sync=target -C backend/python/qwen-asr test
-  tests-nemo:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v6
-        with:
-          submodules: true
-      - name: Dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y build-essential ffmpeg sox
-          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
-          # Install UV
-          curl -LsSf https://astral.sh/uv/install.sh | sh
-          pip install --user --no-cache-dir grpcio-tools==1.64.1
-      - name: Test nemo
-        run: |
-          make --jobs=5 --output-sync=target -C backend/python/nemo
-          make --jobs=5 --output-sync=target -C backend/python/nemo test
   tests-voxcpm:
     runs-on: ubuntu-latest
     steps:

@@ -361,34 +342,3 @@ jobs:
         run: |
           make --jobs=5 --output-sync=target -C backend/python/voxcpm
           make --jobs=5 --output-sync=target -C backend/python/voxcpm test
-  tests-voxtral:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v6
-        with:
-          submodules: true
-      - name: Dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y build-essential cmake curl libopenblas-dev ffmpeg
-      - name: Setup Go
-        uses: actions/setup-go@v5
-      # You can test your matrix by printing the current Go version
-      - name: Display Go version
-        run: go version
-      - name: Proto Dependencies
-        run: |
-          # Install protoc
-          curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
-          unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
-          rm protoc.zip
-          go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
-          PATH="$PATH:$HOME/go/bin" make protogen-go
-      - name: Build voxtral
-        run: |
-          make --jobs=5 --output-sync=target -C backend/go/voxtral
-      - name: Test voxtral
-        run: |
-          make --jobs=5 --output-sync=target -C backend/go/voxtral test
.github/workflows/update_swagger.yaml (1 change, vendored)
@@ -5,7 +5,6 @@ on:
   workflow_dispatch:
 jobs:
   swagger:
-    if: github.repository == 'mudler/LocalAI'
     strategy:
       fail-fast: false
     runs-on: ubuntu-latest
Makefile (18 changes)
@@ -1,5 +1,5 @@
 # Disable parallel execution for backend builds
-.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/outetts backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/vllm-omni backends/moonshine backends/pocket-tts backends/qwen-tts backends/qwen-asr backends/nemo backends/voxcpm backends/whisperx backends/ace-step backends/voxtral
+.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/vllm-omni backends/moonshine backends/pocket-tts backends/qwen-tts backends/qwen-asr backends/voxcpm backends/whisperx
 
 GOCMD=go
 GOTEST=$(GOCMD) test

@@ -308,7 +308,6 @@ protogen-go-clean:
 
 prepare-test-extra: protogen-python
 	$(MAKE) -C backend/python/transformers
-	$(MAKE) -C backend/python/outetts
 	$(MAKE) -C backend/python/diffusers
 	$(MAKE) -C backend/python/chatterbox
 	$(MAKE) -C backend/python/vllm

@@ -318,14 +317,11 @@ prepare-test-extra: protogen-python
 	$(MAKE) -C backend/python/pocket-tts
 	$(MAKE) -C backend/python/qwen-tts
 	$(MAKE) -C backend/python/qwen-asr
-	$(MAKE) -C backend/python/nemo
 	$(MAKE) -C backend/python/voxcpm
 	$(MAKE) -C backend/python/whisperx
-	$(MAKE) -C backend/python/ace-step
 
 test-extra: prepare-test-extra
 	$(MAKE) -C backend/python/transformers test
-	$(MAKE) -C backend/python/outetts test
 	$(MAKE) -C backend/python/diffusers test
 	$(MAKE) -C backend/python/chatterbox test
 	$(MAKE) -C backend/python/vllm test

@@ -335,10 +331,8 @@ test-extra: prepare-test-extra
 	$(MAKE) -C backend/python/pocket-tts test
 	$(MAKE) -C backend/python/qwen-tts test
 	$(MAKE) -C backend/python/qwen-asr test
-	$(MAKE) -C backend/python/nemo test
 	$(MAKE) -C backend/python/voxcpm test
 	$(MAKE) -C backend/python/whisperx test
-	$(MAKE) -C backend/python/ace-step test
 
 DOCKER_IMAGE?=local-ai
 DOCKER_AIO_IMAGE?=local-ai-aio

@@ -453,12 +447,10 @@ BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
 BACKEND_SILERO_VAD = silero-vad|golang|.|false|true
 BACKEND_STABLEDIFFUSION_GGML = stablediffusion-ggml|golang|.|--progress=plain|true
 BACKEND_WHISPER = whisper|golang|.|false|true
-BACKEND_VOXTRAL = voxtral|golang|.|false|true
 
 # Python backends with root context
 BACKEND_RERANKERS = rerankers|python|.|false|true
 BACKEND_TRANSFORMERS = transformers|python|.|false|true
-BACKEND_OUTETTS = outetts|python|.|false|true
 BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
 BACKEND_COQUI = coqui|python|.|false|true
 BACKEND_RFDETR = rfdetr|python|.|false|true

@@ -474,10 +466,8 @@ BACKEND_MOONSHINE = moonshine|python|.|false|true
 BACKEND_POCKET_TTS = pocket-tts|python|.|false|true
 BACKEND_QWEN_TTS = qwen-tts|python|.|false|true
 BACKEND_QWEN_ASR = qwen-asr|python|.|false|true
-BACKEND_NEMO = nemo|python|.|false|true
 BACKEND_VOXCPM = voxcpm|python|.|false|true
 BACKEND_WHISPERX = whisperx|python|.|false|true
-BACKEND_ACE_STEP = ace-step|python|.|false|true
 
 # Helper function to build docker image for a backend
 # Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)

@@ -507,10 +497,8 @@ $(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
 $(eval $(call generate-docker-build-target,$(BACKEND_SILERO_VAD)))
 $(eval $(call generate-docker-build-target,$(BACKEND_STABLEDIFFUSION_GGML)))
 $(eval $(call generate-docker-build-target,$(BACKEND_WHISPER)))
-$(eval $(call generate-docker-build-target,$(BACKEND_VOXTRAL)))
 $(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
 $(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
-$(eval $(call generate-docker-build-target,$(BACKEND_OUTETTS)))
 $(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
 $(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
 $(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))

@@ -526,16 +514,14 @@ $(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
 $(eval $(call generate-docker-build-target,$(BACKEND_POCKET_TTS)))
 $(eval $(call generate-docker-build-target,$(BACKEND_QWEN_TTS)))
 $(eval $(call generate-docker-build-target,$(BACKEND_QWEN_ASR)))
-$(eval $(call generate-docker-build-target,$(BACKEND_NEMO)))
 $(eval $(call generate-docker-build-target,$(BACKEND_VOXCPM)))
 $(eval $(call generate-docker-build-target,$(BACKEND_WHISPERX)))
-$(eval $(call generate-docker-build-target,$(BACKEND_ACE_STEP)))
 
 # Pattern rule for docker-save targets
 docker-save-%: backend-images
 	docker save local-ai-backend:$* -o backend-images/$*.tar
 
-docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-vllm-omni docker-build-transformers docker-build-outetts docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-chatterbox docker-build-vibevoice docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts docker-build-qwen-asr docker-build-nemo docker-build-voxcpm docker-build-whisperx docker-build-ace-step docker-build-voxtral
+docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-vllm-omni docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-chatterbox docker-build-vibevoice docker-build-moonshine docker-build-pocket-tts docker-build-qwen-tts docker-build-qwen-asr docker-build-voxcpm docker-build-whisperx
 
 ########################################################
 ### Mock Backend for E2E Tests
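Note on the pipe-separated backend specs removed above: the retained Usage comment documents five macro arguments (BACKEND_NAME, DOCKERFILE_TYPE, BUILD_CONTEXT, PROGRESS_FLAG, NEEDS_BACKEND_ARG), and each BACKEND_* variable appears to pack exactly those five values into one string that generate-docker-build-target splits. An illustrative reading of one removed spec (the field labels come from the Usage comment, not from the diff itself):

    BACKEND_VOXTRAL = voxtral|golang|.|false|true
    #                 name   |type  |ctx|progress|needs-backend-arg
    # $(eval $(call generate-docker-build-target,$(BACKEND_VOXTRAL))) then defines a
    # docker-build-voxtral target; the 'golang' type presumably selects
    # ./backend/Dockerfile.golang, matching the workflow matrix seen earlier.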
README.md (13 changes)
@@ -203,8 +203,7 @@ local-ai run oci://localai/phi-2:latest
 For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html), if you are interested in our roadmap items and future enhancements, you can see the [Issues labeled as Roadmap here](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
 
 ## 📰 Latest project news
-- February 2026: [Realtime API for audio-to-audio with tool calling](https://github.com/mudler/LocalAI/pull/6245), [ACE-Step 1.5 support](https://github.com/mudler/LocalAI/pull/8396)
-- January 2026: **LocalAI 3.10.0** - Major release with Anthropic API support, Open Responses API for stateful agents, video & image generation suite (LTX-2), unified GPU backends, tool streaming & XML parsing, system-aware backend gallery, crash fixes for AVX-only CPUs and AMD VRAM reporting, request tracing, and new backends: **Moonshine** (ultra-fast transcription), **Pocket-TTS** (lightweight TTS). Vulkan arm64 builds now available. [Release notes](https://github.com/mudler/LocalAI/releases/tag/v3.10.0).
 - December 2025: [Dynamic Memory Resource reclaimer](https://github.com/mudler/LocalAI/pull/7583), [Automatic fitting of models to multiple GPUS(llama.cpp)](https://github.com/mudler/LocalAI/pull/7584), [Added Vibevoice backend](https://github.com/mudler/LocalAI/pull/7494)
 - November 2025: Major improvements to the UX. Among these: [Import models via URL](https://github.com/mudler/LocalAI/pull/7245) and [Multiple chats and history](https://github.com/mudler/LocalAI/pull/7325)
 - October 2025: 🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) support added for agentic capabilities with external tools

@@ -270,7 +269,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
 |---------|-------------|---------------------|
 | **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
 | **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
-| **moonshine** | Ultra-fast transcription engine for low-end devices | CUDA 12/13, Metal, CPU |
 | **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
 | **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
 | **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |

@@ -281,7 +279,6 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
 | **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
 | **pocket-tts** | Lightweight CPU-based TTS | CUDA 12/13, ROCm, Intel, CPU |
 | **qwen-tts** | High-quality TTS with custom voice, voice design, and voice cloning | CUDA 12/13, ROCm, Intel, CPU |
-| **ace-step** | Music generation from text descriptions, lyrics, or audio samples | CUDA 12/13, ROCm, Intel, Metal, CPU |
 
 ### Image & Video Generation
 | Backend | Description | Acceleration Support |

@@ -303,11 +300,11 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
 |-------------------|-------------------|------------------|
 | **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
 | **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
-| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, neutts, vibevoice, pocket-tts, qwen-tts, ace-step | AMD Graphics |
-| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, coqui, kokoro, vibevoice, pocket-tts, qwen-tts, ace-step | Intel Arc, Intel iGPUs |
-| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, moonshine, ace-step | Apple M1/M2/M3+ |
+| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, neutts, vibevoice, pocket-tts, qwen-tts | AMD Graphics |
+| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, coqui, kokoro, vibevoice, pocket-tts, qwen-tts | Intel Arc, Intel iGPUs |
+| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM | Apple M1/M2/M3+ |
 | **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
-| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr, ace-step | ARM64 embedded AI (AGX Orin, etc.) |
+| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
 | **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
 | **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
 
@@ -20,7 +20,7 @@ RUN apt-get update && \
         build-essential \
         git ccache \
         ca-certificates \
-        make cmake wget libopenblas-dev \
+        make cmake wget \
         curl unzip \
         libssl-dev && \
     apt-get clean && \
@@ -365,14 +365,6 @@ message SoundGenerationRequest {
   optional bool sample = 6;
   optional string src = 7;
   optional int32 src_divisor = 8;
-  optional bool think = 9;
-  optional string caption = 10;
-  optional string lyrics = 11;
-  optional int32 bpm = 12;
-  optional string keyscale = 13;
-  optional string language = 14;
-  optional string timesignature = 15;
-  optional bool instrumental = 17;
 }
 
 message TokenizationResponse {
@@ -1,5 +1,5 @@
 
-LLAMA_VERSION?=8872ad2125336d209a9911a82101f80095a9831d
+LLAMA_VERSION?=2634ed207a17db1a54bd8df0555bd8499a6ab691
 LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
 
 CMAKE_ARGS?=
@@ -7,6 +7,3 @@ package:
 	bash package.sh
 
 build: huggingface package
-
-clean:
-	rm -f huggingface
@@ -8,5 +8,5 @@ set -e
 CURDIR=$(dirname "$(realpath $0)")
 
 mkdir -p $CURDIR/package
-cp -avf $CURDIR/huggingface $CURDIR/package/
+cp -avrf $CURDIR/huggingface $CURDIR/package/
 cp -rfv $CURDIR/run.sh $CURDIR/package/
@@ -7,6 +7,3 @@ package:
 	bash package.sh
 
 build: local-store package
-
-clean:
-	rm -f local-store
@@ -8,5 +8,5 @@ set -e
 CURDIR=$(dirname "$(realpath $0)")
 
 mkdir -p $CURDIR/package
-cp -avf $CURDIR/local-store $CURDIR/package/
+cp -avrf $CURDIR/local-store $CURDIR/package/
 cp -rfv $CURDIR/run.sh $CURDIR/package/
@@ -35,6 +35,3 @@ package:
 	bash package.sh
 
 build: piper package
-
-clean:
-	rm -f piper
@@ -10,8 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
 # Create lib directory
 mkdir -p $CURDIR/package/lib
 
-cp -avf $CURDIR/piper $CURDIR/package/
-cp -avf $CURDIR/espeak-ng-data $CURDIR/package/
+cp -avrf $CURDIR/piper $CURDIR/package/
+cp -avrf $CURDIR/espeak-ng-data $CURDIR/package/
 cp -rfv $CURDIR/run.sh $CURDIR/package/
 cp -rfLv $CURDIR/sources/go-piper/piper-phonemize/pi/lib/* $CURDIR/package/lib/
 
|
|||||||
@@ -45,6 +45,3 @@ package:
|
|||||||
bash package.sh
|
bash package.sh
|
||||||
|
|
||||||
build: silero-vad package
|
build: silero-vad package
|
||||||
|
|
||||||
clean:
|
|
||||||
rm -f silero-vad
|
|
||||||
@@ -10,8 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
|
|||||||
# Create lib directory
|
# Create lib directory
|
||||||
mkdir -p $CURDIR/package/lib
|
mkdir -p $CURDIR/package/lib
|
||||||
|
|
||||||
cp -avf $CURDIR/silero-vad $CURDIR/package/
|
cp -avrf $CURDIR/silero-vad $CURDIR/package/
|
||||||
cp -avf $CURDIR/run.sh $CURDIR/package/
|
cp -avrf $CURDIR/run.sh $CURDIR/package/
|
||||||
cp -rfLv $CURDIR/backend-assets/lib/* $CURDIR/package/lib/
|
cp -rfLv $CURDIR/backend-assets/lib/* $CURDIR/package/lib/
|
||||||
|
|
||||||
# Detect architecture and copy appropriate libraries
|
# Detect architecture and copy appropriate libraries
|
||||||
|
|||||||
9 backend/go/voxtral/.gitignore vendored
@@ -1,9 +0,0 @@
-.cache/
-sources/
-build/
-build-*/
-package/
-voxtral
-*.so
-*.dylib
-compile_commands.json
backend/go/voxtral/CMakeLists.txt
@@ -1,84 +0,0 @@
-cmake_minimum_required(VERSION 3.12)
-
-if(USE_METAL)
-    project(govoxtral LANGUAGES C OBJC)
-else()
-    project(govoxtral LANGUAGES C)
-endif()
-
-set(CMAKE_POSITION_INDEPENDENT_CODE ON)
-set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
-
-# Workaround: CMake + GCC linker depfile generation fails for MODULE libraries
-set(CMAKE_C_LINKER_DEPFILE_SUPPORTED FALSE)
-
-# Build voxtral.c as a library
-set(VOXTRAL_SOURCES
-    sources/voxtral.c/voxtral.c
-    sources/voxtral.c/voxtral_kernels.c
-    sources/voxtral.c/voxtral_audio.c
-    sources/voxtral.c/voxtral_encoder.c
-    sources/voxtral.c/voxtral_decoder.c
-    sources/voxtral.c/voxtral_tokenizer.c
-    sources/voxtral.c/voxtral_safetensors.c
-)
-
-# Metal GPU acceleration (macOS arm64 only)
-if(USE_METAL)
-    # Generate embedded shader header from .metal source via xxd
-    add_custom_command(
-        OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/sources/voxtral.c/voxtral_shaders_source.h
-        COMMAND xxd -i voxtral_shaders.metal > voxtral_shaders_source.h
-        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/sources/voxtral.c
-        DEPENDS sources/voxtral.c/voxtral_shaders.metal
-        COMMENT "Generating embedded Metal shaders header"
-    )
-    list(APPEND VOXTRAL_SOURCES sources/voxtral.c/voxtral_metal.m)
-    set_source_files_properties(sources/voxtral.c/voxtral_metal.m PROPERTIES
-        COMPILE_FLAGS "-fobjc-arc"
-    )
-endif()
-
-add_library(govoxtral MODULE csrc/govoxtral.c ${VOXTRAL_SOURCES})
-
-target_include_directories(govoxtral PRIVATE sources/voxtral.c csrc)
-
-target_compile_options(govoxtral PRIVATE -O3 -ffast-math)
-
-if(USE_METAL)
-    target_compile_definitions(govoxtral PRIVATE USE_BLAS USE_METAL ACCELERATE_NEW_LAPACK)
-    target_link_libraries(govoxtral PRIVATE
-        "-framework Accelerate"
-        "-framework Metal"
-        "-framework MetalPerformanceShaders"
-        "-framework MetalPerformanceShadersGraph"
-        "-framework Foundation"
-        "-framework AudioToolbox"
-        "-framework CoreFoundation"
-        m
-    )
-    # Ensure the generated shader header is built before compiling
-    target_sources(govoxtral PRIVATE
-        ${CMAKE_CURRENT_SOURCE_DIR}/sources/voxtral.c/voxtral_shaders_source.h
-    )
-elseif(USE_OPENBLAS)
-    # Try to find OpenBLAS; use it if available, otherwise fall back to pure C
-    find_package(BLAS)
-    if(BLAS_FOUND)
-        target_compile_definitions(govoxtral PRIVATE USE_BLAS USE_OPENBLAS)
-        target_link_libraries(govoxtral PRIVATE ${BLAS_LIBRARIES} m)
-        target_include_directories(govoxtral PRIVATE /usr/include/openblas)
-    else()
-        message(WARNING "OpenBLAS requested but not found, building without BLAS")
-        target_link_libraries(govoxtral PRIVATE m)
-    endif()
-elseif(APPLE)
-    # macOS without Metal: use Accelerate framework
-    target_compile_definitions(govoxtral PRIVATE USE_BLAS ACCELERATE_NEW_LAPACK)
-    target_link_libraries(govoxtral PRIVATE "-framework Accelerate" m)
-else()
-    target_link_libraries(govoxtral PRIVATE m)
-endif()
-
-set_property(TARGET govoxtral PROPERTY C_STANDARD 11)
-set_target_properties(govoxtral PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
backend/go/voxtral/Makefile
@@ -1,124 +0,0 @@
-.NOTPARALLEL:
-
-CMAKE_ARGS?=
-BUILD_TYPE?=
-NATIVE?=true
-
-GOCMD?=go
-GO_TAGS?=
-JOBS?=$(shell nproc --ignore=1 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4)
-
-# voxtral.c version
-VOXTRAL_REPO?=https://github.com/antirez/voxtral.c
-VOXTRAL_VERSION?=8f810dd23c44be5453cb46c92216a3eaab46e85f
-
-# Detect OS
-UNAME_S := $(shell uname -s)
-
-# Shared library extension
-ifeq ($(UNAME_S),Darwin)
-SO_EXT=dylib
-else
-SO_EXT=so
-endif
-
-SO_TARGET?=libgovoxtral.$(SO_EXT)
-
-CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
-
-ifeq ($(NATIVE),false)
-ifneq ($(UNAME_S),Darwin)
-CMAKE_ARGS+=-DCMAKE_C_FLAGS="-march=x86-64"
-endif
-endif
-
-ifeq ($(BUILD_TYPE),cublas)
-CMAKE_ARGS+=-DUSE_OPENBLAS=OFF
-else ifeq ($(BUILD_TYPE),hipblas)
-CMAKE_ARGS+=-DUSE_OPENBLAS=OFF
-else ifeq ($(BUILD_TYPE),metal)
-CMAKE_ARGS+=-DUSE_OPENBLAS=OFF -DUSE_METAL=ON
-else ifeq ($(UNAME_S),Darwin)
-# Default on macOS: use Accelerate (no OpenBLAS needed)
-CMAKE_ARGS+=-DUSE_OPENBLAS=OFF
-else
-CMAKE_ARGS+=-DUSE_OPENBLAS=ON
-endif
-
-# Only build CPU variants on Linux; on Darwin build a single dylib
-ifeq ($(UNAME_S),Linux)
-VARIANT_TARGETS = libgovoxtral-avx.so libgovoxtral-avx2.so libgovoxtral-fallback.so
-else ifeq ($(UNAME_S),Darwin)
-VARIANT_TARGETS = libgovoxtral-fallback.dylib
-else
-VARIANT_TARGETS = libgovoxtral-fallback.so
-endif
-
-sources/voxtral.c:
-	mkdir -p sources/voxtral.c
-	cd sources/voxtral.c && \
-	git init && \
-	git remote add origin $(VOXTRAL_REPO) && \
-	git fetch origin && \
-	git checkout $(VOXTRAL_VERSION) && \
-	git submodule update --init --recursive --depth 1 --single-branch
-
-voxtral: main.go govoxtral.go $(VARIANT_TARGETS)
-	CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o voxtral ./
-
-package: voxtral
-	bash package.sh
-
-build: package
-
-clean: purge
-	rm -rf libgovoxtral*.so libgovoxtral*.dylib package sources/voxtral.c voxtral
-
-purge:
-	rm -rf build*
-
-# Build all variants (Linux only)
-ifeq ($(UNAME_S),Linux)
-libgovoxtral-avx.so: sources/voxtral.c
-	$(MAKE) purge
-	$(info Building voxtral: avx)
-	SO_TARGET=libgovoxtral-avx.so CMAKE_ARGS="$(CMAKE_ARGS) -DCMAKE_C_FLAGS='-mavx -mno-avx2 -mno-avx512f -mno-fma -mno-f16c'" $(MAKE) libgovoxtral-custom
-	rm -rfv build*
-
-libgovoxtral-avx2.so: sources/voxtral.c
-	$(MAKE) purge
-	$(info Building voxtral: avx2)
-	SO_TARGET=libgovoxtral-avx2.so CMAKE_ARGS="$(CMAKE_ARGS) -DCMAKE_C_FLAGS='-mavx -mavx2 -mfma -mf16c'" $(MAKE) libgovoxtral-custom
-	rm -rfv build*
-endif
-
-# Build fallback variant
-ifeq ($(UNAME_S),Darwin)
-libgovoxtral-fallback.dylib: sources/voxtral.c
-	$(MAKE) purge
-	$(info Building voxtral: darwin fallback)
-	SO_TARGET=libgovoxtral-fallback.dylib NATIVE=true $(MAKE) libgovoxtral-custom
-	rm -rfv build*
-else
-libgovoxtral-fallback.so: sources/voxtral.c
-	$(MAKE) purge
-	$(info Building voxtral: fallback)
-	SO_TARGET=libgovoxtral-fallback.so CMAKE_ARGS="$(CMAKE_ARGS) -DCMAKE_C_FLAGS='-mno-avx -mno-avx2 -mno-avx512f -mno-fma -mno-f16c'" $(MAKE) libgovoxtral-custom
-	rm -rfv build*
-endif
-
-libgovoxtral-custom: CMakeLists.txt csrc/govoxtral.c csrc/govoxtral.h
-	mkdir -p build-$(SO_TARGET) && \
-	cd build-$(SO_TARGET) && \
-	cmake .. $(CMAKE_ARGS) && \
-	cmake --build . --config Release -j$(JOBS) && \
-	cd .. && \
-	(mv build-$(SO_TARGET)/libgovoxtral.so ./$(SO_TARGET) 2>/dev/null || \
-	 mv build-$(SO_TARGET)/libgovoxtral.dylib ./$(SO_TARGET) 2>/dev/null)
-
-test: voxtral
-	@echo "Running voxtral tests..."
-	bash test.sh
-	@echo "voxtral tests completed."
-
-all: voxtral package
backend/go/voxtral/csrc/govoxtral.c
@@ -1,62 +0,0 @@
-#include "govoxtral.h"
-#include "voxtral.h"
-#include "voxtral_audio.h"
-#ifdef USE_METAL
-#include "voxtral_metal.h"
-#endif
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-
-static vox_ctx_t *ctx = NULL;
-static char *last_result = NULL;
-static int metal_initialized = 0;
-
-int load_model(const char *model_dir) {
-    if (ctx != NULL) {
-        vox_free(ctx);
-        ctx = NULL;
-    }
-
-#ifdef USE_METAL
-    if (!metal_initialized) {
-        vox_metal_init();
-        metal_initialized = 1;
-    }
-#endif
-
-    ctx = vox_load(model_dir);
-    if (ctx == NULL) {
-        fprintf(stderr, "error: failed to load voxtral model from %s\n", model_dir);
-        return 1;
-    }
-
-    return 0;
-}
-
-const char *transcribe(const char *wav_path) {
-    if (ctx == NULL) {
-        fprintf(stderr, "error: model not loaded\n");
-        return "";
-    }
-
-    if (last_result != NULL) {
-        free(last_result);
-        last_result = NULL;
-    }
-
-    last_result = vox_transcribe(ctx, wav_path);
-    if (last_result == NULL) {
-        fprintf(stderr, "error: transcription failed for %s\n", wav_path);
-        return "";
-    }
-
-    return last_result;
-}
-
-void free_result(void) {
-    if (last_result != NULL) {
-        free(last_result);
-        last_result = NULL;
-    }
-}
backend/go/voxtral/csrc/govoxtral.h
@@ -1,8 +0,0 @@
-#ifndef GOVOXTRAL_H
-#define GOVOXTRAL_H
-
-extern int load_model(const char *model_dir);
-extern const char *transcribe(const char *wav_path);
-extern void free_result(void);
-
-#endif /* GOVOXTRAL_H */
backend/go/voxtral/govoxtral.go
@@ -1,60 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"os"
-	"strings"
-
-	"github.com/mudler/LocalAI/pkg/grpc/base"
-	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
-	"github.com/mudler/LocalAI/pkg/utils"
-)
-
-var (
-	CppLoadModel  func(modelDir string) int
-	CppTranscribe func(wavPath string) string
-	CppFreeResult func()
-)
-
-type Voxtral struct {
-	base.SingleThread
-}
-
-func (v *Voxtral) Load(opts *pb.ModelOptions) error {
-	if ret := CppLoadModel(opts.ModelFile); ret != 0 {
-		return fmt.Errorf("failed to load Voxtral model from %s", opts.ModelFile)
-	}
-	return nil
-}
-
-func (v *Voxtral) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
-	dir, err := os.MkdirTemp("", "voxtral")
-	if err != nil {
-		return pb.TranscriptResult{}, err
-	}
-	defer os.RemoveAll(dir)
-
-	convertedPath := dir + "/converted.wav"
-
-	if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
-		return pb.TranscriptResult{}, err
-	}
-
-	result := strings.Clone(CppTranscribe(convertedPath))
-	CppFreeResult()
-
-	text := strings.TrimSpace(result)
-
-	segments := []*pb.TranscriptSegment{}
-	if text != "" {
-		segments = append(segments, &pb.TranscriptSegment{
-			Id:   0,
-			Text: text,
-		})
-	}
-
-	return pb.TranscriptResult{
-		Segments: segments,
-		Text:     text,
-	}, nil
-}
backend/go/voxtral/main.go
@@ -1,53 +0,0 @@
-package main
-
-// Note: this is started internally by LocalAI and a server is allocated for each model
-import (
-	"flag"
-	"os"
-	"runtime"
-
-	"github.com/ebitengine/purego"
-	grpc "github.com/mudler/LocalAI/pkg/grpc"
-)
-
-var (
-	addr = flag.String("addr", "localhost:50051", "the address to connect to")
-)
-
-type LibFuncs struct {
-	FuncPtr any
-	Name    string
-}
-
-func main() {
-	// Get library name from environment variable, default to fallback
-	libName := os.Getenv("VOXTRAL_LIBRARY")
-	if libName == "" {
-		if runtime.GOOS == "darwin" {
-			libName = "./libgovoxtral-fallback.dylib"
-		} else {
-			libName = "./libgovoxtral-fallback.so"
-		}
-	}
-
-	gosd, err := purego.Dlopen(libName, purego.RTLD_NOW|purego.RTLD_GLOBAL)
-	if err != nil {
-		panic(err)
-	}
-
-	libFuncs := []LibFuncs{
-		{&CppLoadModel, "load_model"},
-		{&CppTranscribe, "transcribe"},
-		{&CppFreeResult, "free_result"},
-	}
-
-	for _, lf := range libFuncs {
-		purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
-	}
-
-	flag.Parse()
-
-	if err := grpc.StartServer(*addr, &Voxtral{}); err != nil {
-		panic(err)
-	}
-}
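Aside: a minimal, self-contained sketch of the purego pattern the deleted main.go relies on: a C symbol from a dlopen'ed shared library is bound to a plain Go function variable at runtime, with CGO_ENABLED=0. The library path "./libexample.so" and the "add" symbol are hypothetical, for illustration only.

package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

func main() {
	// Load the shared library at runtime (hypothetical path).
	lib, err := purego.Dlopen("./libexample.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}

	// Bind the exported C symbol "add" to a typed Go function variable.
	var add func(a, b int32) int32
	purego.RegisterLibFunc(&add, lib, "add")

	fmt.Println(add(2, 3)) // calls into the C library
}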
backend/go/voxtral/package.sh
@@ -1,68 +0,0 @@
-#!/bin/bash
-
-# Script to copy the appropriate libraries based on architecture
-
-set -e
-
-CURDIR=$(dirname "$(realpath $0)")
-REPO_ROOT="${CURDIR}/../../.."
-
-# Create lib directory
-mkdir -p $CURDIR/package/lib
-
-cp -avf $CURDIR/voxtral $CURDIR/package/
-cp -fv $CURDIR/libgovoxtral-*.so $CURDIR/package/ 2>/dev/null || true
-cp -fv $CURDIR/libgovoxtral-*.dylib $CURDIR/package/ 2>/dev/null || true
-cp -fv $CURDIR/run.sh $CURDIR/package/
-
-# Detect architecture and copy appropriate libraries
-if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
-    # x86_64 architecture
-    echo "Detected x86_64 architecture, copying x86_64 libraries..."
-    cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
-    cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
-    cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
-    cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
-    cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
-    cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
-    cp -arfLv /lib/x86_64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
-    cp -arfLv /lib/x86_64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
-    cp -arfLv /lib/x86_64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
-    # OpenBLAS if available
-    if [ -f /usr/lib/x86_64-linux-gnu/libopenblas.so.0 ]; then
-        cp -arfLv /usr/lib/x86_64-linux-gnu/libopenblas.so.0 $CURDIR/package/lib/
-    fi
-elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
-    # ARM64 architecture
-    echo "Detected ARM64 architecture, copying ARM64 libraries..."
-    cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
-    cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
-    cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
-    cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
-    cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
-    cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
-    cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
-    cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
-    cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
-    # OpenBLAS if available
-    if [ -f /usr/lib/aarch64-linux-gnu/libopenblas.so.0 ]; then
-        cp -arfLv /usr/lib/aarch64-linux-gnu/libopenblas.so.0 $CURDIR/package/lib/
-    fi
-elif [ $(uname -s) = "Darwin" ]; then
-    echo "Detected Darwin, system frameworks linked dynamically, no bundled libs needed"
-else
-    echo "Error: Could not detect architecture"
-    exit 1
-fi
-
-# Package GPU libraries based on BUILD_TYPE
-GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
-if [ -f "$GPU_LIB_SCRIPT" ]; then
-    echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
-    source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
-    package_gpu_libs
-fi
-
-echo "Packaging completed successfully"
-ls -liah $CURDIR/package/
-ls -liah $CURDIR/package/lib/
backend/go/voxtral/run.sh
@@ -1,49 +0,0 @@
-#!/bin/bash
-set -ex
-
-# Get the absolute current dir where the script is located
-CURDIR=$(dirname "$(realpath $0)")
-
-cd /
-
-echo "CPU info:"
-if [ "$(uname)" != "Darwin" ]; then
-    grep -e "model\sname" /proc/cpuinfo | head -1
-    grep -e "flags" /proc/cpuinfo | head -1
-fi
-
-if [ "$(uname)" = "Darwin" ]; then
-    # macOS: single dylib variant (Metal or Accelerate)
-    LIBRARY="$CURDIR/libgovoxtral-fallback.dylib"
-    export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH
-else
-    LIBRARY="$CURDIR/libgovoxtral-fallback.so"
-
-    if grep -q -e "\savx\s" /proc/cpuinfo ; then
-        echo "CPU: AVX found OK"
-        if [ -e $CURDIR/libgovoxtral-avx.so ]; then
-            LIBRARY="$CURDIR/libgovoxtral-avx.so"
-        fi
-    fi
-
-    if grep -q -e "\savx2\s" /proc/cpuinfo ; then
-        echo "CPU: AVX2 found OK"
-        if [ -e $CURDIR/libgovoxtral-avx2.so ]; then
-            LIBRARY="$CURDIR/libgovoxtral-avx2.so"
-        fi
-    fi
-
-    export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
-fi
-
-export VOXTRAL_LIBRARY=$LIBRARY
-
-# If there is a lib/ld.so, use it (Linux only)
-if [ -f $CURDIR/lib/ld.so ]; then
-    echo "Using lib/ld.so"
-    echo "Using library: $LIBRARY"
-    exec $CURDIR/lib/ld.so $CURDIR/voxtral "$@"
-fi
-
-echo "Using library: $LIBRARY"
-exec $CURDIR/voxtral "$@"
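Aside: run.sh picks the most capable prebuilt variant by grepping /proc/cpuinfo for AVX flags. A minimal sketch of the same dispatch in Go, using golang.org/x/sys/cpu instead of shelling out (an assumption of this sketch; the variant file names mirror the ones above):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

// pickLibrary mirrors the shell logic: prefer AVX2, then AVX, then the
// no-SIMD fallback build.
func pickLibrary() string {
	switch {
	case cpu.X86.HasAVX2:
		return "libgovoxtral-avx2.so"
	case cpu.X86.HasAVX:
		return "libgovoxtral-avx.so"
	default:
		return "libgovoxtral-fallback.so"
	}
}

func main() {
	fmt.Println("selected variant:", pickLibrary())
}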
backend/go/voxtral/test.sh
@@ -1,48 +0,0 @@
-#!/bin/bash
-set -e
-
-CURDIR=$(dirname "$(realpath $0)")
-
-echo "Running voxtral backend tests..."
-
-# The test requires:
-# - VOXTRAL_MODEL_DIR: path to directory containing consolidated.safetensors + tekken.json
-# - VOXTRAL_BINARY: path to the voxtral binary (defaults to ./voxtral)
-#
-# Tests that require the model will be skipped if VOXTRAL_MODEL_DIR is not set.
-
-cd "$CURDIR"
-export VOXTRAL_MODEL_DIR="${VOXTRAL_MODEL_DIR:-./voxtral-model}"
-
-if [ ! -d "$VOXTRAL_MODEL_DIR" ]; then
-    echo "Creating voxtral-model directory for tests..."
-    mkdir -p "$VOXTRAL_MODEL_DIR"
-    MODEL_ID="mistralai/Voxtral-Mini-4B-Realtime-2602"
-    echo "Model: ${MODEL_ID}"
-    echo ""
-
-    # Files to download
-    FILES=(
-        "consolidated.safetensors"
-        "params.json"
-        "tekken.json"
-    )
-
-    BASE_URL="https://huggingface.co/${MODEL_ID}/resolve/main"
-
-    for file in "${FILES[@]}"; do
-        dest="${VOXTRAL_MODEL_DIR}/${file}"
-        if [ -f "${dest}" ]; then
-            echo "  [skip] ${file} (already exists)"
-        else
-            echo "  [download] ${file}..."
-            curl -L -o "${dest}" "${BASE_URL}/${file}" --progress-bar
-            echo "  [done] ${file}"
-        fi
-    done
-fi
-
-# Run Go tests
-go test -v -timeout 300s ./...
-
-echo "All voxtral tests passed."
@@ -1,201 +0,0 @@
-package main
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net/http"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"testing"
-	"time"
-
-	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials/insecure"
-)
-
-const (
-	testAddr    = "localhost:50051"
-	sampleAudio = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-ASR-Repo/asr_en.wav"
-	startupWait = 5 * time.Second
-)
-
-func skipIfNoModel(t *testing.T) string {
-	t.Helper()
-	modelDir := os.Getenv("VOXTRAL_MODEL_DIR")
-	if modelDir == "" {
-		t.Skip("VOXTRAL_MODEL_DIR not set, skipping test (set to voxtral model directory)")
-	}
-	if _, err := os.Stat(filepath.Join(modelDir, "consolidated.safetensors")); os.IsNotExist(err) {
-		t.Skipf("Model file not found in %s, skipping", modelDir)
-	}
-	return modelDir
-}
-
-func startServer(t *testing.T) *exec.Cmd {
-	t.Helper()
-	binary := os.Getenv("VOXTRAL_BINARY")
-	if binary == "" {
-		binary = "./voxtral"
-	}
-	if _, err := os.Stat(binary); os.IsNotExist(err) {
-		t.Skipf("Backend binary not found at %s, skipping", binary)
-	}
-	cmd := exec.Command(binary, "--addr", testAddr)
-	cmd.Stdout = os.Stderr
-	cmd.Stderr = os.Stderr
-	if err := cmd.Start(); err != nil {
-		t.Fatalf("Failed to start server: %v", err)
-	}
-	time.Sleep(startupWait)
-	return cmd
-}
-
-func stopServer(cmd *exec.Cmd) {
-	if cmd != nil && cmd.Process != nil {
-		cmd.Process.Kill()
-		cmd.Wait()
-	}
-}
-
-func dialGRPC(t *testing.T) *grpc.ClientConn {
-	t.Helper()
-	conn, err := grpc.Dial(testAddr,
-		grpc.WithTransportCredentials(insecure.NewCredentials()),
-		grpc.WithDefaultCallOptions(
-			grpc.MaxCallRecvMsgSize(50*1024*1024),
-			grpc.MaxCallSendMsgSize(50*1024*1024),
-		),
-	)
-	if err != nil {
-		t.Fatalf("Failed to dial gRPC: %v", err)
-	}
-	return conn
-}
-
-func downloadFile(url, dest string) error {
-	resp, err := http.Get(url)
-	if err != nil {
-		return fmt.Errorf("HTTP GET failed: %w", err)
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		return fmt.Errorf("bad status: %s", resp.Status)
-	}
-	f, err := os.Create(dest)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	_, err = io.Copy(f, resp.Body)
-	return err
-}
-
-func TestServerHealth(t *testing.T) {
-	cmd := startServer(t)
-	defer stopServer(cmd)
-
-	conn := dialGRPC(t)
-	defer conn.Close()
-
-	client := pb.NewBackendClient(conn)
-	resp, err := client.Health(context.Background(), &pb.HealthMessage{})
-	if err != nil {
-		t.Fatalf("Health check failed: %v", err)
-	}
-	if string(resp.Message) != "OK" {
-		t.Fatalf("Expected OK, got %s", string(resp.Message))
-	}
-}
-
-func TestLoadModel(t *testing.T) {
-	modelDir := skipIfNoModel(t)
-	cmd := startServer(t)
-	defer stopServer(cmd)
-
-	conn := dialGRPC(t)
-	defer conn.Close()
-
-	client := pb.NewBackendClient(conn)
-	resp, err := client.LoadModel(context.Background(), &pb.ModelOptions{
-		ModelFile: modelDir,
-	})
-	if err != nil {
-		t.Fatalf("LoadModel failed: %v", err)
-	}
-	if !resp.Success {
-		t.Fatalf("LoadModel returned failure: %s", resp.Message)
-	}
-}
-
-func TestAudioTranscription(t *testing.T) {
-	modelDir := skipIfNoModel(t)
-
-	tmpDir, err := os.MkdirTemp("", "voxtral-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpDir)
-
-	// Download sample audio
-	audioFile := filepath.Join(tmpDir, "sample.wav")
-	t.Log("Downloading sample audio...")
-	if err := downloadFile(sampleAudio, audioFile); err != nil {
-		t.Fatalf("Failed to download sample audio: %v", err)
-	}
-
-	cmd := startServer(t)
-	defer stopServer(cmd)
-
-	conn := dialGRPC(t)
-	defer conn.Close()
-
-	client := pb.NewBackendClient(conn)
-
-	// Load model
-	loadResp, err := client.LoadModel(context.Background(), &pb.ModelOptions{
-		ModelFile: modelDir,
-	})
-	if err != nil {
-		t.Fatalf("LoadModel failed: %v", err)
-	}
-	if !loadResp.Success {
-		t.Fatalf("LoadModel returned failure: %s", loadResp.Message)
-	}
-
-	// Transcribe
-	transcriptResp, err := client.AudioTranscription(context.Background(), &pb.TranscriptRequest{
-		Dst: audioFile,
-	})
-	if err != nil {
-		t.Fatalf("AudioTranscription failed: %v", err)
-	}
-	if transcriptResp == nil {
-		t.Fatal("AudioTranscription returned nil")
-	}
-
-	t.Logf("Transcribed text: %s", transcriptResp.Text)
-	t.Logf("Number of segments: %d", len(transcriptResp.Segments))
-
-	if transcriptResp.Text == "" {
-		t.Fatal("Transcription returned empty text")
-	}
-
-	allText := strings.ToLower(transcriptResp.Text)
-	for _, seg := range transcriptResp.Segments {
-		allText += " " + strings.ToLower(seg.Text)
-	}
-	t.Logf("All text: %s", allText)
-
-	if !strings.Contains(allText, "big") {
-		t.Errorf("Expected 'big' in transcription, got: %s", allText)
-	}
-
-	// The sample audio should contain recognizable speech
-	if len(allText) < 10 {
-		t.Errorf("Transcription too short: %q", allText)
-	}
-}
@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
 
 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
-WHISPER_CPP_VERSION?=941bdabbe4561bc6de68981aea01bc5ab05781c5
+WHISPER_CPP_VERSION?=aa1bc0d1a6dfd70dbb9f60c11df12441e03a9075
 SO_TARGET?=libgowhisper.so
 
 CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
@@ -78,7 +78,7 @@ package: whisper
 build: package
 
 clean: purge
-	rm -rf libgowhisper*.so package sources/whisper.cpp whisper
+	rm -rf libgowhisper*.so sources/whisper.cpp whisper
 
 purge:
 	rm -rf build*
@@ -56,21 +56,6 @@
     nvidia-cuda-12: "cuda12-whisper"
     nvidia-l4t-cuda-12: "nvidia-l4t-arm64-whisper"
     nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-whisper"
-- &voxtral
-  name: "voxtral"
-  alias: "voxtral"
-  license: mit
-  description: |
-    Voxtral Realtime 4B Pure C speech-to-text inference engine
-  urls:
-    - https://github.com/mudler/voxtral.c
-  tags:
-    - audio-transcription
-    - CPU
-    - Metal
-  capabilities:
-    default: "cpu-voxtral"
-    metal-darwin-arm64: "metal-voxtral"
 - &stablediffusionggml
   name: "stablediffusion-ggml"
   alias: "stablediffusion-ggml"
@@ -120,7 +105,6 @@
     intel: "intel-rfdetr"
     #amd: "rocm-rfdetr"
     nvidia-l4t: "nvidia-l4t-arm64-rfdetr"
-    metal: "metal-rfdetr"
     default: "cpu-rfdetr"
     nvidia-cuda-13: "cuda13-rfdetr"
     nvidia-cuda-12: "cuda12-rfdetr"
@@ -198,15 +182,6 @@
     - text-to-text
     - LLM
     - MLX
-  capabilities:
-    default: "cpu-mlx"
-    nvidia: "cuda12-mlx"
-    metal: "metal-mlx"
-    nvidia-cuda-12: "cuda12-mlx"
-    nvidia-cuda-13: "cuda13-mlx"
-    nvidia-l4t: "nvidia-l4t-mlx"
-    nvidia-l4t-cuda-12: "nvidia-l4t-mlx"
-    nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-mlx"
 - &mlx-vlm
   name: "mlx-vlm"
   uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx-vlm"
@@ -224,15 +199,6 @@
     - vision-language
     - LLM
     - MLX
-  capabilities:
-    default: "cpu-mlx-vlm"
-    nvidia: "cuda12-mlx-vlm"
-    metal: "metal-mlx-vlm"
-    nvidia-cuda-12: "cuda12-mlx-vlm"
-    nvidia-cuda-13: "cuda13-mlx-vlm"
-    nvidia-l4t: "nvidia-l4t-mlx-vlm"
-    nvidia-l4t-cuda-12: "nvidia-l4t-mlx-vlm"
-    nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-mlx-vlm"
 - &mlx-audio
   name: "mlx-audio"
   uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx-audio"
@@ -250,15 +216,6 @@
     - text-to-audio
     - LLM
     - MLX
-  capabilities:
-    default: "cpu-mlx-audio"
-    nvidia: "cuda12-mlx-audio"
-    metal: "metal-mlx-audio"
-    nvidia-cuda-12: "cuda12-mlx-audio"
-    nvidia-cuda-13: "cuda13-mlx-audio"
-    nvidia-l4t: "nvidia-l4t-mlx-audio"
-    nvidia-l4t-cuda-12: "nvidia-l4t-mlx-audio"
-    nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-mlx-audio"
 - &rerankers
   name: "rerankers"
   alias: "rerankers"
@@ -266,7 +223,6 @@
     nvidia: "cuda12-rerankers"
     intel: "intel-rerankers"
    amd: "rocm-rerankers"
-    metal: "metal-rerankers"
 - &transformers
   name: "transformers"
   icon: https://avatars.githubusercontent.com/u/25720743?s=200&v=4
@@ -284,7 +240,6 @@
     nvidia: "cuda12-transformers"
     intel: "intel-transformers"
     amd: "rocm-transformers"
-    metal: "metal-transformers"
     nvidia-cuda-13: "cuda13-transformers"
     nvidia-cuda-12: "cuda12-transformers"
 - &diffusers
@@ -311,34 +266,6 @@
     nvidia-cuda-12: "cuda12-diffusers"
     nvidia-l4t-cuda-12: "nvidia-l4t-arm64-diffusers"
     nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-diffusers"
-- &ace-step
-  name: "ace-step"
-  description: |
-    ACE-Step 1.5 is an open-source music generation model. It supports simple mode (natural language description) and advanced mode (caption, lyrics, think, bpm, keyscale, etc.). Uses in-process acestep (LLMHandler for metadata, DiT for audio).
-  urls:
-    - https://github.com/ace-step/ACE-Step-1.5
-  tags:
-    - music-generation
-    - sound-generation
-  alias: "ace-step"
-  capabilities:
-    nvidia: "cuda12-ace-step"
-    intel: "intel-ace-step"
-    amd: "rocm-ace-step"
-    metal: "metal-ace-step"
-    default: "cpu-ace-step"
-    nvidia-cuda-13: "cuda13-ace-step"
-    nvidia-cuda-12: "cuda12-ace-step"
-- !!merge <<: *ace-step
-  name: "ace-step-development"
-  capabilities:
-    nvidia: "cuda12-ace-step-development"
-    intel: "intel-ace-step-development"
-    amd: "rocm-ace-step-development"
-    metal: "metal-ace-step-development"
-    default: "cpu-ace-step-development"
-    nvidia-cuda-13: "cuda13-ace-step-development"
-    nvidia-cuda-12: "cuda12-ace-step-development"
 - &faster-whisper
   icon: https://avatars.githubusercontent.com/u/1520500?s=200&v=4
   description: |
@@ -355,7 +282,6 @@
     nvidia: "cuda12-faster-whisper"
     intel: "intel-faster-whisper"
     amd: "rocm-faster-whisper"
-    metal: "metal-faster-whisper"
     nvidia-cuda-13: "cuda13-faster-whisper"
     nvidia-cuda-12: "cuda12-faster-whisper"
 - &moonshine
@@ -373,7 +299,6 @@
   alias: "moonshine"
   capabilities:
     nvidia: "cuda12-moonshine"
-    metal: "metal-moonshine"
     default: "cpu-moonshine"
     nvidia-cuda-13: "cuda13-moonshine"
     nvidia-cuda-12: "cuda12-moonshine"
@@ -393,7 +318,6 @@
   capabilities:
     nvidia: "cuda12-whisperx"
     amd: "rocm-whisperx"
-    metal: "metal-whisperx"
     default: "cpu-whisperx"
     nvidia-cuda-13: "cuda13-whisperx"
     nvidia-cuda-12: "cuda12-whisperx"
@@ -416,7 +340,6 @@
     intel: "intel-kokoro"
     amd: "rocm-kokoro"
     nvidia-l4t: "nvidia-l4t-kokoro"
-    metal: "metal-kokoro"
     nvidia-cuda-13: "cuda13-kokoro"
     nvidia-cuda-12: "cuda12-kokoro"
     nvidia-l4t-cuda-12: "nvidia-l4t-arm64-kokoro"
@@ -441,25 +364,9 @@
     nvidia: "cuda12-coqui"
     intel: "intel-coqui"
     amd: "rocm-coqui"
-    metal: "metal-coqui"
     nvidia-cuda-13: "cuda13-coqui"
     nvidia-cuda-12: "cuda12-coqui"
   icon: https://avatars.githubusercontent.com/u/1338804?s=200&v=4
-- &outetts
-  urls:
-    - https://github.com/OuteAI/outetts
-  description: |
-    OuteTTS is an open-weight text-to-speech model from OuteAI (OuteAI/OuteTTS-0.3-1B).
-    Supports custom speaker voices via audio path or default speakers.
-  tags:
-    - text-to-speech
-    - TTS
-  license: apache-2.0
-  name: "outetts"
-  alias: "outetts"
-  capabilities:
-    default: "cpu-outetts"
-    nvidia-cuda-12: "cuda12-outetts"
 - &chatterbox
   urls:
     - https://github.com/resemble-ai/chatterbox
@@ -498,7 +405,6 @@
     intel: "intel-vibevoice"
     amd: "rocm-vibevoice"
     nvidia-l4t: "nvidia-l4t-vibevoice"
-    metal: "metal-vibevoice"
     default: "cpu-vibevoice"
     nvidia-cuda-13: "cuda13-vibevoice"
     nvidia-cuda-12: "cuda12-vibevoice"
@@ -521,7 +427,6 @@
     intel: "intel-qwen-tts"
     amd: "rocm-qwen-tts"
     nvidia-l4t: "nvidia-l4t-qwen-tts"
-    metal: "metal-qwen-tts"
     default: "cpu-qwen-tts"
     nvidia-cuda-13: "cuda13-qwen-tts"
     nvidia-cuda-12: "cuda12-qwen-tts"
@@ -544,34 +449,12 @@
     intel: "intel-qwen-asr"
     amd: "rocm-qwen-asr"
     nvidia-l4t: "nvidia-l4t-qwen-asr"
-    metal: "metal-qwen-asr"
     default: "cpu-qwen-asr"
     nvidia-cuda-13: "cuda13-qwen-asr"
     nvidia-cuda-12: "cuda12-qwen-asr"
     nvidia-l4t-cuda-12: "nvidia-l4t-qwen-asr"
     nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-qwen-asr"
   icon: https://cdn-avatars.huggingface.co/v1/production/uploads/620760a26e3b7210c2ff1943/-s1gyJfvbE1RgO5iBeNOi.png
-- &nemo
-  urls:
-    - https://github.com/NVIDIA/NeMo
-  description: |
-    NVIDIA NEMO Toolkit for ASR provides state-of-the-art automatic speech recognition models including Parakeet models for various languages and use cases.
-  tags:
-    - speech-recognition
-    - ASR
-    - NVIDIA
-  license: apache-2.0
-  name: "nemo"
-  alias: "nemo"
-  capabilities:
-    nvidia: "cuda12-nemo"
-    intel: "intel-nemo"
-    amd: "rocm-nemo"
-    metal: "metal-nemo"
-    default: "cpu-nemo"
-    nvidia-cuda-13: "cuda13-nemo"
-    nvidia-cuda-12: "cuda12-nemo"
-  icon: https://www.nvidia.com/favicon.ico
 - &voxcpm
   urls:
     - https://github.com/ModelBest/VoxCPM
@@ -587,7 +470,6 @@
     nvidia: "cuda12-voxcpm"
     intel: "intel-voxcpm"
     amd: "rocm-voxcpm"
-    metal: "metal-voxcpm"
     default: "cpu-voxcpm"
     nvidia-cuda-13: "cuda13-voxcpm"
     nvidia-cuda-12: "cuda12-voxcpm"
@@ -608,7 +490,6 @@
     intel: "intel-pocket-tts"
     amd: "rocm-pocket-tts"
     nvidia-l4t: "nvidia-l4t-pocket-tts"
-    metal: "metal-pocket-tts"
     default: "cpu-pocket-tts"
     nvidia-cuda-13: "cuda13-pocket-tts"
     nvidia-cuda-12: "cuda12-pocket-tts"
@@ -769,234 +650,31 @@
   uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-mlx-audio"
   mirrors:
     - localai/localai-backends:master-metal-darwin-arm64-mlx-audio
-## mlx
-- !!merge <<: *mlx
-  name: "cpu-mlx"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-mlx"
-  mirrors:
-    - localai/localai-backends:latest-cpu-mlx
-- !!merge <<: *mlx
-  name: "cpu-mlx-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-cpu-mlx"
-  mirrors:
-    - localai/localai-backends:master-cpu-mlx
-- !!merge <<: *mlx
-  name: "cuda12-mlx"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-mlx"
-  mirrors:
-    - localai/localai-backends:latest-gpu-nvidia-cuda-12-mlx
-- !!merge <<: *mlx
-  name: "cuda12-mlx-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-mlx"
-  mirrors:
-    - localai/localai-backends:master-gpu-nvidia-cuda-12-mlx
-- !!merge <<: *mlx
-  name: "cuda13-mlx"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-mlx"
-  mirrors:
-    - localai/localai-backends:latest-gpu-nvidia-cuda-13-mlx
-- !!merge <<: *mlx
-  name: "cuda13-mlx-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-mlx"
-  mirrors:
-    - localai/localai-backends:master-gpu-nvidia-cuda-13-mlx
-- !!merge <<: *mlx
-  name: "nvidia-l4t-mlx"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-mlx"
-  mirrors:
-    - localai/localai-backends:latest-nvidia-l4t-mlx
-- !!merge <<: *mlx
-  name: "nvidia-l4t-mlx-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-mlx"
-  mirrors:
-    - localai/localai-backends:master-nvidia-l4t-mlx
-- !!merge <<: *mlx
-  name: "cuda13-nvidia-l4t-arm64-mlx"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-mlx"
-  mirrors:
-    - localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-mlx
-- !!merge <<: *mlx
-  name: "cuda13-nvidia-l4t-arm64-mlx-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-mlx"
-  mirrors:
-    - localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-mlx
-## mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cpu-mlx-vlm"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:latest-cpu-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cpu-mlx-vlm-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-cpu-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:master-cpu-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cuda12-mlx-vlm"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:latest-gpu-nvidia-cuda-12-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cuda12-mlx-vlm-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:master-gpu-nvidia-cuda-12-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cuda13-mlx-vlm"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:latest-gpu-nvidia-cuda-13-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cuda13-mlx-vlm-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:master-gpu-nvidia-cuda-13-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "nvidia-l4t-mlx-vlm"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:latest-nvidia-l4t-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "nvidia-l4t-mlx-vlm-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:master-nvidia-l4t-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cuda13-nvidia-l4t-arm64-mlx-vlm"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-mlx-vlm
-- !!merge <<: *mlx-vlm
-  name: "cuda13-nvidia-l4t-arm64-mlx-vlm-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-mlx-vlm"
-  mirrors:
-    - localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-mlx-vlm
-## mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cpu-mlx-audio"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-mlx-audio"
-  mirrors:
-    - localai/localai-backends:latest-cpu-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cpu-mlx-audio-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-cpu-mlx-audio"
-  mirrors:
-    - localai/localai-backends:master-cpu-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cuda12-mlx-audio"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-mlx-audio"
-  mirrors:
-    - localai/localai-backends:latest-gpu-nvidia-cuda-12-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cuda12-mlx-audio-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-mlx-audio"
-  mirrors:
-    - localai/localai-backends:master-gpu-nvidia-cuda-12-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cuda13-mlx-audio"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-mlx-audio"
-  mirrors:
-    - localai/localai-backends:latest-gpu-nvidia-cuda-13-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cuda13-mlx-audio-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-mlx-audio"
-  mirrors:
-    - localai/localai-backends:master-gpu-nvidia-cuda-13-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "nvidia-l4t-mlx-audio"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-mlx-audio"
-  mirrors:
-    - localai/localai-backends:latest-nvidia-l4t-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "nvidia-l4t-mlx-audio-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-mlx-audio"
-  mirrors:
-    - localai/localai-backends:master-nvidia-l4t-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cuda13-nvidia-l4t-arm64-mlx-audio"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-mlx-audio"
-  mirrors:
-    - localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-mlx-audio
-- !!merge <<: *mlx-audio
-  name: "cuda13-nvidia-l4t-arm64-mlx-audio-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-mlx-audio"
-  mirrors:
-    - localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-mlx-audio
 - !!merge <<: *kitten-tts
   name: "kitten-tts-development"
   uri: "quay.io/go-skynet/local-ai-backends:master-kitten-tts"
   mirrors:
     - localai/localai-backends:master-kitten-tts
-- !!merge <<: *kitten-tts
-  name: "metal-kitten-tts"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-kitten-tts"
-  mirrors:
-    - localai/localai-backends:latest-metal-darwin-arm64-kitten-tts
-- !!merge <<: *kitten-tts
-  name: "metal-kitten-tts-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-kitten-tts"
-  mirrors:
-    - localai/localai-backends:master-metal-darwin-arm64-kitten-tts
 - !!merge <<: *huggingface
   name: "huggingface-development"
   uri: "quay.io/go-skynet/local-ai-backends:master-huggingface"
   mirrors:
     - localai/localai-backends:master-huggingface
-- !!merge <<: *huggingface
-  name: "metal-huggingface"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-huggingface"
-  mirrors:
-    - localai/localai-backends:latest-metal-darwin-arm64-huggingface
-- !!merge <<: *huggingface
-  name: "metal-huggingface-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-huggingface"
-  mirrors:
-    - localai/localai-backends:master-metal-darwin-arm64-huggingface
 - !!merge <<: *local-store
   name: "local-store-development"
   uri: "quay.io/go-skynet/local-ai-backends:master-cpu-local-store"
   mirrors:
     - localai/localai-backends:master-cpu-local-store
-- !!merge <<: *local-store
-  name: "metal-local-store"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-local-store"
-  mirrors:
-    - localai/localai-backends:latest-metal-darwin-arm64-local-store
-- !!merge <<: *local-store
-  name: "metal-local-store-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-local-store"
-  mirrors:
-    - localai/localai-backends:master-metal-darwin-arm64-local-store
 - !!merge <<: *silero-vad
   name: "silero-vad-development"
   uri: "quay.io/go-skynet/local-ai-backends:master-cpu-silero-vad"
   mirrors:
     - localai/localai-backends:master-cpu-silero-vad
-- !!merge <<: *silero-vad
-  name: "metal-silero-vad"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-silero-vad"
-  mirrors:
-    - localai/localai-backends:latest-metal-darwin-arm64-silero-vad
-- !!merge <<: *silero-vad
-  name: "metal-silero-vad-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-silero-vad"
-  mirrors:
-    - localai/localai-backends:master-metal-darwin-arm64-silero-vad
 - !!merge <<: *piper
   name: "piper-development"
   uri: "quay.io/go-skynet/local-ai-backends:master-piper"
   mirrors:
     - localai/localai-backends:master-piper
-- !!merge <<: *piper
-  name: "metal-piper"
-  uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-piper"
-  mirrors:
-    - localai/localai-backends:latest-metal-darwin-arm64-piper
-- !!merge <<: *piper
-  name: "metal-piper-development"
-  uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-piper"
-  mirrors:
-    - localai/localai-backends:master-metal-darwin-arm64-piper
 ## llama-cpp
 - !!merge <<: *llamacpp
   name: "nvidia-l4t-arm64-llama-cpp"
@@ -1370,7 +1048,6 @@
     intel: "intel-rfdetr-development"
     #amd: "rocm-rfdetr-development"
     nvidia-l4t: "nvidia-l4t-arm64-rfdetr-development"
-    metal: "metal-rfdetr-development"
     default: "cpu-rfdetr-development"
     nvidia-cuda-13: "cuda13-rfdetr-development"
 - !!merge <<: *rfdetr
@@ -1438,16 +1115,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-rfdetr"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-rfdetr"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-rfdetr
|
- localai/localai-backends:master-gpu-nvidia-cuda-13-rfdetr
|
||||||
- !!merge <<: *rfdetr
|
|
||||||
name: "metal-rfdetr"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-rfdetr"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-rfdetr
|
|
||||||
- !!merge <<: *rfdetr
|
|
||||||
name: "metal-rfdetr-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-rfdetr"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-rfdetr
|
|
||||||
## Rerankers
|
## Rerankers
|
||||||
- !!merge <<: *rerankers
|
- !!merge <<: *rerankers
|
||||||
name: "rerankers-development"
|
name: "rerankers-development"
|
||||||
@@ -1455,7 +1122,6 @@
|
|||||||
nvidia: "cuda12-rerankers-development"
|
nvidia: "cuda12-rerankers-development"
|
||||||
intel: "intel-rerankers-development"
|
intel: "intel-rerankers-development"
|
||||||
amd: "rocm-rerankers-development"
|
amd: "rocm-rerankers-development"
|
||||||
metal: "metal-rerankers-development"
|
|
||||||
nvidia-cuda-13: "cuda13-rerankers-development"
|
nvidia-cuda-13: "cuda13-rerankers-development"
|
||||||
- !!merge <<: *rerankers
|
- !!merge <<: *rerankers
|
||||||
name: "cuda12-rerankers"
|
name: "cuda12-rerankers"
|
||||||
@@ -1497,16 +1163,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-rerankers"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-rerankers"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-rerankers
|
- localai/localai-backends:master-gpu-nvidia-cuda-13-rerankers
|
||||||
- !!merge <<: *rerankers
|
|
||||||
name: "metal-rerankers"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-rerankers"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-rerankers
|
|
||||||
- !!merge <<: *rerankers
|
|
||||||
name: "metal-rerankers-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-rerankers"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-rerankers
|
|
||||||
## Transformers
|
## Transformers
|
||||||
- !!merge <<: *transformers
|
- !!merge <<: *transformers
|
||||||
name: "transformers-development"
|
name: "transformers-development"
|
||||||
@@ -1514,7 +1170,6 @@
|
|||||||
nvidia: "cuda12-transformers-development"
|
nvidia: "cuda12-transformers-development"
|
||||||
intel: "intel-transformers-development"
|
intel: "intel-transformers-development"
|
||||||
amd: "rocm-transformers-development"
|
amd: "rocm-transformers-development"
|
||||||
metal: "metal-transformers-development"
|
|
||||||
nvidia-cuda-13: "cuda13-transformers-development"
|
nvidia-cuda-13: "cuda13-transformers-development"
|
||||||
- !!merge <<: *transformers
|
- !!merge <<: *transformers
|
||||||
name: "cuda12-transformers"
|
name: "cuda12-transformers"
|
||||||
@@ -1556,16 +1211,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-transformers"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-transformers"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-transformers
|
- localai/localai-backends:master-gpu-nvidia-cuda-13-transformers
|
||||||
- !!merge <<: *transformers
|
|
||||||
name: "metal-transformers"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-transformers"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-transformers
|
|
||||||
- !!merge <<: *transformers
|
|
||||||
name: "metal-transformers-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-transformers"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-transformers
|
|
||||||
## Diffusers
|
## Diffusers
|
||||||
- !!merge <<: *diffusers
|
- !!merge <<: *diffusers
|
||||||
name: "diffusers-development"
|
name: "diffusers-development"
|
||||||
@@ -1657,67 +1302,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
|
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-diffusers
|
- localai/localai-backends:master-metal-darwin-arm64-diffusers
|
||||||
## ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "cpu-ace-step"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-cpu-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "cpu-ace-step-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-cpu-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "cuda12-ace-step"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "cuda12-ace-step-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "cuda13-ace-step"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "cuda13-ace-step-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "rocm-ace-step"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-rocm-hipblas-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "rocm-ace-step-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-rocm-hipblas-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "intel-ace-step"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-intel-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "intel-ace-step-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-intel-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "metal-ace-step"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-ace-step
|
|
||||||
- !!merge <<: *ace-step
|
|
||||||
name: "metal-ace-step-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-ace-step"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-ace-step
|
|
||||||
## kokoro
|
## kokoro
|
||||||
- !!merge <<: *kokoro
|
- !!merge <<: *kokoro
|
||||||
name: "kokoro-development"
|
name: "kokoro-development"
|
||||||
@@ -1726,7 +1310,6 @@
|
|||||||
intel: "intel-kokoro-development"
|
intel: "intel-kokoro-development"
|
||||||
amd: "rocm-kokoro-development"
|
amd: "rocm-kokoro-development"
|
||||||
nvidia-l4t: "nvidia-l4t-kokoro-development"
|
nvidia-l4t: "nvidia-l4t-kokoro-development"
|
||||||
metal: "metal-kokoro-development"
|
|
||||||
- !!merge <<: *kokoro
|
- !!merge <<: *kokoro
|
||||||
name: "cuda12-kokoro-development"
|
name: "cuda12-kokoro-development"
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-kokoro"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-kokoro"
|
||||||
@@ -1777,16 +1360,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-kokoro"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-kokoro"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-kokoro
|
- localai/localai-backends:master-gpu-nvidia-cuda-13-kokoro
|
||||||
- !!merge <<: *kokoro
|
|
||||||
name: "metal-kokoro"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-kokoro"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-kokoro
|
|
||||||
- !!merge <<: *kokoro
|
|
||||||
name: "metal-kokoro-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-kokoro"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-kokoro
|
|
||||||
## faster-whisper
|
## faster-whisper
|
||||||
- !!merge <<: *faster-whisper
|
- !!merge <<: *faster-whisper
|
||||||
name: "faster-whisper-development"
|
name: "faster-whisper-development"
|
||||||
@@ -1794,7 +1367,6 @@
|
|||||||
nvidia: "cuda12-faster-whisper-development"
|
nvidia: "cuda12-faster-whisper-development"
|
||||||
intel: "intel-faster-whisper-development"
|
intel: "intel-faster-whisper-development"
|
||||||
amd: "rocm-faster-whisper-development"
|
amd: "rocm-faster-whisper-development"
|
||||||
metal: "metal-faster-whisper-development"
|
|
||||||
nvidia-cuda-13: "cuda13-faster-whisper-development"
|
nvidia-cuda-13: "cuda13-faster-whisper-development"
|
||||||
- !!merge <<: *faster-whisper
|
- !!merge <<: *faster-whisper
|
||||||
name: "cuda12-faster-whisper-development"
|
name: "cuda12-faster-whisper-development"
|
||||||
@@ -1826,16 +1398,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-faster-whisper"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-faster-whisper"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-faster-whisper
|
- localai/localai-backends:master-gpu-nvidia-cuda-13-faster-whisper
|
||||||
- !!merge <<: *faster-whisper
|
|
||||||
name: "metal-faster-whisper"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-faster-whisper"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-faster-whisper
|
|
||||||
- !!merge <<: *faster-whisper
|
|
||||||
name: "metal-faster-whisper-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-faster-whisper"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-faster-whisper
|
|
||||||
## moonshine
|
## moonshine
|
||||||
- !!merge <<: *moonshine
|
- !!merge <<: *moonshine
|
||||||
name: "moonshine-development"
|
name: "moonshine-development"
|
||||||
@@ -1874,23 +1436,12 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
|
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
|
||||||
- !!merge <<: *moonshine
|
|
||||||
name: "metal-moonshine"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-moonshine"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-moonshine
|
|
||||||
- !!merge <<: *moonshine
|
|
||||||
name: "metal-moonshine-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-moonshine"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-moonshine
|
|
||||||
## whisperx
|
## whisperx
|
||||||
- !!merge <<: *whisperx
|
- !!merge <<: *whisperx
|
||||||
name: "whisperx-development"
|
name: "whisperx-development"
|
||||||
capabilities:
|
capabilities:
|
||||||
nvidia: "cuda12-whisperx-development"
|
nvidia: "cuda12-whisperx-development"
|
||||||
amd: "rocm-whisperx-development"
|
amd: "rocm-whisperx-development"
|
||||||
metal: "metal-whisperx-development"
|
|
||||||
default: "cpu-whisperx-development"
|
default: "cpu-whisperx-development"
|
||||||
nvidia-cuda-13: "cuda13-whisperx-development"
|
nvidia-cuda-13: "cuda13-whisperx-development"
|
||||||
nvidia-cuda-12: "cuda12-whisperx-development"
|
nvidia-cuda-12: "cuda12-whisperx-development"
|
||||||
@@ -1934,16 +1485,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-whisperx"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-whisperx"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-whisperx
|
- localai/localai-backends:master-gpu-nvidia-cuda-13-whisperx
|
||||||
- !!merge <<: *whisperx
|
|
||||||
name: "metal-whisperx"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-whisperx"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-whisperx
|
|
||||||
- !!merge <<: *whisperx
|
|
||||||
name: "metal-whisperx-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-whisperx"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-whisperx
|
|
||||||
## coqui
|
## coqui
|
||||||
|
|
||||||
- !!merge <<: *coqui
|
- !!merge <<: *coqui
|
||||||
@@ -1952,7 +1493,6 @@
|
|||||||
nvidia: "cuda12-coqui-development"
|
nvidia: "cuda12-coqui-development"
|
||||||
intel: "intel-coqui-development"
|
intel: "intel-coqui-development"
|
||||||
amd: "rocm-coqui-development"
|
amd: "rocm-coqui-development"
|
||||||
metal: "metal-coqui-development"
|
|
||||||
- !!merge <<: *coqui
|
- !!merge <<: *coqui
|
||||||
name: "cuda12-coqui"
|
name: "cuda12-coqui"
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-coqui"
|
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-coqui"
|
||||||
@@ -1983,42 +1523,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-coqui"
|
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-coqui"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:latest-gpu-rocm-hipblas-coqui
|
- localai/localai-backends:latest-gpu-rocm-hipblas-coqui
|
||||||
- !!merge <<: *coqui
|
|
||||||
name: "metal-coqui"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-coqui"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-coqui
|
|
||||||
- !!merge <<: *coqui
|
|
||||||
name: "metal-coqui-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-coqui"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-coqui
|
|
||||||
## outetts
|
|
||||||
- !!merge <<: *outetts
|
|
||||||
name: "outetts-development"
|
|
||||||
capabilities:
|
|
||||||
default: "cpu-outetts-development"
|
|
||||||
nvidia-cuda-12: "cuda12-outetts-development"
|
|
||||||
- !!merge <<: *outetts
|
|
||||||
name: "cpu-outetts"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-outetts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-cpu-outetts
|
|
||||||
- !!merge <<: *outetts
|
|
||||||
name: "cpu-outetts-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-outetts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-cpu-outetts
|
|
||||||
- !!merge <<: *outetts
|
|
||||||
name: "cuda12-outetts"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-outetts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-outetts
|
|
||||||
- !!merge <<: *outetts
|
|
||||||
name: "cuda12-outetts-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-outetts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-outetts
|
|
||||||
## chatterbox
|
## chatterbox
|
||||||
- !!merge <<: *chatterbox
|
- !!merge <<: *chatterbox
|
||||||
name: "chatterbox-development"
|
name: "chatterbox-development"
|
||||||
@@ -2099,7 +1603,6 @@
|
|||||||
intel: "intel-vibevoice-development"
|
intel: "intel-vibevoice-development"
|
||||||
amd: "rocm-vibevoice-development"
|
amd: "rocm-vibevoice-development"
|
||||||
nvidia-l4t: "nvidia-l4t-vibevoice-development"
|
nvidia-l4t: "nvidia-l4t-vibevoice-development"
|
||||||
metal: "metal-vibevoice-development"
|
|
||||||
default: "cpu-vibevoice-development"
|
default: "cpu-vibevoice-development"
|
||||||
nvidia-cuda-13: "cuda13-vibevoice-development"
|
nvidia-cuda-13: "cuda13-vibevoice-development"
|
||||||
nvidia-cuda-12: "cuda12-vibevoice-development"
|
nvidia-cuda-12: "cuda12-vibevoice-development"
|
||||||
@@ -2175,16 +1678,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice"
|
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice
|
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice
|
||||||
- !!merge <<: *vibevoice
|
|
||||||
name: "metal-vibevoice"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-vibevoice"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-vibevoice
|
|
||||||
- !!merge <<: *vibevoice
|
|
||||||
name: "metal-vibevoice-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-vibevoice"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-vibevoice
|
|
||||||
## qwen-tts
|
## qwen-tts
|
||||||
- !!merge <<: *qwen-tts
|
- !!merge <<: *qwen-tts
|
||||||
name: "qwen-tts-development"
|
name: "qwen-tts-development"
|
||||||
@@ -2193,7 +1686,6 @@
|
|||||||
intel: "intel-qwen-tts-development"
|
intel: "intel-qwen-tts-development"
|
||||||
amd: "rocm-qwen-tts-development"
|
amd: "rocm-qwen-tts-development"
|
||||||
nvidia-l4t: "nvidia-l4t-qwen-tts-development"
|
nvidia-l4t: "nvidia-l4t-qwen-tts-development"
|
||||||
metal: "metal-qwen-tts-development"
|
|
||||||
default: "cpu-qwen-tts-development"
|
default: "cpu-qwen-tts-development"
|
||||||
nvidia-cuda-13: "cuda13-qwen-tts-development"
|
nvidia-cuda-13: "cuda13-qwen-tts-development"
|
||||||
nvidia-cuda-12: "cuda12-qwen-tts-development"
|
nvidia-cuda-12: "cuda12-qwen-tts-development"
|
||||||
@@ -2269,16 +1761,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts"
|
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts
|
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-tts
|
||||||
- !!merge <<: *qwen-tts
|
|
||||||
name: "metal-qwen-tts"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-qwen-tts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-qwen-tts
|
|
||||||
- !!merge <<: *qwen-tts
|
|
||||||
name: "metal-qwen-tts-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-qwen-tts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-qwen-tts
|
|
||||||
## qwen-asr
|
## qwen-asr
|
||||||
- !!merge <<: *qwen-asr
|
- !!merge <<: *qwen-asr
|
||||||
name: "qwen-asr-development"
|
name: "qwen-asr-development"
|
||||||
@@ -2287,7 +1769,6 @@
|
|||||||
intel: "intel-qwen-asr-development"
|
intel: "intel-qwen-asr-development"
|
||||||
amd: "rocm-qwen-asr-development"
|
amd: "rocm-qwen-asr-development"
|
||||||
nvidia-l4t: "nvidia-l4t-qwen-asr-development"
|
nvidia-l4t: "nvidia-l4t-qwen-asr-development"
|
||||||
metal: "metal-qwen-asr-development"
|
|
||||||
default: "cpu-qwen-asr-development"
|
default: "cpu-qwen-asr-development"
|
||||||
nvidia-cuda-13: "cuda13-qwen-asr-development"
|
nvidia-cuda-13: "cuda13-qwen-asr-development"
|
||||||
nvidia-cuda-12: "cuda12-qwen-asr-development"
|
nvidia-cuda-12: "cuda12-qwen-asr-development"
|
||||||
@@ -2363,87 +1844,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr"
|
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr
|
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-qwen-asr
|
||||||
- !!merge <<: *qwen-asr
|
|
||||||
name: "metal-qwen-asr"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-qwen-asr"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-qwen-asr
|
|
||||||
- !!merge <<: *qwen-asr
|
|
||||||
name: "metal-qwen-asr-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-qwen-asr"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-qwen-asr
|
|
||||||
## nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "nemo-development"
|
|
||||||
capabilities:
|
|
||||||
nvidia: "cuda12-nemo-development"
|
|
||||||
intel: "intel-nemo-development"
|
|
||||||
amd: "rocm-nemo-development"
|
|
||||||
metal: "metal-nemo-development"
|
|
||||||
default: "cpu-nemo-development"
|
|
||||||
nvidia-cuda-13: "cuda13-nemo-development"
|
|
||||||
nvidia-cuda-12: "cuda12-nemo-development"
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "cpu-nemo"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-cpu-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "cpu-nemo-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-cpu-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "cuda12-nemo"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "cuda12-nemo-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "cuda13-nemo"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-nvidia-cuda-13-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "cuda13-nemo-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-nvidia-cuda-13-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "intel-nemo"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-intel-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "intel-nemo-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-intel-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "rocm-nemo"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-gpu-rocm-hipblas-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "rocm-nemo-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-gpu-rocm-hipblas-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "metal-nemo"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-nemo
|
|
||||||
- !!merge <<: *nemo
|
|
||||||
name: "metal-nemo-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-nemo"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-nemo
|
|
||||||
## voxcpm
|
## voxcpm
|
||||||
- !!merge <<: *voxcpm
|
- !!merge <<: *voxcpm
|
||||||
name: "voxcpm-development"
|
name: "voxcpm-development"
|
||||||
@@ -2451,7 +1851,6 @@
|
|||||||
nvidia: "cuda12-voxcpm-development"
|
nvidia: "cuda12-voxcpm-development"
|
||||||
intel: "intel-voxcpm-development"
|
intel: "intel-voxcpm-development"
|
||||||
amd: "rocm-voxcpm-development"
|
amd: "rocm-voxcpm-development"
|
||||||
metal: "metal-voxcpm-development"
|
|
||||||
default: "cpu-voxcpm-development"
|
default: "cpu-voxcpm-development"
|
||||||
nvidia-cuda-13: "cuda13-voxcpm-development"
|
nvidia-cuda-13: "cuda13-voxcpm-development"
|
||||||
nvidia-cuda-12: "cuda12-voxcpm-development"
|
nvidia-cuda-12: "cuda12-voxcpm-development"
|
||||||
@@ -2505,16 +1904,6 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-voxcpm"
|
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-voxcpm"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-gpu-rocm-hipblas-voxcpm
|
- localai/localai-backends:master-gpu-rocm-hipblas-voxcpm
|
||||||
- !!merge <<: *voxcpm
|
|
||||||
name: "metal-voxcpm"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-voxcpm"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-voxcpm
|
|
||||||
- !!merge <<: *voxcpm
|
|
||||||
name: "metal-voxcpm-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-voxcpm"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-voxcpm
|
|
||||||
## pocket-tts
|
## pocket-tts
|
||||||
- !!merge <<: *pocket-tts
|
- !!merge <<: *pocket-tts
|
||||||
name: "pocket-tts-development"
|
name: "pocket-tts-development"
|
||||||
@@ -2523,7 +1912,6 @@
|
|||||||
intel: "intel-pocket-tts-development"
|
intel: "intel-pocket-tts-development"
|
||||||
amd: "rocm-pocket-tts-development"
|
amd: "rocm-pocket-tts-development"
|
||||||
nvidia-l4t: "nvidia-l4t-pocket-tts-development"
|
nvidia-l4t: "nvidia-l4t-pocket-tts-development"
|
||||||
metal: "metal-pocket-tts-development"
|
|
||||||
default: "cpu-pocket-tts-development"
|
default: "cpu-pocket-tts-development"
|
||||||
nvidia-cuda-13: "cuda13-pocket-tts-development"
|
nvidia-cuda-13: "cuda13-pocket-tts-development"
|
||||||
nvidia-cuda-12: "cuda12-pocket-tts-development"
|
nvidia-cuda-12: "cuda12-pocket-tts-development"
|
||||||
@@ -2599,34 +1987,3 @@
|
|||||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-pocket-tts"
|
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-pocket-tts"
|
||||||
mirrors:
|
mirrors:
|
||||||
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-pocket-tts
|
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-pocket-tts
|
||||||
- !!merge <<: *pocket-tts
|
|
||||||
name: "metal-pocket-tts"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-pocket-tts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-pocket-tts
|
|
||||||
- !!merge <<: *pocket-tts
|
|
||||||
name: "metal-pocket-tts-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-pocket-tts"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-pocket-tts
|
|
||||||
## voxtral
|
|
||||||
- !!merge <<: *voxtral
|
|
||||||
name: "cpu-voxtral"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-voxtral"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-cpu-voxtral
|
|
||||||
- !!merge <<: *voxtral
|
|
||||||
name: "cpu-voxtral-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-voxtral"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-cpu-voxtral
|
|
||||||
- !!merge <<: *voxtral
|
|
||||||
name: "metal-voxtral"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-voxtral"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:latest-metal-darwin-arm64-voxtral
|
|
||||||
- !!merge <<: *voxtral
|
|
||||||
name: "metal-voxtral-development"
|
|
||||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-voxtral"
|
|
||||||
mirrors:
|
|
||||||
- localai/localai-backends:master-metal-darwin-arm64-voxtral
|
|
||||||
|
|||||||
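Every entry in the gallery hunks above reuses a shared YAML anchor through the standard merge key (!!merge <<: *anchor): the anchor carries the backend's common metadata, and each variant only overrides name, uri, and mirrors. A minimal sketch of how such an index resolves when loaded; the ".common"/"entries" layout is illustrative only, not the gallery's actual schema:

import yaml  # PyYAML's safe_load resolves the standard YAML merge key

doc = """
.common: &silero-vad
  license: mit
  description: "voice activity detection"
entries:
  - !!merge <<: *silero-vad
    name: "silero-vad-development"
    uri: "quay.io/go-skynet/local-ai-backends:master-cpu-silero-vad"
"""

entry = yaml.safe_load(doc)["entries"][0]
# Fields from the anchor are merged in; the entry's own keys win on conflict.
assert entry["license"] == "mit"
assert entry["name"] == "silero-vad-development"

Because of this structure, dropping the metal capability lines and the metal-tagged entries, as these hunks do, removes a whole platform from the gallery without touching the shared anchors.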
@@ -1,16 +0,0 @@
-.DEFAULT_GOAL := install
-
-.PHONY: install
-install:
-	bash install.sh
-
-.PHONY: protogen-clean
-protogen-clean:
-	$(RM) backend_pb2_grpc.py backend_pb2.py
-
-.PHONY: clean
-clean: protogen-clean
-	rm -rf venv __pycache__
-
-test: install
-	bash test.sh
@@ -1,472 +0,0 @@
-#!/usr/bin/env python3
-"""
-LocalAI ACE-Step Backend
-
-gRPC backend for ACE-Step 1.5 music generation. Aligns with upstream acestep API:
-- LoadModel: initializes AceStepHandler (DiT) and LLMHandler, parses Options.
-- SoundGeneration: uses create_sample (simple mode), format_sample (optional), then
-  generate_music from acestep.inference. Writes first output to request.dst.
-- Fail hard: no fallback WAV on error; exceptions propagate to gRPC.
-"""
-from concurrent import futures
-import argparse
-import shutil
-import signal
-import sys
-import os
-import tempfile
-
-import backend_pb2
-import backend_pb2_grpc
-import grpc
-from acestep.inference import (
-    GenerationParams,
-    GenerationConfig,
-    generate_music,
-    create_sample,
-    format_sample,
-)
-from acestep.handler import AceStepHandler
-from acestep.llm_inference import LLMHandler
-from acestep.model_downloader import ensure_lm_model
-
-
-_ONE_DAY_IN_SECONDS = 60 * 60 * 24
-MAX_WORKERS = int(os.environ.get("PYTHON_GRPC_MAX_WORKERS", "1"))
-
-# Model name -> HuggingFace/ModelScope repo (from upstream api_server.py)
-MODEL_REPO_MAPPING = {
-    "acestep-v15-turbo": "ACE-Step/Ace-Step1.5",
-    "acestep-5Hz-lm-0.6B": "ACE-Step/Ace-Step1.5",
-    "acestep-5Hz-lm-1.7B": "ACE-Step/Ace-Step1.5",
-    "vae": "ACE-Step/Ace-Step1.5",
-    "Qwen3-Embedding-0.6B": "ACE-Step/Ace-Step1.5",
-    "acestep-v15-base": "ACE-Step/acestep-v15-base",
-    "acestep-v15-sft": "ACE-Step/acestep-v15-sft",
-    "acestep-v15-turbo-shift3": "ACE-Step/acestep-v15-turbo-shift3",
-    "acestep-5Hz-lm-4B": "ACE-Step/acestep-5Hz-lm-4B",
-}
-DEFAULT_REPO_ID = "ACE-Step/Ace-Step1.5"
-
-def _is_float(s):
-    try:
-        float(s)
-        return True
-    except (ValueError, TypeError):
-        return False
-
-
-def _is_int(s):
-    try:
-        int(s)
-        return True
-    except (ValueError, TypeError):
-        return False
-
-
-def _parse_timesteps(s):
-    if s is None or (isinstance(s, str) and not s.strip()):
-        return None
-    if isinstance(s, (list, tuple)):
-        return [float(x) for x in s]
-    try:
-        return [float(x.strip()) for x in str(s).split(",") if x.strip()]
-    except (ValueError, TypeError):
-        return None
-
-
-def _parse_options(opts_list):
-    """Parse repeated 'key:value' options into a dict. Coerce numeric and bool."""
-    out = {}
-    for opt in opts_list or []:
-        if ":" not in opt:
-            continue
-        key, value = opt.split(":", 1)
-        key = key.strip()
-        value = value.strip()
-        if _is_int(value):
-            out[key] = int(value)
-        elif _is_float(value):
-            out[key] = float(value)
-        elif value.lower() in ("true", "false"):
-            out[key] = value.lower() == "true"
-        else:
-            out[key] = value
-    return out
-
-
-def _generate_audio_sync(servicer, payload, dst_path):
-    """
-    Run full ACE-Step pipeline using acestep.inference:
-    - If sample_mode/sample_query: create_sample() for caption/lyrics/metadata.
-    - If use_format and caption/lyrics: format_sample().
-    - Build GenerationParams and GenerationConfig, then generate_music().
-    Writes the first generated audio to dst_path. Raises on failure.
-    """
-
-    opts = servicer.options
-    dit_handler = servicer.dit_handler
-    llm_handler = servicer.llm_handler
-
-    for key, value in opts.items():
-        if key not in payload:
-            payload[key] = value
-
-    def _opt(name, default):
-        return opts.get(name, default)
-
-    lm_temperature = _opt("temperature", 0.85)
-    lm_cfg_scale = _opt("lm_cfg_scale", _opt("cfg_scale", 2.0))
-    lm_top_k = opts.get("top_k")
-    lm_top_p = _opt("top_p", 0.9)
-    if lm_top_p is not None and lm_top_p >= 1.0:
-        lm_top_p = None
-    inference_steps = _opt("inference_steps", 8)
-    guidance_scale = _opt("guidance_scale", 7.0)
-    batch_size = max(1, int(_opt("batch_size", 1)))
-
-    use_simple = bool(payload.get("sample_query") or payload.get("text"))
-    sample_mode = use_simple and (payload.get("thinking") or payload.get("sample_mode"))
-    sample_query = (payload.get("sample_query") or payload.get("text") or "").strip()
-    use_format = bool(payload.get("use_format"))
-    caption = (payload.get("prompt") or payload.get("caption") or "").strip()
-    lyrics = (payload.get("lyrics") or "").strip()
-    vocal_language = (payload.get("vocal_language") or "en").strip()
-    instrumental = bool(payload.get("instrumental"))
-    bpm = payload.get("bpm")
-    key_scale = (payload.get("key_scale") or "").strip()
-    time_signature = (payload.get("time_signature") or "").strip()
-    audio_duration = payload.get("audio_duration")
-    if audio_duration is not None:
-        try:
-            audio_duration = float(audio_duration)
-        except (TypeError, ValueError):
-            audio_duration = None
-
-    if sample_mode and llm_handler and getattr(llm_handler, "llm_initialized", False):
-        parsed_language = None
-        if sample_query:
-            for hint in ("english", "en", "chinese", "zh", "japanese", "ja"):
-                if hint in sample_query.lower():
-                    parsed_language = "en" if hint == "english" or hint == "en" else hint
-                    break
-        vocal_lang = vocal_language if vocal_language and vocal_language != "unknown" else parsed_language
-        sample_result = create_sample(
-            llm_handler=llm_handler,
-            query=sample_query or "NO USER INPUT",
-            instrumental=instrumental,
-            vocal_language=vocal_lang,
-            temperature=lm_temperature,
-            top_k=lm_top_k,
-            top_p=lm_top_p,
-            use_constrained_decoding=True,
-        )
-        if not sample_result.success:
-            raise RuntimeError(f"create_sample failed: {sample_result.error or sample_result.status_message}")
-        caption = sample_result.caption or caption
-        lyrics = sample_result.lyrics or lyrics
-        bpm = sample_result.bpm
-        key_scale = sample_result.keyscale or key_scale
-        time_signature = sample_result.timesignature or time_signature
-        if sample_result.duration is not None:
-            audio_duration = sample_result.duration
-        if getattr(sample_result, "language", None):
-            vocal_language = sample_result.language
-
-    if use_format and (caption or lyrics) and llm_handler and getattr(llm_handler, "llm_initialized", False):
-        user_metadata = {}
-        if bpm is not None:
-            user_metadata["bpm"] = bpm
-        if audio_duration is not None and float(audio_duration) > 0:
-            user_metadata["duration"] = int(audio_duration)
-        if key_scale:
-            user_metadata["keyscale"] = key_scale
-        if time_signature:
-            user_metadata["timesignature"] = time_signature
-        if vocal_language and vocal_language != "unknown":
-            user_metadata["language"] = vocal_language
-        format_result = format_sample(
-            llm_handler=llm_handler,
-            caption=caption,
-            lyrics=lyrics,
-            user_metadata=user_metadata if user_metadata else None,
-            temperature=lm_temperature,
-            top_k=lm_top_k,
-            top_p=lm_top_p,
-            use_constrained_decoding=True,
-        )
-        if format_result.success:
-            caption = format_result.caption or caption
-            lyrics = format_result.lyrics or lyrics
-            if format_result.duration is not None:
-                audio_duration = format_result.duration
-            if format_result.bpm is not None:
-                bpm = format_result.bpm
-            if format_result.keyscale:
-                key_scale = format_result.keyscale
-            if format_result.timesignature:
-                time_signature = format_result.timesignature
-            if getattr(format_result, "language", None):
-                vocal_language = format_result.language
-
-    thinking = bool(payload.get("thinking"))
-    use_cot_metas = not sample_mode
-    params = GenerationParams(
-        task_type=payload.get("task_type", "text2music"),
-        instruction=payload.get("instruction", "Fill the audio semantic mask based on the given conditions:"),
-        reference_audio=payload.get("reference_audio_path"),
-        src_audio=payload.get("src_audio_path"),
-        audio_codes=payload.get("audio_code_string", ""),
-        caption=caption,
-        lyrics=lyrics,
-        instrumental=instrumental or (not lyrics or str(lyrics).strip().lower() in ("[inst]", "[instrumental]")),
-        vocal_language=vocal_language or "unknown",
-        bpm=bpm,
-        keyscale=key_scale,
-        timesignature=time_signature,
-        duration=float(audio_duration) if audio_duration and float(audio_duration) > 0 else -1.0,
-        inference_steps=inference_steps,
-        seed=int(payload.get("seed", -1)),
-        guidance_scale=guidance_scale,
-        use_adg=bool(payload.get("use_adg")),
-        cfg_interval_start=float(payload.get("cfg_interval_start", 0.0)),
-        cfg_interval_end=float(payload.get("cfg_interval_end", 1.0)),
-        shift=float(payload.get("shift", 1.0)),
-        infer_method=(payload.get("infer_method") or "ode").strip(),
-        timesteps=_parse_timesteps(payload.get("timesteps")),
-        repainting_start=float(payload.get("repainting_start", 0.0)),
-        repainting_end=float(payload.get("repainting_end", -1)) if payload.get("repainting_end") is not None else -1,
-        audio_cover_strength=float(payload.get("audio_cover_strength", 1.0)),
-        thinking=thinking,
-        lm_temperature=lm_temperature,
-        lm_cfg_scale=lm_cfg_scale,
-        lm_top_k=lm_top_k or 0,
-        lm_top_p=lm_top_p if lm_top_p is not None and lm_top_p < 1.0 else 0.9,
-        lm_negative_prompt=payload.get("lm_negative_prompt", "NO USER INPUT"),
-        use_cot_metas=use_cot_metas,
-        use_cot_caption=bool(payload.get("use_cot_caption", True)),
-        use_cot_language=bool(payload.get("use_cot_language", True)),
-        use_constrained_decoding=True,
-    )
-
-    config = GenerationConfig(
-        batch_size=batch_size,
-        allow_lm_batch=bool(payload.get("allow_lm_batch", False)),
-        use_random_seed=bool(payload.get("use_random_seed", True)),
-        seeds=payload.get("seeds"),
-        lm_batch_chunk_size=max(1, int(payload.get("lm_batch_chunk_size", 8))),
-        constrained_decoding_debug=bool(payload.get("constrained_decoding_debug")),
-        audio_format=(payload.get("audio_format") or "flac").strip() or "flac",
-    )
-
-    save_dir = tempfile.mkdtemp(prefix="ace_step_")
-    try:
-        result = generate_music(
-            dit_handler=dit_handler,
-            llm_handler=llm_handler if (llm_handler and getattr(llm_handler, "llm_initialized", False)) else None,
-            params=params,
-            config=config,
-            save_dir=save_dir,
-            progress=None,
-        )
-        if not result.success:
-            raise RuntimeError(result.error or result.status_message or "generate_music failed")
-
-        audios = result.audios or []
-        if not audios:
-            raise RuntimeError("generate_music returned no audio")
-
-        first_path = audios[0].get("path") or ""
-        if not first_path or not os.path.isfile(first_path):
-            raise RuntimeError("first generated audio path missing or not a file")
-
-        shutil.copy2(first_path, dst_path)
-    finally:
-        try:
-            shutil.rmtree(save_dir, ignore_errors=True)
-        except Exception:
-            pass
-
-
-class BackendServicer(backend_pb2_grpc.BackendServicer):
-    def __init__(self):
-        self.model_path = None
-        self.model_dir = None
-        self.checkpoint_dir = None
-        self.project_root = None
-        self.options = {}
-        self.dit_handler = None
-        self.llm_handler = None
-
-    def Health(self, request, context):
-        return backend_pb2.Reply(message=b"OK")
-
-    def LoadModel(self, request, context):
-        try:
-            self.options = _parse_options(list(getattr(request, "Options", []) or []))
-            model_path = getattr(request, "ModelPath", None) or ""
-            model_name = (request.Model or "").strip()
-            model_file = (getattr(request, "ModelFile", None) or "").strip()
-
-            # Model dir: where we store checkpoints (always under LocalAI models path, never backend dir)
-            if model_path and model_name:
-                model_dir = os.path.join(model_path, model_name)
-            elif model_file:
-                model_dir = model_file
-            else:
-                model_dir = os.path.abspath(model_name or ".")
-            self.model_dir = model_dir
-            self.checkpoint_dir = os.path.join(model_dir, "checkpoints")
-            self.project_root = model_dir
-            self.model_path = os.path.join(self.checkpoint_dir, model_name or os.path.basename(model_dir.rstrip("/\\")))
-
-            config_path = model_name or os.path.basename(model_dir.rstrip("/\\"))
-            os.makedirs(self.checkpoint_dir, exist_ok=True)
-
-            self.dit_handler = AceStepHandler()
-            # Patch handler so it uses our model dir instead of site-packages/checkpoints
-            self.dit_handler._get_project_root = lambda: self.project_root
-            device = self.options.get("device", "auto")
-            use_flash = self.options.get("use_flash_attention", True)
-            if isinstance(use_flash, str):
-                use_flash = str(use_flash).lower() in ("1", "true", "yes")
-            offload = self.options.get("offload_to_cpu", False)
-            if isinstance(offload, str):
-                offload = str(offload).lower() in ("1", "true", "yes")
-            status_msg, ok = self.dit_handler.initialize_service(
-                project_root=self.project_root,
-                config_path=config_path,
-                device=device,
-                use_flash_attention=use_flash,
-                compile_model=False,
-                offload_to_cpu=offload,
-                offload_dit_to_cpu=bool(self.options.get("offload_dit_to_cpu", False)),
-            )
-            if not ok:
-                return backend_pb2.Result(success=False, message=f"DiT init failed: {status_msg}")
-
-            self.llm_handler = None
-            if self.options.get("init_lm", True):
-                lm_model = self.options.get("lm_model_path", "acestep-5Hz-lm-0.6B")
-
-                # Ensure LM model is downloaded before initializing
-                try:
-                    from pathlib import Path
-                    lm_success, lm_msg = ensure_lm_model(
-                        model_name=lm_model,
-                        checkpoints_dir=Path(self.checkpoint_dir),
-                        prefer_source=None,  # Auto-detect HuggingFace vs ModelScope
-                    )
-                    if not lm_success:
-                        print(f"[ace-step] Warning: LM model download failed: {lm_msg}", file=sys.stderr)
-                        # Continue anyway - LLM initialization will fail gracefully
-                    else:
-                        print(f"[ace-step] LM model ready: {lm_msg}", file=sys.stderr)
-                except Exception as e:
-                    print(f"[ace-step] Warning: LM model download check failed: {e}", file=sys.stderr)
-                    # Continue anyway - LLM initialization will fail gracefully
-
-                self.llm_handler = LLMHandler()
-                lm_backend = (self.options.get("lm_backend") or "vllm").strip().lower()
-                if lm_backend not in ("vllm", "pt"):
-                    lm_backend = "vllm"
-                lm_status, lm_ok = self.llm_handler.initialize(
-                    checkpoint_dir=self.checkpoint_dir,
-                    lm_model_path=lm_model,
-                    backend=lm_backend,
-                    device=device,
-                    offload_to_cpu=offload,
-                    dtype=getattr(self.dit_handler, "dtype", None),
-                )
-                if not lm_ok:
-                    self.llm_handler = None
-                    print(f"[ace-step] LM init failed (optional): {lm_status}", file=sys.stderr)
-
-            print(f"[ace-step] LoadModel: model={self.model_path}, options={list(self.options.keys())}", file=sys.stderr)
-            return backend_pb2.Result(success=True, message="Model loaded successfully")
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"LoadModel error: {err}")
-
-    def SoundGeneration(self, request, context):
-        if not request.dst:
-            return backend_pb2.Result(success=False, message="request.dst is required")
-
-        use_simple = bool(request.text)
-        if use_simple:
-            payload = {
-                "sample_query": request.text or "",
-                "sample_mode": True,
-                "thinking": True,
-                "vocal_language": request.language or request.GetLanguage() or "en",
-                "instrumental": request.instrumental if request.HasField("instrumental") else False,
-            }
-        else:
-            caption = request.caption or request.GetCaption() or request.text
-            payload = {
-                "prompt": caption,
-                "lyrics": request.lyrics or request.lyrics or "",
-                "thinking": request.think if request.HasField("think") else False,
-                "vocal_language": request.language or request.GetLanguage() or "en",
-            }
-        if request.HasField("bpm"):
-            payload["bpm"] = request.bpm
-        if request.HasField("keyscale") and request.keyscale:
-            payload["key_scale"] = request.keyscale
-        if request.HasField("timesignature") and request.timesignature:
-            payload["time_signature"] = request.timesignature
-        if request.HasField("duration") and request.duration:
-            payload["audio_duration"] = int(request.duration) if request.duration else None
-        if request.src:
-            payload["src_audio_path"] = request.src
-
-        _generate_audio_sync(self, payload, request.dst)
-        return backend_pb2.Result(success=True, message="Sound generated successfully")
-
-    def TTS(self, request, context):
-        if not request.dst:
-            return backend_pb2.Result(success=False, message="request.dst is required")
-        payload = {
-            "sample_query": request.text,
-            "sample_mode": True,
-            "thinking": False,
-            "vocal_language": (request.language if request.language else "") or "en",
-            "instrumental": False,
-        }
-        _generate_audio_sync(self, payload, request.dst)
-        return backend_pb2.Result(success=True, message="TTS (music fallback) generated successfully")
-
-
-def serve(address):
-    server = grpc.server(
-        futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
-        options=[
-            ("grpc.max_message_length", 50 * 1024 * 1024),
-            ("grpc.max_send_message_length", 50 * 1024 * 1024),
-            ("grpc.max_receive_message_length", 50 * 1024 * 1024),
-        ],
-    )
-    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
-    server.add_insecure_port(address)
-    server.start()
-    print(f"[ace-step] Server listening on {address}", file=sys.stderr)
-
-    def shutdown(sig, frame):
-        server.stop(0)
-        sys.exit(0)
-
-    signal.signal(signal.SIGINT, shutdown)
-    signal.signal(signal.SIGTERM, shutdown)
-
-    try:
-        while True:
-            import time
-            time.sleep(_ONE_DAY_IN_SECONDS)
-    except KeyboardInterrupt:
-        server.stop(0)
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument("--addr", default="localhost:50051", help="Listen address")
-    args = parser.parse_args()
-    serve(args.addr)
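The _parse_options helper in the deleted backend above is what LoadModel used to turn LocalAI's repeated "key:value" Options strings into typed settings before wiring them into the DiT and LM handlers. A small self-contained sketch of the same coercion rules, restated here for illustration rather than imported from the deleted module (the function name parse_options is ours):

def parse_options(opts):
    """Turn ["key:value", ...] into a dict, coercing ints, floats and booleans."""
    out = {}
    for opt in opts:
        if ":" not in opt:
            continue  # malformed entries are silently skipped, as the backend did
        key, value = opt.split(":", 1)
        key, value = key.strip(), value.strip()
        for cast in (int, float):
            try:
                out[key] = cast(value)
                break
            except ValueError:
                pass
        else:
            out[key] = {"true": True, "false": False}.get(value.lower(), value)
    return out

# The kind of options a LocalAI model config might hand to this backend:
print(parse_options(["device:cuda", "inference_steps:8", "offload_to_cpu:true", "top_p:0.9"]))
# -> {'device': 'cuda', 'inference_steps': 8, 'offload_to_cpu': True, 'top_p': 0.9}

Trying int before float mirrors the order of the _is_int/_is_float checks above, so "8" stays an integer while "0.9" becomes a float.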
@@ -1,26 +0,0 @@
-#!/bin/bash
-set -e
-
-backend_dir=$(dirname $0)
-if [ -d $backend_dir/common ]; then
-    source $backend_dir/common/libbackend.sh
-else
-    source $backend_dir/../common/libbackend.sh
-fi
-
-PYTHON_VERSION="3.11"
-PYTHON_PATCH="14"
-PY_STANDALONE_TAG="20260203"
-
-installRequirements
-
-if [ ! -d ACE-Step-1.5 ]; then
-    git clone https://github.com/ace-step/ACE-Step-1.5
-    cd ACE-Step-1.5/
-    if [ "x${USE_PIP}" == "xtrue" ]; then
-        pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --no-deps .
-    else
-        uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --no-deps .
-    fi
-fi
-
@@ -1,22 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cpu
-torch
-torchaudio
-torchvision
-
-# Core dependencies
-transformers>=4.51.0,<4.58.0
-diffusers
-gradio
-matplotlib>=3.7.5
-scipy>=1.10.1
-soundfile>=0.13.1
-loguru>=0.7.3
-einops>=0.8.1
-accelerate>=1.12.0
-fastapi>=0.110.0
-uvicorn[standard]>=0.27.0
-numba>=0.63.1
-vector-quantize-pytorch>=1.27.15
-torchcodec>=0.9.1
-torchao
-modelscope
@@ -1,22 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cu128
-torch
-torchaudio
-torchvision
-
-# Core dependencies
-transformers>=4.51.0,<4.58.0
-diffusers
-gradio>=6.5.1
-matplotlib>=3.7.5
-scipy>=1.10.1
-soundfile>=0.13.1
-loguru>=0.7.3
-einops>=0.8.1
-accelerate>=1.12.0
-fastapi>=0.110.0
-uvicorn[standard]>=0.27.0
-numba>=0.63.1
-vector-quantize-pytorch>=1.27.15
-torchcodec>=0.9.1
-torchao
-modelscope
@@ -1,22 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cu130
-torch
-torchaudio
-torchvision
-
-# Core dependencies
-transformers>=4.51.0,<4.58.0
-diffusers
-gradio>=6.5.1
-matplotlib>=3.7.5
-scipy>=1.10.1
-soundfile>=0.13.1
-loguru>=0.7.3
-einops>=0.8.1
-accelerate>=1.12.0
-fastapi>=0.110.0
-uvicorn[standard]>=0.27.0
-numba>=0.63.1
-vector-quantize-pytorch>=1.27.15
-torchcodec>=0.9.1
-torchao
-modelscope
@@ -1,22 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.4
-torch==2.8.0+rocm6.4
-torchaudio
-torchvision
-
-# Core dependencies
-transformers>=4.51.0,<4.58.0
-diffusers
-gradio>=6.5.1
-matplotlib>=3.7.5
-scipy>=1.10.1
-soundfile>=0.13.1
-loguru>=0.7.3
-einops>=0.8.1
-accelerate>=1.12.0
-fastapi>=0.110.0
-uvicorn[standard]>=0.27.0
-numba>=0.63.1
-vector-quantize-pytorch>=1.27.15
-torchcodec>=0.9.1
-torchao
-modelscope
@@ -1,26 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/xpu
-torch
-torchaudio
-torchvision
-
-# Core dependencies
-transformers>=4.51.0,<4.58.0
-diffusers
-gradio
-matplotlib>=3.7.5
-scipy>=1.10.1
-soundfile>=0.13.1
-loguru>=0.7.3
-einops>=0.8.1
-accelerate>=1.12.0
-fastapi>=0.110.0
-uvicorn[standard]>=0.27.0
-numba>=0.63.1
-vector-quantize-pytorch>=1.27.15
-torchcodec>=0.9.1
-torchao
-modelscope
-
-# LoRA Training dependencies (optional)
-peft>=0.7.0
-lightning>=2.0.0
@@ -1,21 +0,0 @@
---extra-index-url https://download.pytorch.org/whl/cu130
-torch
-torchaudio
-torchvision
-# Core dependencies
-transformers>=4.51.0,<4.58.0
-diffusers
-gradio>=6.5.1
-matplotlib>=3.7.5
-scipy>=1.10.1
-soundfile>=0.13.1
-loguru>=0.7.3
-einops>=0.8.1
-accelerate>=1.12.0
-fastapi>=0.110.0
-uvicorn[standard]>=0.27.0
-numba>=0.63.1
-vector-quantize-pytorch>=1.27.15
-torchcodec>=0.9.1
-torchao
-modelscope
@@ -1,25 +0,0 @@
-torch
-torchaudio
-torchvision
-
-# Core dependencies
-transformers>=4.51.0,<4.58.0
-diffusers
-gradio
-matplotlib>=3.7.5
-scipy>=1.10.1
-soundfile>=0.13.1
-loguru>=0.7.3
-einops>=0.8.1
-accelerate>=1.12.0
-fastapi>=0.110.0
-uvicorn[standard]>=0.27.0
-numba>=0.63.1
-vector-quantize-pytorch>=1.27.15
-torchcodec>=0.9.1
-torchao
-modelscope
-
-# LoRA Training dependencies (optional)
-peft>=0.7.0
-lightning>=2.0.0
@@ -1,4 +0,0 @@
-setuptools
-grpcio==1.76.0
-protobuf
-certifi
@@ -1,9 +0,0 @@
-#!/bin/bash
-backend_dir=$(dirname $0)
-if [ -d $backend_dir/common ]; then
-    source $backend_dir/common/libbackend.sh
-else
-    source $backend_dir/../common/libbackend.sh
-fi
-
-startBackend $@
@@ -1,53 +0,0 @@
"""
Tests for the ACE-Step gRPC backend.
"""
import os
import tempfile
import unittest

import backend_pb2
import backend_pb2_grpc
import grpc


class TestACEStepBackend(unittest.TestCase):
    """Test Health, LoadModel, and SoundGeneration (minimal; no real model required)."""

    @classmethod
    def setUpClass(cls):
        port = os.environ.get("BACKEND_PORT", "50051")
        cls.channel = grpc.insecure_channel(f"localhost:{port}")
        cls.stub = backend_pb2_grpc.BackendStub(cls.channel)

    @classmethod
    def tearDownClass(cls):
        cls.channel.close()

    def test_health(self):
        response = self.stub.Health(backend_pb2.HealthMessage())
        self.assertEqual(response.message, b"OK")

    def test_load_model(self):
        response = self.stub.LoadModel(backend_pb2.ModelOptions(Model="ace-step-test"))
        self.assertTrue(response.success, response.message)

    def test_sound_generation_minimal(self):
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            dst = f.name
        try:
            req = backend_pb2.SoundGenerationRequest(
                text="upbeat pop song",
                model="ace-step-test",
                dst=dst,
            )
            response = self.stub.SoundGeneration(req)
            self.assertTrue(response.success, response.message)
            self.assertTrue(os.path.exists(dst), f"Output file not created: {dst}")
            self.assertGreater(os.path.getsize(dst), 0)
        finally:
            if os.path.exists(dst):
                os.unlink(dst)


if __name__ == "__main__":
    unittest.main()
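For reference, the same three calls this test exercises can be made from a standalone script. The sketch below assumes the generated backend_pb2 stubs are importable and that an ACE-Step backend is already listening on the given port (both assumptions, not shown in this diff):

import grpc
import backend_pb2
import backend_pb2_grpc

# Connect to a running ACE-Step backend (port 50051 is an assumption).
channel = grpc.insecure_channel("localhost:50051")
stub = backend_pb2_grpc.BackendStub(channel)

# Health check, model load, then a short generation written to /tmp/out.wav.
print(stub.Health(backend_pb2.HealthMessage()).message)
stub.LoadModel(backend_pb2.ModelOptions(Model="ace-step-test"))
resp = stub.SoundGeneration(backend_pb2.SoundGenerationRequest(
    text="upbeat pop song",
    model="ace-step-test",
    dst="/tmp/out.wav",
))
print(resp.success, resp.message)
channel.close()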
@@ -1,19 +0,0 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

# Start backend in background (use env to avoid port conflict in parallel tests)
export PYTHONUNBUFFERED=1
BACKEND_PORT=${BACKEND_PORT:-50051}
python backend.py --addr "localhost:${BACKEND_PORT}" &
BACKEND_PID=$!
trap "kill $BACKEND_PID 2>/dev/null || true" EXIT
sleep 3
export BACKEND_PORT
runUnittests
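The script above avoids port clashes by letting the caller override BACKEND_PORT. An alternative for parallel runs is to let the OS assign a free port before launching the backend; a minimal Python sketch of that approach (not what the script does):

import socket

def free_port() -> int:
    # Bind to port 0 so the OS picks an unused port; the caller can export
    # the result as BACKEND_PORT before starting backend.py.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.bind(("localhost", 0))
        return s.getsockname()[1]

print(free_port())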
@@ -1,7 +0,0 @@
torch
torchaudio
accelerate
numpy>=1.24.0,<1.26.0
transformers
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster

@@ -1,4 +0,0 @@
torch==2.7.1
transformers==4.48.3
accelerate
coqui-tts

@@ -1,8 +0,0 @@
torch==2.7.1
faster-whisper
opencv-python
accelerate
compel
peft
sentencepiece
optimum-quanto

@@ -1,5 +0,0 @@
grpcio==1.71.0
protobuf
certifi
packaging==24.1
https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl

@@ -1,5 +0,0 @@
torch==2.7.1
transformers
accelerate
kokoro
soundfile

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-audio
mlx[cpu]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-audio
mlx[cuda12]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-audio
mlx[cuda13]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-audio
mlx[cuda12]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-audio
mlx[cuda13]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-vlm
mlx[cpu]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-vlm
mlx[cuda12]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-vlm
mlx[cuda13]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-vlm
mlx[cuda12]

@@ -1,2 +0,0 @@
git+https://github.com/Blaizzy/mlx-vlm
mlx[cuda13]

@@ -1,2 +0,0 @@
mlx-lm
mlx[cpu]

@@ -1,2 +0,0 @@
mlx-lm
mlx[cuda12]

@@ -1,2 +0,0 @@
mlx-lm
mlx[cuda13]

@@ -1,2 +0,0 @@
mlx-lm
mlx[cuda12]

@@ -1,2 +0,0 @@
mlx-lm
mlx[cuda13]

@@ -1,4 +0,0 @@
grpcio==1.71.0
protobuf
grpcio-tools
useful-moonshine-onnx@git+https://git@github.com/moonshine-ai/moonshine.git#subdirectory=moonshine-onnx
@@ -1,23 +0,0 @@
.PHONY: nemo-asr
nemo-asr:
	bash install.sh

.PHONY: run
run: nemo-asr
	@echo "Running nemo-asr..."
	bash run.sh
	@echo "nemo-asr run."

.PHONY: test
test: nemo-asr
	@echo "Testing nemo-asr..."
	bash test.sh
	@echo "nemo-asr tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
@@ -1,147 +0,0 @@
#!/usr/bin/env python3
"""
gRPC server of LocalAI for NVIDIA NEMO Toolkit ASR.
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os
import backend_pb2
import backend_pb2_grpc
import torch
import nemo.collections.asr as nemo_asr

import grpc


def is_float(s):
    try:
        float(s)
        return True
    except ValueError:
        return False


def is_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False


_ONE_DAY_IN_SECONDS = 60 * 60 * 24
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))


class BackendServicer(backend_pb2_grpc.BackendServicer):
    def Health(self, request, context):
        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

    def LoadModel(self, request, context):
        if torch.cuda.is_available():
            device = "cuda"
        else:
            device = "cpu"
        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
        if mps_available:
            device = "mps"
        if not torch.cuda.is_available() and request.CUDA:
            return backend_pb2.Result(success=False, message="CUDA is not available")

        self.device = device
        self.options = {}

        for opt in request.Options:
            if ":" not in opt:
                continue
            key, value = opt.split(":", 1)
            if is_float(value):
                value = float(value)
            elif is_int(value):
                value = int(value)
            elif value.lower() in ["true", "false"]:
                value = value.lower() == "true"
            self.options[key] = value

        model_name = request.Model or "nvidia/parakeet-tdt-0.6b-v3"

        try:
            print(f"Loading NEMO ASR model from {model_name}", file=sys.stderr)
            self.model = nemo_asr.models.ASRModel.from_pretrained(model_name=model_name)
            print("NEMO ASR model loaded successfully", file=sys.stderr)
        except Exception as err:
            print(f"[ERROR] LoadModel failed: {err}", file=sys.stderr)
            import traceback
            traceback.print_exc(file=sys.stderr)
            return backend_pb2.Result(success=False, message=str(err))

        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def AudioTranscription(self, request, context):
        result_segments = []
        text = ""
        try:
            audio_path = request.dst
            if not audio_path or not os.path.exists(audio_path):
                print(f"Error: Audio file not found: {audio_path}", file=sys.stderr)
                return backend_pb2.TranscriptResult(segments=[], text="")

            # NEMO's transcribe method accepts a list of audio paths and returns a list of transcripts
            results = self.model.transcribe([audio_path])

            if not results or len(results) == 0:
                return backend_pb2.TranscriptResult(segments=[], text="")

            # Get the transcript text from the first result
            text = results[0]
            if text:
                # Create a single segment with the full transcription
                result_segments.append(backend_pb2.TranscriptSegment(
                    id=0, start=0, end=0, text=text
                ))

        except Exception as err:
            print(f"Error in AudioTranscription: {err}", file=sys.stderr)
            import traceback
            traceback.print_exc(file=sys.stderr)
            return backend_pb2.TranscriptResult(segments=[], text="")

        return backend_pb2.TranscriptResult(segments=result_segments, text=text)


def serve(address):
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
        options=[
            ('grpc.max_message_length', 50 * 1024 * 1024),
            ('grpc.max_send_message_length', 50 * 1024 * 1024),
            ('grpc.max_receive_message_length', 50 * 1024 * 1024),
        ])
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)

    def signal_handler(sig, frame):
        print("Received termination signal. Shutting down...")
        server.stop(0)
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the gRPC server.")
    parser.add_argument("--addr", default="localhost:50051", help="The address to bind the server to.")
    args = parser.parse_args()
    serve(args.addr)
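One caveat in AudioTranscription above: `results[0]` is used directly as the transcript string, which matches NeMo releases where `transcribe` returns plain strings; other releases return Hypothesis objects carrying a `.text` attribute. A defensive extraction helper (a sketch built on that assumption, not part of the deleted file) would be:

def extract_transcript(results):
    # results: the list returned by nemo_asr ASRModel.transcribe([path]).
    if not results:
        return ""
    first = results[0]
    # Some NeMo versions return Hypothesis objects with a .text attribute;
    # others return the transcript string itself.
    return first.text if hasattr(first, "text") else first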
@@ -1,17 +0,0 @@
#!/bin/bash
set -e

EXTRA_PIP_INSTALL_FLAGS="--no-build-isolation"

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

installRequirements
@@ -1,11 +0,0 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cpu
torch
nemo_toolkit[asr]

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu128
torch
nemo_toolkit[asr]

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
nemo_toolkit[asr]

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.3
torch
nemo_toolkit[asr]

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/xpu
torch
nemo_toolkit[asr]

@@ -1,3 +0,0 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
torch
nemo_toolkit[asr]

@@ -1,3 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
nemo_toolkit[asr]

@@ -1,2 +0,0 @@
torch
nemo_toolkit[asr]

@@ -1,6 +0,0 @@
grpcio==1.71.0
protobuf
certifi
packaging==24.1
setuptools
pyarrow==20.0.0
@@ -1,9 +0,0 @@
#!/bin/bash
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

startBackend $@
@@ -1,99 +0,0 @@
"""
Tests for the NEMO Toolkit ASR gRPC backend.
"""
import unittest
import subprocess
import time
import os
import tempfile
import shutil
import backend_pb2
import backend_pb2_grpc

import grpc

# Skip heavy transcription test in CI (model download + inference)
SKIP_ASR_TESTS = os.environ.get("SKIP_ASR_TESTS", "false").lower() == "true"


class TestBackendServicer(unittest.TestCase):
    def setUp(self):
        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
        time.sleep(15)

    def tearDown(self):
        self.service.terminate()
        self.service.wait()

    def test_server_startup(self):
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.Health(backend_pb2.HealthMessage())
                self.assertEqual(response.message, b'OK')
        except Exception as err:
            print(err)
            self.fail("Server failed to start")
        finally:
            self.tearDown()

    def test_load_model(self):
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.LoadModel(backend_pb2.ModelOptions(Model="nvidia/parakeet-tdt-0.6b-v3"))
                self.assertTrue(response.success, response.message)
                self.assertEqual(response.message, "Model loaded successfully")
        except Exception as err:
            print(err)
            self.fail("LoadModel service failed")
        finally:
            self.tearDown()

    @unittest.skipIf(SKIP_ASR_TESTS, "ASR transcription test skipped (SKIP_ASR_TESTS=true)")
    def test_audio_transcription(self):
        temp_dir = tempfile.mkdtemp()
        audio_file = os.path.join(temp_dir, 'audio.wav')
        try:
            # Download a sample audio file for testing
            url = "https://audio-samples.github.io/samples/mp3/crowd-cheering-and-applause-sound-effect.mp3"
            result = subprocess.run(
                ["wget", "-q", url, "-O", audio_file],
                capture_output=True,
                text=True,
                timeout=30,
            )
            if result.returncode != 0:
                self.skipTest(f"Could not download sample audio: {result.stderr}")
            if not os.path.exists(audio_file):
                self.skipTest("Sample audio file not found after download")

            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                load_response = stub.LoadModel(backend_pb2.ModelOptions(Model="nvidia/parakeet-tdt-0.6b-v3"))
                self.assertTrue(load_response.success, load_response.message)

                transcript_response = stub.AudioTranscription(
                    backend_pb2.TranscriptRequest(dst=audio_file)
                )
                self.assertIsNotNone(transcript_response)
                self.assertIsNotNone(transcript_response.text)
                self.assertGreaterEqual(len(transcript_response.segments), 0)
                all_text = ""
                for segment in transcript_response.segments:
                    all_text += segment.text
                print(f"Transcription result: {all_text}")
                self.assertIn("big", all_text)
                if transcript_response.segments:
                    self.assertIsNotNone(transcript_response.segments[0].text)
        finally:
            self.tearDown()
            if os.path.exists(temp_dir):
                shutil.rmtree(temp_dir)


if __name__ == '__main__':
    unittest.main()
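The download step above shells out to wget, which may be missing on minimal images. An equivalent standard-library fetch (a sketch, not part of the deleted test) would be:

import urllib.request

def fetch(url: str, dst: str, timeout: int = 30) -> None:
    # Mirror the wget call in test_audio_transcription using only stdlib.
    with urllib.request.urlopen(url, timeout=timeout) as resp, open(dst, "wb") as out:
        out.write(resp.read())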
@@ -1,11 +0,0 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

runUnittests
@@ -1,23 +0,0 @@
.PHONY: outetts
outetts:
	bash install.sh

.PHONY: run
run: outetts
	@echo "Running outetts..."
	bash run.sh
	@echo "outetts run."

.PHONY: test
test: outetts
	@echo "Testing outetts..."
	bash test.sh
	@echo "outetts tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__
@@ -1,138 +0,0 @@
#!/usr/bin/env python3
"""
gRPC server for OuteTTS (OuteAI TTS) models.
"""
from concurrent import futures

import argparse
import signal
import sys
import os
import asyncio

import backend_pb2
import backend_pb2_grpc

import grpc
import outetts

_ONE_DAY_IN_SECONDS = 60 * 60 * 24

MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))


class BackendServicer(backend_pb2_grpc.BackendServicer):
    def Health(self, request, context):
        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

    def LoadModel(self, request, context):
        model_name = request.Model
        if os.path.exists(request.ModelFile):
            model_name = request.ModelFile

        self.options = {}
        for opt in request.Options:
            if ":" not in opt:
                continue
            key, value = opt.split(":", 1)
            try:
                if "." in value:
                    value = float(value)
                else:
                    value = int(value)
            except ValueError:
                pass
            self.options[key] = value

        MODELNAME = "OuteAI/OuteTTS-0.3-1B"
        TOKENIZER = "OuteAI/OuteTTS-0.3-1B"
        VERSION = "0.3"
        SPEAKER = "en_male_1"
        for opt in request.Options:
            if opt.startswith("tokenizer:"):
                TOKENIZER = opt.split(":")[1]
                break
            if opt.startswith("version:"):
                VERSION = opt.split(":")[1]
                break
            if opt.startswith("speaker:"):
                SPEAKER = opt.split(":")[1]
                break

        if model_name != "":
            MODELNAME = model_name

        try:
            model_config = outetts.HFModelConfig_v2(
                model_path=MODELNAME,
                tokenizer_path=TOKENIZER
            )
            self.interface = outetts.InterfaceHF(model_version=VERSION, cfg=model_config)

            self.interface.print_default_speakers()
            if request.AudioPath:
                if os.path.isabs(request.AudioPath):
                    self.AudioPath = request.AudioPath
                else:
                    self.AudioPath = os.path.join(request.ModelPath, request.AudioPath)
                self.speaker = self.interface.create_speaker(audio_path=self.AudioPath)
            else:
                self.speaker = self.interface.load_default_speaker(name=SPEAKER)

            if request.ContextSize > 0:
                self.max_tokens = request.ContextSize
            else:
                self.max_tokens = self.options.get("max_new_tokens", 512)

        except Exception as err:
            print("Error:", err, file=sys.stderr)
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def TTS(self, request, context):
        try:
            text = request.text if request.text else "Speech synthesis is the artificial production of human speech."
            print("[OuteTTS] generating TTS", file=sys.stderr)
            gen_cfg = outetts.GenerationConfig(
                text=text,
                temperature=self.options.get("temperature", 0.1),
                repetition_penalty=self.options.get("repetition_penalty", 1.1),
                max_length=self.max_tokens,
                speaker=self.speaker,
            )
            output = self.interface.generate(config=gen_cfg)
            print("[OuteTTS] Generated TTS", file=sys.stderr)
            output.save(request.dst)
            print("[OuteTTS] TTS done", file=sys.stderr)
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
        return backend_pb2.Result(success=True)


async def serve(address):
    server = grpc.aio.server(
        migration_thread_pool=futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
        options=[
            ('grpc.max_message_length', 50 * 1024 * 1024),
            ('grpc.max_send_message_length', 50 * 1024 * 1024),
            ('grpc.max_receive_message_length', 50 * 1024 * 1024),
        ])
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)

    loop = asyncio.get_event_loop()
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(
            sig, lambda: asyncio.ensure_future(server.stop(5))
        )

    await server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)
    await server.wait_for_termination()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the OuteTTS gRPC server.")
    parser.add_argument("--addr", default="localhost:50051", help="The address to bind the server to.")
    args = parser.parse_args()
    asyncio.run(serve(args.addr))
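The backend reads LocalAI-style "key:value" option strings (tokenizer, version, speaker, temperature, and so on). A minimal client invocation of the TTS endpoint, assuming the generated stubs are importable and the server above is running (both assumptions), might look like:

import grpc
import backend_pb2
import backend_pb2_grpc

channel = grpc.insecure_channel("localhost:50051")
stub = backend_pb2_grpc.BackendStub(channel)

# Pass speaker/version through the Options list, matching the parsing above.
stub.LoadModel(backend_pb2.ModelOptions(
    Model="OuteAI/OuteTTS-0.3-1B",
    Options=["speaker:en_male_1", "version:0.3"],
))
resp = stub.TTS(backend_pb2.TTSRequest(
    text="Hello from OuteTTS.",
    dst="/tmp/tts.wav",
))
print(resp.success)
channel.close()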
@@ -1,11 +0,0 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

installRequirements
@@ -1,7 +0,0 @@
torch==2.7.1
llvmlite==0.43.0
numba==0.60.0
accelerate
bitsandbytes
outetts
protobuf==6.33.5

@@ -1,7 +0,0 @@
torch==2.7.1
accelerate
llvmlite==0.43.0
numba==0.60.0
bitsandbytes
protobuf==6.33.5
outetts

@@ -1,7 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch==2.9.0
llvmlite==0.43.0
numba==0.60.0
bitsandbytes
outetts
protobuf==6.33.5

@@ -1,8 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
torch==2.8.0+rocm6.4
accelerate
llvmlite==0.43.0
numba==0.60.0
bitsandbytes
outetts
protobuf==6.33.5
Some files were not shown because too many files have changed in this diff.