Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 03:02:38 -05:00)

Compare commits: v3.2.1 ... feat/trans (184 commits)
Commits (SHA1):

```
478d2adfb7 909fdd1b0e be132fe816 ff5d2dc8be c1cfa08226 fec8a36b36 5d4f5d2355 057248008f
9f2c9cd691 6971f71a6c 1ba66d00f5 259383cf5e 209c0694f5 0fd395d6ec d04bd47116 1d830ce7dd
6dccfb09f8 e4d9cf8349 c899e90277 8193d18c7c 2e4dc6456f 4594430a3e 9c7f92c81f 060037bcd4
d9da4676b4 5ef4c2e471 27ce570844 42c7859ab1 e7e83d0fa6 c6dc1d86f1 6fd2e1964d 49ae41b716
b3f0ed62fd 4b9afc418b e44ff8514b 2b6be10b6b 1361d844a1 fcc521cae5 8cad7138be ebd1db2f09
7920d75805 1d0e24a865 9eed5ef872 39ab80442a 1b101df2c0 784bd5db33 b8b1ca782c 1149fb66d3
243e86176e 8da38a0d10 60786fc876 9486b88a25 bef4c10629 80f15851c5 22067e3384 4fbd639463
70f7d0c25f 576e821298 7293f26fcf 79973a28ad 8ab51509cc b3384e5428 7050c9f69d 089efe05fd
253b7537dc 19c92c70c5 b52bfaf1b3 bf60ca5bf0 2b44467bd1 8c1f4a131e 10a3f0bd92 72f4d541d0
9f812fdb84 b70ee45fff 9d9c853541 18fcd8557c d8e27c38d7 3b0dc87932 2374485222 0ca1765c17
90b5ed9a1e d438b769da 2e4bd1e33d ff73800970 94cb20ae7f 47c20f9adb a7fe153630 27519d2233
8cab0f880b 8c48b250c4 ba802c2ee4 429bb7a88c b2e8b6d1aa fba5b557a1 6db19c5cb9 5428678209
06129139eb 05757e2738 240b790f29 5f221f5946 def7cdc0bf ea9bf3dba2 b8eca530b6 47034ddacd
9a41331855 facc0181df 4733adb983 326fda3223 abf61e5b42 2ae45e7635 7d41551e10 6fbd720515
4e40a8d1ed 003b9292fe 09457b9221 41aa7e107f bda875f962 224063f0f7 89978c8b57 987b5dcac1
ec1276e5a9 61ba98d43d b9a25b16e6 6a8149e1fd 9c2840ac38 20a70e1244 3295a298f4 da6f37f000
c092633cd7 7e2a522229 03e8592450 f207bd1427 a5c0fe31c3 c68907ac65 9087ddc4de 33bebd5114
2913676157 e83652489c d6274eaf4a 4d90971424 90f5639639 a35a701052 3d8ec72dbf 2a9d675d62
c782e8abf1 a1e1942d83 787302b204 0b085089b9 624f3b1fc8 c07bc55fee 173e0774c0 8ece26ab7c
d704cc7970 ab17baaae1 ca358fcdca 9aadfd485f da3b0850de 8b1e8b4cda 3d22bfc27c 4438b4361e
04bad9a2da 8235e53602 eb5c3670f1 89e61fca90 9d6efe8842 60726d16f2 9d7ec09ec0 36179ffbed
d25145e641 949e5b9be8 73ecb7f90b 053bed6e5f 932360bf7e 6d0b52843f 078c22f485 6ef3852de5
a8057b952c fd5c1d916f 5ce982b9c9 47ccfccf7a a760f7ff39 facf7625f3 b3600b3c50 f0b47cfe6a
```
.github/workflows/backend.yml (vendored, 416 lines changed)

```
@@ -2,6 +2,7 @@
name: 'build backend container images'
on:
pull_request:
push:
branches:
- master
@@ -63,6 +64,18 @@ jobs:
backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp"
context: "./"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-transformers'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
@@ -87,6 +100,30 @@ jobs:
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-l4t-diffusers'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-diffusers'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# CUDA 11 additional backends
- build-type: 'cublas'
cuda-major-version: "11"
@@ -179,7 +216,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm'
runs-on: 'ubuntu-latest'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "vllm"
@@ -278,7 +315,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-rerankers'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "rerankers"
dockerfile: "./backend/Dockerfile.python"
@@ -290,7 +327,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-llama-cpp'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp"
@@ -301,8 +338,8 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vllm'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "vllm"
dockerfile: "./backend/Dockerfile.python"
@@ -313,8 +350,8 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-transformers'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
@@ -325,8 +362,8 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-diffusers'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
@@ -338,8 +375,8 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-kokoro'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "kokoro"
dockerfile: "./backend/Dockerfile.python"
@@ -351,7 +388,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-faster-whisper'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python"
@@ -363,7 +400,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
@@ -374,31 +411,19 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-bark'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# sycl builds
- build-type: 'sycl_f32'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-rerankers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "rerankers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-rerankers'
tag-suffix: '-gpu-intel-rerankers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
@@ -429,60 +454,36 @@ jobs:
backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp"
context: "./"
- build-type: 'sycl_f32'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-vllm'
runs-on: 'ubuntu-latest'
tag-suffix: '-gpu-intel-vllm'
runs-on: 'arc-runner-set'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "vllm"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-vllm'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "vllm"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-transformers'
tag-suffix: '-gpu-intel-transformers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-transformers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-diffusers'
tag-suffix: '-gpu-intel-diffusers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
@@ -490,96 +491,48 @@ jobs:
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# SYCL additional backends
- build-type: 'sycl_f32'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-kokoro'
tag-suffix: '-gpu-intel-kokoro'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "kokoro"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-kokoro'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "kokoro"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-faster-whisper'
tag-suffix: '-gpu-intel-faster-whisper'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-faster-whisper'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-coqui'
tag-suffix: '-gpu-intel-coqui'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-coqui'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-bark'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-bark'
tag-suffix: '-gpu-intel-bark'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
@@ -824,7 +777,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-whisper'
base-image: "rocm/dev-ubuntu-22.04:6.1"
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
runs-on: 'ubuntu-latest'
skip-drivers: 'false'
backend: "whisper"
@@ -868,7 +821,209 @@ jobs:
skip-drivers: 'false'
backend: "huggingface"
dockerfile: "./backend/Dockerfile.golang"
context: "./"
context: "./"
# rfdetr
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-rfdetr'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# exllama2
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-exllama2'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-exllama2'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
runs-on: 'ubuntu-latest'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# runs out of space on the runner
# - build-type: 'hipblas'
# cuda-major-version: ""
# cuda-minor-version: ""
# platforms: 'linux/amd64'
# tag-latest: 'auto'
# tag-suffix: '-gpu-hipblas-rfdetr'
# base-image: "rocm/dev-ubuntu-22.04:6.4.3"
# runs-on: 'ubuntu-latest'
# skip-drivers: 'false'
# backend: "rfdetr"
# dockerfile: "./backend/Dockerfile.python"
# context: "./backend"
# kitten-tts
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-kitten-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "kitten-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
transformers-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "transformers"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-transformers"
use-pip: true
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
diffusers-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "diffusers"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-diffusers"
use-pip: true
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
mlx-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "mlx"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-mlx"
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
mlx-vlm-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "mlx-vlm"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-mlx-vlm"
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
llama-cpp-darwin:
runs-on: macOS-14
strategy:
@@ -876,7 +1031,7 @@ jobs:
go-version: ['1.21.x']
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -893,21 +1048,19 @@ jobs:
- name: Build llama-cpp-darwin
run: |
make protogen-go
make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
make backends/llama-cpp-darwin
- name: Upload llama-cpp.tar
uses: actions/upload-artifact@v4
with:
name: llama-cpp-tar
path: build/llama-cpp.tar
path: backend-images/llama-cpp.tar
llama-cpp-darwin-publish:
needs: llama-cpp-darwin
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download llama-cpp.tar
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
name: llama-cpp-tar
path: .
@@ -964,7 +1117,7 @@ jobs:
go-version: ['1.21.x']
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -983,20 +1136,19 @@ jobs:
make protogen-go
make build
export PLATFORMARCH=darwin/amd64
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
make backends/llama-cpp-darwin
- name: Upload llama-cpp.tar
uses: actions/upload-artifact@v4
with:
name: llama-cpp-tar-x86
path: build/llama-cpp.tar
path: backend-images/llama-cpp.tar
llama-cpp-darwin-x86-publish:
if: github.event_name != 'pull_request'
needs: llama-cpp-darwin-x86
runs-on: ubuntu-latest
steps:
- name: Download llama-cpp.tar
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
name: llama-cpp-tar-x86
path: .
@@ -1045,4 +1197,4 @@ jobs:
run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push llama-cpp.tar $tag
done
done
```
.github/workflows/backend_build.yml (vendored, 14 lines changed)

```
@@ -55,9 +55,9 @@ on:
type: string
secrets:
dockerUsername:
required: true
required: false
dockerPassword:
required: true
required: false
quayUsername:
required: true
quayPassword:
@@ -66,6 +66,8 @@ on:
jobs:
backend-build:
runs-on: ${{ inputs.runs-on }}
env:
quay_username: ${{ secrets.quayUsername }}
steps:
@@ -95,7 +97,7 @@ jobs:
&& sudo apt-get install -y git
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'
@@ -187,7 +189,7 @@ jobs:
password: ${{ secrets.dockerPassword }}
- name: Login to Quay.io
# if: github.event_name != 'pull_request'
if: ${{ env.quay_username != '' }}
uses: docker/login-action@v3
with:
registry: quay.io
@@ -230,7 +232,7 @@ jobs:
file: ${{ inputs.dockerfile }}
cache-from: type=gha
platforms: ${{ inputs.platforms }}
push: true
push: ${{ env.quay_username != '' }}
tags: ${{ steps.meta_pull_request.outputs.tags }}
labels: ${{ steps.meta_pull_request.outputs.labels }}
@@ -238,4 +240,4 @@ jobs:
- name: job summary
run: |
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY
```
.github/workflows/backend_build_darwin.yml (vendored, new file, 140 lines)

```
@@ -0,0 +1,140 @@
---
name: 'build darwin python backend container images (reusable)'
on:
workflow_call:
inputs:
backend:
description: 'Backend to build'
required: true
type: string
build-type:
description: 'Build type (e.g., mps)'
default: ''
type: string
use-pip:
description: 'Use pip to install dependencies'
default: false
type: boolean
go-version:
description: 'Go version to use'
default: '1.24.x'
type: string
tag-suffix:
description: 'Tag suffix for the built image'
required: true
type: string
runs-on:
description: 'Runner to use'
default: 'macOS-14'
type: string
secrets:
dockerUsername:
required: false
dockerPassword:
required: false
quayUsername:
required: true
quayPassword:
required: true
jobs:
darwin-backend-build:
runs-on: ${{ inputs.runs-on }}
strategy:
matrix:
go-version: ['${{ inputs.go-version }}']
steps:
- name: Clone
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
# You can test your matrix by printing the current Go version
- name: Display Go version
run: go version
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
- name: Build ${{ inputs.backend }}-darwin
run: |
make protogen-go
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-python-backend
- name: Upload ${{ inputs.backend }}.tar
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.backend }}-tar
path: backend-images/${{ inputs.backend }}.tar
darwin-backend-publish:
needs: darwin-backend-build
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download ${{ inputs.backend }}.tar
uses: actions/download-artifact@v5
with:
name: ${{ inputs.backend }}-tar
path: .
- name: Install crane
run: |
curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
sudo mv crane /usr/local/bin/
- name: Log in to DockerHub
run: |
echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin
- name: Log in to quay.io
run: |
echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
localai/localai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta
id: quaymeta
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/local-ai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Push Docker image (DockerHub)
run: |
for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
- name: Push Docker image (Quay)
run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
```
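As a usage sketch, the reusable workflow above is invoked from backend.yml in this same branch; a minimal caller (mirroring the mlx-darwin job shown earlier, with the same inputs and secrets) looks roughly like this:

```yaml
# Minimal caller sketch for the reusable darwin backend build,
# modeled on the mlx-darwin job added to .github/workflows/backend.yml.
jobs:
  mlx-darwin:
    uses: ./.github/workflows/backend_build_darwin.yml
    with:
      backend: "mlx"
      build-type: "mps"
      go-version: "1.24.x"
      tag-suffix: "-metal-darwin-arm64-mlx"
      runs-on: "macOS-14"
    secrets:
      dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
      dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
      quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
      quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
```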
.github/workflows/backend_pr.yml (vendored, new file, 58 lines)

```
@@ -0,0 +1,58 @@
name: 'build backend container images (PR-filtered)'
on:
pull_request:
concurrency:
group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
has-backends: ${{ steps.set-matrix.outputs.has-backends }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Setup Bun
uses: oven-sh/setup-bun@v2
- name: Install dependencies
run: |
bun add js-yaml
bun add @octokit/core
# filters the matrix in backend.yml
- name: Filter matrix for changed backends
id: set-matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_EVENT_PATH: ${{ github.event_path }}
run: bun run scripts/changed-backends.js
backend-jobs:
needs: generate-matrix
uses: ./.github/workflows/backend_build.yml
if: needs.generate-matrix.outputs.has-backends == 'true'
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
backend: ${{ matrix.backend }}
dockerfile: ${{ matrix.dockerfile }}
skip-drivers: ${{ matrix.skip-drivers }}
context: ${{ matrix.context }}
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
```
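The backend-jobs job consumes the generated matrix with fromJson, so scripts/changed-backends.js is expected to emit JSON in the standard GitHub Actions matrix shape. A hypothetical sketch of that output, with field values copied from one of the matrix entries in backend.yml (the actual script may emit different entries):

```json
{
  "include": [
    {
      "build-type": "",
      "cuda-major-version": "",
      "cuda-minor-version": "",
      "platforms": "linux/amd64",
      "tag-latest": "auto",
      "tag-suffix": "-cpu-transformers",
      "runs-on": "ubuntu-latest",
      "base-image": "ubuntu:22.04",
      "skip-drivers": "true",
      "backend": "transformers",
      "dockerfile": "./backend/Dockerfile.python",
      "context": "./backend"
    }
  ]
}
```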
.github/workflows/build-test.yaml (vendored, 2 lines changed)

```
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
```
.github/workflows/bump_deps.yaml (vendored, 2 lines changed)

```
@@ -31,7 +31,7 @@ jobs:
file: "backend/go/piper/Makefile"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Bump dependencies 🔧
id: bump
run: |
```
.github/workflows/bump_docs.yaml (vendored, 2 lines changed)

```
@@ -12,7 +12,7 @@ jobs:
- repository: "mudler/LocalAI"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Bump dependencies 🔧
run: |
bash .github/bump_docs.sh ${{ matrix.repository }}
```
.github/workflows/checksum_checker.yaml (vendored, 2 lines changed)

```
@@ -15,7 +15,7 @@ jobs:
&& sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \
&& sudo apt-get install -y git
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Install dependencies
run: |
sudo apt-get update
```
.github/workflows/dependabot_auto.yml (vendored, 2 lines changed)

```
@@ -20,7 +20,7 @@ jobs:
skip-commit-verification: true
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Approve a PR if not already approved
run: |
```
.github/workflows/deploy-explorer.yaml (vendored, 2 lines changed)

```
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- uses: actions/setup-go@v5
```
.github/workflows/generate_grpc_cache.yaml (vendored, 2 lines changed)

```
@@ -73,7 +73,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Cache GRPC
uses: docker/build-push-action@v6
```
.github/workflows/generate_intel_image.yaml (vendored, 2 lines changed)

```
@@ -43,7 +43,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Cache Intel images
uses: docker/build-push-action@v6
```
.github/workflows/image-pr.yml (vendored, 8 lines changed)

```
@@ -39,7 +39,7 @@ jobs:
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda12'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
@@ -47,16 +47,16 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.1"
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl_f16'
- build-type: 'sycl'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: 'sycl-f16'
tag-suffix: 'sycl'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'vulkan'
```
.github/workflows/image.yml (vendored, 23 lines changed)

```
@@ -39,7 +39,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.1"
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
@@ -83,7 +83,7 @@ jobs:
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda11'
tag-suffix: '-gpu-nvidia-cuda-11'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
@@ -94,7 +94,7 @@ jobs:
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda12'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
@@ -103,30 +103,21 @@ jobs:
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-vulkan'
tag-suffix: '-gpu-vulkan'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-vulkan"
- build-type: 'sycl_f16'
- build-type: 'intel'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel-f16'
tag-suffix: '-gpu-intel'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel-f16"
- build-type: 'sycl_f32'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel-f32'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel-f32"
aio: "-aio-gpu-intel"
gh-runner:
uses: ./.github/workflows/image_build.yml
```
.github/workflows/image_build.yml (vendored, 2 lines changed)

```
@@ -94,7 +94,7 @@ jobs:
&& sudo apt-get update \
&& sudo apt-get install -y git
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'
```
.github/workflows/localaibot_automerge.yml (vendored, 2 lines changed)

```
@@ -13,7 +13,7 @@ jobs:
if: ${{ github.actor == 'localai-bot' }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Approve a PR if not already approved
run: |
```
.github/workflows/notify-models.yaml (vendored, 4 lines changed)

```
@@ -11,7 +11,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
- uses: mudler/localai-github-action@v1
@@ -90,7 +90,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
- name: Start LocalAI
```
.github/workflows/release.yaml (vendored, 2 lines changed)

```
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
```
.github/workflows/secscan.yaml (vendored, 4 lines changed)

```
@@ -14,11 +14,11 @@ jobs:
GO111MODULE: on
steps:
- name: Checkout Source
uses: actions/checkout@v4
uses: actions/checkout@v5
if: ${{ github.actor != 'dependabot[bot]' }}
- name: Run Gosec Security Scanner
if: ${{ github.actor != 'dependabot[bot]' }}
uses: securego/gosec@v2.22.7
uses: securego/gosec@v2.22.8
with:
# we let the report trigger content trigger a failure using the GitHub Security features.
args: '-no-fail -fmt sarif -out results.sarif ./...'
```
.github/workflows/test-extra.yml (vendored, 18 lines changed)

```
@@ -19,7 +19,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -61,7 +61,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -83,7 +83,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -104,7 +104,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -124,7 +124,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -186,7 +186,7 @@ jobs:
# sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
# df -h
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -211,7 +211,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -232,7 +232,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
```
.github/workflows/test.yml (vendored, 26 lines changed)

```
@@ -23,6 +23,20 @@ jobs:
matrix:
go-version: ['1.21.x']
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Release space from worker
run: |
echo "Listing top largest packages"
@@ -56,7 +70,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -152,7 +166,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -182,7 +196,7 @@ jobs:
go-version: ['1.21.x']
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -200,11 +214,7 @@ jobs:
- name: Build llama-cpp-darwin
run: |
make protogen-go
make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
./local-ai backends install "ocifile://$PWD/build/llama-cpp.tar"
make backends/llama-cpp-darwin
- name: Test
run: |
export C_INCLUDE_PATH=/usr/local/include
```
.github/workflows/update_swagger.yaml (vendored, 2 lines changed)

```
@@ -9,7 +9,7 @@ jobs:
fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: actions/setup-go@v5
with:
go-version: 'stable'
```
Dockerfile (10 lines changed)

```
@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \
python3 python-is-python3 ffmpeg && \
ffmpeg libopenblas-base libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
@@ -72,6 +72,12 @@ RUN <<EOT bash
fi
EOT
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
echo "nvidia-l4t" > /run/localai/capability
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
@@ -94,6 +100,8 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
ldconfig \
; fi
RUN expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
```
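Reassembled from the hunks above, the capability-marker logic added to the Dockerfile amounts to the following sketch; in the real Dockerfile these run as separate RUN steps during the image build, not as one script:

```bash
# Sketch: the image records what acceleration it was built for
# in /run/localai/capability, based on build arguments.
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
  echo "nvidia-l4t" > /run/localai/capability   # Jetson / L4T builds
fi
# Intel oneAPI builds get an "intel" marker; other build types leave the file alone.
expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
```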
Makefile (114 lines changed)

```
@@ -5,8 +5,6 @@ BINARY_NAME=local-ai
GORELEASER?=
ONEAPI_VERSION?=2025.2
export BUILD_TYPE?=
GO_TAGS?=
@@ -134,27 +132,6 @@ test: test-models/testmodel.ggml protogen-go
$(MAKE) test-tts
$(MAKE) test-stablediffusion
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
########################################################
## AIO tests
########################################################
@@ -322,7 +299,7 @@ docker-cuda11:
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
-t $(DOCKER_IMAGE)-cuda11 .
-t $(DOCKER_IMAGE)-cuda-11 .
docker-aio:
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
@@ -337,24 +314,69 @@ docker-aio-all:
docker-image-intel:
docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu24.04 \
--build-arg BASE_IMAGE=quay.io/go-skynet/intel-oneapi-base:latest \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
docker-image-intel-xpu:
docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu22.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
--build-arg BUILD_TYPE=intel -t $(DOCKER_IMAGE) .
########################################################
## Backends
########################################################
backends/diffusers: docker-build-diffusers docker-save-diffusers build
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
backends/kokoro: docker-build-kokoro docker-save-kokoro build
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
backends/llama-cpp-darwin: build
bash ./scripts/build/llama-cpp-darwin.sh
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
build-darwin-python-backend: build
bash ./scripts/build/python-darwin.sh
backends/mlx:
BACKEND=mlx $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx.tar)"
backends/diffuser-darwin:
BACKEND=diffusers $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/mlx-vlm:
BACKEND=mlx-vlm $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-vlm.tar)"
backend-images:
mkdir -p backend-images
@@ -373,6 +395,24 @@ docker-build-local-store:
docker-build-huggingface:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:huggingface -f backend/Dockerfile.golang --build-arg BACKEND=huggingface .
docker-build-rfdetr:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rfdetr -f backend/Dockerfile.python --build-arg BACKEND=rfdetr ./backend
docker-build-kitten-tts:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kitten-tts -f backend/Dockerfile.python --build-arg BACKEND=kitten-tts ./backend
docker-save-kitten-tts: backend-images
docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar
docker-build-kokoro:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
docker-save-kokoro: backend-images
docker save local-ai-backend:kokoro -o backend-images/kokoro.tar
docker-save-rfdetr: backend-images
docker save local-ai-backend:rfdetr -o backend-images/rfdetr.tar
docker-save-huggingface: backend-images
docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
@@ -410,10 +450,10 @@ docker-build-transformers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
docker-build-diffusers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers .
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers ./backend
docker-build-kokoro:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro .
docker-save-diffusers: backend-images
docker save local-ai-backend:diffusers -o backend-images/diffusers.tar
docker-build-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper .
```
README.md (13 changed lines)
@@ -140,11 +140,7 @@ docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri
### Intel GPU Images (oneAPI):

```bash
# Intel GPU with FP16 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f16

# Intel GPU with FP32 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f32
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel
```

### Vulkan GPU Images:

@@ -166,7 +162,7 @@ docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-ai
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11

# Intel GPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel-f16
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel

# AMD GPU version
docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas
@@ -189,10 +185,14 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
local-ai run oci://localai/phi-2:latest
```

> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
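Backends can also be installed by hand from a local tarball; a minimal sketch, assuming a backend archive produced by the `backends/*` Makefile targets shown earlier in this diff (the path is illustrative):

```bash
# Install a locally built backend from an OCI tarball (example path)
./local-ai backends install "ocifile://$PWD/backend-images/llama-cpp.tar"
```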

For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
## 📰 Latest project news

- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips (with the `development` suffix in the gallery): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API, featuring [rf-detr](https://github.com/roboflow/rf-detr)
- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight and smaller, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated in the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
- May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in the llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), support for Gemma, SmolVLM, and more multimodal models (available in the gallery).
@@ -226,6 +226,7 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface](https://localai.io/models/)
- 🥽 [Vision API](https://localai.io/features/gpt-vision/)
- 🔍 [Object Detection](https://localai.io/features/object-detection/)
- 📈 [Reranker API](https://localai.io/features/reranker/)
- 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
- [Agentic capabilities](https://github.com/mudler/LocalAGI)
@@ -96,17 +96,6 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
|
||||
ldconfig \
|
||||
; fi
|
||||
|
||||
# Intel oneAPI requirements
|
||||
RUN <<EOT bash
|
||||
if [[ "${BUILD_TYPE}" == sycl* ]] && [ "${SKIP_DRIVERS}" = "false" ]; then
|
||||
apt-get update && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
intel-oneapi-runtime-libs && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
fi
|
||||
EOT
|
||||
|
||||
# Install Go
|
||||
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
|
||||
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin
|
||||
|
||||
@@ -23,7 +23,7 @@ RUN apt-get update && \
|
||||
libssl-dev \
|
||||
git \
|
||||
git-lfs \
|
||||
unzip \
|
||||
unzip clang \
|
||||
upx-ucl \
|
||||
curl python3-pip \
|
||||
python-is-python3 \
|
||||
@@ -116,7 +116,7 @@ COPY python/${BACKEND} /${BACKEND}
|
||||
COPY backend.proto /${BACKEND}/backend.proto
|
||||
COPY python/common/ /${BACKEND}/common
|
||||
|
||||
RUN cd /${BACKEND} && make
|
||||
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make
|
||||
|
||||
FROM scratch
|
||||
ARG BACKEND=rerankers
|
||||
|
||||
@@ -20,6 +20,7 @@ service Backend {
  rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
  rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
  rpc Status(HealthMessage) returns (StatusResponse) {}
  rpc Detect(DetectOptions) returns (DetectResponse) {}

  rpc StoresSet(StoresSetOptions) returns (Result) {}
  rpc StoresDelete(StoresDeleteOptions) returns (Result) {}

@@ -304,6 +305,9 @@ message GenerateImageRequest {
  // Diffusers
  string EnableParameters = 10;
  int32 CLIPSkip = 11;

  // Reference images for models that support them (e.g., Flux Kontext)
  repeated string ref_images = 12;
}

message GenerateVideoRequest {
@@ -376,3 +380,20 @@ message Message {
  string role = 1;
  string content = 2;
}

message DetectOptions {
  string src = 1;
}

message Detection {
  float x = 1;
  float y = 2;
  float width = 3;
  float height = 4;
  float confidence = 5;
  string class_name = 6;
}

message DetectResponse {
  repeated Detection Detections = 1;
}
@@ -1,5 +1,5 @@
|
||||
|
||||
LLAMA_VERSION?=3f4fc97f1d745f1d5d3c853949503136d419e6de
|
||||
LLAMA_VERSION?=710dfc465a68f7443b87d9f792cffba00ed739fe
|
||||
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
|
||||
|
||||
CMAKE_ARGS?=
|
||||
@@ -26,16 +26,14 @@ else ifeq ($(BUILD_TYPE),openblas)
|
||||
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
|
||||
else ifeq ($(BUILD_TYPE),clblas)
|
||||
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
|
||||
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
|
||||
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
|
||||
else ifeq ($(BUILD_TYPE),hipblas)
|
||||
ROCM_HOME ?= /opt/rocm
|
||||
ROCM_PATH ?= /opt/rocm
|
||||
export CXX=$(ROCM_HOME)/llvm/bin/clang++
|
||||
export CC=$(ROCM_HOME)/llvm/bin/clang
|
||||
# GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
|
||||
# AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
|
||||
CMAKE_ARGS+=-DGGML_HIP=ON
|
||||
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
|
||||
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
|
||||
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
|
||||
else ifeq ($(BUILD_TYPE),vulkan)
|
||||
CMAKE_ARGS+=-DGGML_VULKAN=1
|
||||
else ifeq ($(OS),Darwin)
|
||||
|
||||
@@ -53,9 +53,9 @@ static void start_llama_server(server_context& ctx_server) {
|
||||
LOG_INF("%s: model loaded\n", __func__);
|
||||
|
||||
// print sample chat example to make it clear which template is used
|
||||
LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
|
||||
common_chat_templates_source(ctx_server.chat_templates.get()),
|
||||
common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());
|
||||
// LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
|
||||
// common_chat_templates_source(ctx_server.chat_templates.get()),
|
||||
// common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str(), ctx_server.params_base.default_template_kwargs);
|
||||
|
||||
// Reset the chat templates
|
||||
// TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM
|
||||
@@ -313,9 +313,11 @@ static void params_parse(const backend::ModelOptions* request,
|
||||
params.pooling_type = LLAMA_POOLING_TYPE_RANK;
|
||||
}
|
||||
|
||||
|
||||
if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
|
||||
else if (request->ropescaling() == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
|
||||
else { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
|
||||
else if (request->ropescaling() == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
|
||||
|
||||
if ( request->yarnextfactor() != 0.0f ) {
|
||||
params.yarn_ext_factor = request->yarnextfactor();
|
||||
}
|
||||
@@ -435,24 +437,7 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
// process files
|
||||
mtmd::bitmaps bitmaps;
|
||||
const bool has_mtmd = ctx_server.mctx != nullptr;
|
||||
{
|
||||
if (!has_mtmd && !files.empty()) {
|
||||
throw std::runtime_error("This server does not support multimodal");
|
||||
}
|
||||
for (auto & file : files) {
|
||||
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
|
||||
if (!bmp.ptr) {
|
||||
throw std::runtime_error("Failed to load image/audio");
|
||||
}
|
||||
// calculate bitmap hash (for KV caching)
|
||||
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
|
||||
bmp.set_id(hash.c_str());
|
||||
bitmaps.entries.push_back(std::move(bmp));
|
||||
}
|
||||
}
|
||||
|
||||
// process prompt
|
||||
std::vector<server_tokens> inputs;
|
||||
@@ -462,32 +447,10 @@ public:
|
||||
|
||||
if (has_mtmd) {
|
||||
// multimodal
|
||||
std::string prompt_str = prompt.get<std::string>();
|
||||
mtmd_input_text inp_txt = {
|
||||
prompt_str.c_str(),
|
||||
/* add_special */ true,
|
||||
/* parse_special */ true,
|
||||
};
|
||||
mtmd::input_chunks chunks(mtmd_input_chunks_init());
|
||||
auto bitmaps_c_ptr = bitmaps.c_ptr();
|
||||
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
|
||||
chunks.ptr.get(),
|
||||
&inp_txt,
|
||||
bitmaps_c_ptr.data(),
|
||||
bitmaps_c_ptr.size());
|
||||
if (tokenized != 0) {
|
||||
throw std::runtime_error("Failed to tokenize prompt");
|
||||
}
|
||||
|
||||
server_tokens tmp(chunks, true);
|
||||
inputs.push_back(std::move(tmp));
|
||||
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
|
||||
} else {
|
||||
// non-multimodal version
|
||||
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
|
||||
for (auto & p : tokenized_prompts) {
|
||||
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
|
||||
inputs.push_back(std::move(tmp));
|
||||
}
|
||||
// Everything else, including multimodal completions.
|
||||
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
|
||||
}
|
||||
|
||||
tasks.reserve(inputs.size());
|
||||
@@ -628,23 +591,7 @@ public:
|
||||
}
|
||||
|
||||
// process files
|
||||
mtmd::bitmaps bitmaps;
|
||||
const bool has_mtmd = ctx_server.mctx != nullptr;
|
||||
{
|
||||
if (!has_mtmd && !files.empty()) {
|
||||
throw std::runtime_error("This server does not support multimodal");
|
||||
}
|
||||
for (auto & file : files) {
|
||||
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
|
||||
if (!bmp.ptr) {
|
||||
throw std::runtime_error("Failed to load image/audio");
|
||||
}
|
||||
// calculate bitmap hash (for KV caching)
|
||||
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
|
||||
bmp.set_id(hash.c_str());
|
||||
bitmaps.entries.push_back(std::move(bmp));
|
||||
}
|
||||
}
|
||||
|
||||
// process prompt
|
||||
std::vector<server_tokens> inputs;
|
||||
@@ -655,33 +602,10 @@ public:
|
||||
|
||||
if (has_mtmd) {
|
||||
// multimodal
|
||||
std::string prompt_str = prompt.get<std::string>();
|
||||
mtmd_input_text inp_txt = {
|
||||
prompt_str.c_str(),
|
||||
/* add_special */ true,
|
||||
/* parse_special */ true,
|
||||
};
|
||||
mtmd::input_chunks chunks(mtmd_input_chunks_init());
|
||||
auto bitmaps_c_ptr = bitmaps.c_ptr();
|
||||
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
|
||||
chunks.ptr.get(),
|
||||
&inp_txt,
|
||||
bitmaps_c_ptr.data(),
|
||||
bitmaps_c_ptr.size());
|
||||
if (tokenized != 0) {
|
||||
std::cout << "[PREDICT] Failed to tokenize prompt" << std::endl;
|
||||
throw std::runtime_error("Failed to tokenize prompt");
|
||||
}
|
||||
|
||||
server_tokens tmp(chunks, true);
|
||||
inputs.push_back(std::move(tmp));
|
||||
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
|
||||
} else {
|
||||
// non-multimodal version
|
||||
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
|
||||
for (auto & p : tokenized_prompts) {
|
||||
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
|
||||
inputs.push_back(std::move(tmp));
|
||||
}
|
||||
// Everything else, including multimodal completions.
|
||||
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
|
||||
}
|
||||
|
||||
tasks.reserve(inputs.size());
|
||||
@@ -772,7 +696,7 @@ public:
|
||||
json prompt = body.at("prompt");
|
||||
|
||||
|
||||
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
|
||||
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
|
||||
for (const auto & tokens : tokenized_prompts) {
|
||||
// this check is necessary for models that do not add BOS token to the input
|
||||
if (tokens.empty()) {
|
||||
@@ -791,7 +715,7 @@ public:
|
||||
|
||||
task.id = ctx_server.queue_tasks.get_new_id();
|
||||
task.index = i;
|
||||
task.prompt_tokens = server_tokens(tokenized_prompts[i], ctx_server.mctx != nullptr);
|
||||
task.prompt_tokens = std::move(tokenized_prompts[i]);
|
||||
|
||||
// OAI-compat
|
||||
task.params.oaicompat = OAICOMPAT_TYPE_EMBEDDING;
|
||||
@@ -847,8 +771,10 @@ public:
|
||||
}
|
||||
|
||||
// Tokenize the query
|
||||
llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.vocab, request->query(), /* add_special */ false, true)[0];
|
||||
|
||||
auto tokenized_query = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, request->query(), /* add_special */ false, true);
|
||||
if (tokenized_query.size() != 1) {
|
||||
return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"query\" must contain only a single prompt");
|
||||
}
|
||||
// Create and queue the task
|
||||
json responses = json::array();
|
||||
bool error = false;
|
||||
@@ -860,14 +786,14 @@ public:
|
||||
documents.push_back(request->documents(i));
|
||||
}
|
||||
|
||||
auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, documents, /* add_special */ false, true);
|
||||
auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, documents, /* add_special */ false, true);
|
||||
tasks.reserve(tokenized_docs.size());
|
||||
for (size_t i = 0; i < tokenized_docs.size(); i++) {
|
||||
auto tmp = format_rerank(ctx_server.vocab, tokenized_query, tokenized_docs[i]);
|
||||
auto tmp = format_rerank(ctx_server.vocab, tokenized_query[0], tokenized_docs[i]);
|
||||
server_task task = server_task(SERVER_TASK_TYPE_RERANK);
|
||||
task.id = ctx_server.queue_tasks.get_new_id();
|
||||
task.index = i;
|
||||
task.prompt_tokens = server_tokens(tmp, ctx_server.mctx != nullptr);
|
||||
task.prompt_tokens = std::move(tmp);
|
||||
tasks.push_back(std::move(task));
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,8 @@ fi
|
||||
|
||||
# Extend ld library path with the dir where this script is located/lib
|
||||
if [ "$(uname)" == "Darwin" ]; then
|
||||
DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
|
||||
export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH
|
||||
#export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
|
||||
else
|
||||
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
|
||||
fi
|
||||
@@ -57,5 +58,5 @@ fi
|
||||
echo "Using binary: $BINARY"
|
||||
exec $CURDIR/$BINARY "$@"
|
||||
|
||||
# In case we fail execing, just run fallback
|
||||
# We should never reach this point, however just in case we do, run fallback
|
||||
exec $CURDIR/llama-cpp-fallback "$@"
|
||||
backend/go/stablediffusion-ggml/CMakeLists.txt (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
cmake_minimum_required(VERSION 3.12)
|
||||
project(gosd LANGUAGES C CXX)
|
||||
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
|
||||
|
||||
add_subdirectory(./sources/stablediffusion-ggml.cpp)
|
||||
|
||||
add_library(gosd MODULE gosd.cpp)
|
||||
target_link_libraries(gosd PRIVATE stable-diffusion ggml stdc++fs)
|
||||
|
||||
target_include_directories(gosd PUBLIC
|
||||
stable-diffusion.cpp
|
||||
stable-diffusion.cpp/thirdparty
|
||||
)
|
||||
|
||||
set_property(TARGET gosd PROPERTY CXX_STANDARD 17)
|
||||
set_target_properties(gosd PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
|
||||
@@ -1,28 +1,16 @@
|
||||
INCLUDE_PATH := $(abspath ./)
|
||||
LIBRARY_PATH := $(abspath ./)
|
||||
|
||||
AR?=ar
|
||||
CMAKE_ARGS?=
|
||||
BUILD_TYPE?=
|
||||
NATIVE?=false
|
||||
CUDA_LIBPATH?=/usr/local/cuda/lib64/
|
||||
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
|
||||
# keep standard at C11 and C++11
|
||||
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC
|
||||
|
||||
GOCMD?=go
|
||||
CGO_LDFLAGS?=
|
||||
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
|
||||
CGO_LDFLAGS_SYCL=
|
||||
GO_TAGS?=
|
||||
LD_FLAGS?=
|
||||
JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# stablediffusion.cpp (ggml)
|
||||
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
|
||||
STABLEDIFFUSION_GGML_VERSION?=eed97a5e1d054f9c1e7ac01982ae480411d4157e
|
||||
STABLEDIFFUSION_GGML_VERSION?=5900ef6605c6fbf7934239f795c13c97bc993853
|
||||
|
||||
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
|
||||
CMAKE_ARGS+=-DGGML_MAX_NAME=128
|
||||
|
||||
ifeq ($(NATIVE),false)
|
||||
CMAKE_ARGS+=-DGGML_NATIVE=OFF
|
||||
@@ -31,7 +19,6 @@ endif
|
||||
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
|
||||
ifeq ($(BUILD_TYPE),cublas)
|
||||
CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON
|
||||
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
|
||||
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
|
||||
# to CMAKE_ARGS automatically
|
||||
else ifeq ($(BUILD_TYPE),openblas)
|
||||
@@ -46,14 +33,12 @@ else ifeq ($(BUILD_TYPE),hipblas)
|
||||
# But if it's OSX without metal, disable it here
|
||||
else ifeq ($(BUILD_TYPE),vulkan)
|
||||
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
|
||||
CGO_LDFLAGS+=-lvulkan
|
||||
else ifeq ($(OS),Darwin)
|
||||
ifneq ($(BUILD_TYPE),metal)
|
||||
CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF
|
||||
else
|
||||
CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON
|
||||
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
|
||||
TARGET+=--target ggml-metal
|
||||
endif
|
||||
endif
|
||||
|
||||
@@ -63,12 +48,6 @@ ifeq ($(BUILD_TYPE),sycl_f16)
|
||||
-DCMAKE_CXX_COMPILER=icpx \
|
||||
-DSD_SYCL=ON \
|
||||
-DGGML_SYCL_F16=ON
|
||||
export CC=icx
|
||||
export CXX=icpx
|
||||
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
|
||||
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
|
||||
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
|
||||
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
|
||||
endif
|
||||
|
||||
ifeq ($(BUILD_TYPE),sycl_f32)
|
||||
@@ -76,73 +55,24 @@ ifeq ($(BUILD_TYPE),sycl_f32)
|
||||
-DCMAKE_C_COMPILER=icx \
|
||||
-DCMAKE_CXX_COMPILER=icpx \
|
||||
-DSD_SYCL=ON
|
||||
export CC=icx
|
||||
export CXX=icpx
|
||||
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
|
||||
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
|
||||
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
|
||||
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
|
||||
endif
|
||||
|
||||
# warnings
|
||||
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
|
||||
|
||||
# Find all .a archives in ARCHIVE_DIR
|
||||
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
|
||||
GGML_ARCHIVE_DIR := build/ggml/src/
|
||||
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
|
||||
ALL_OBJS := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.o')
|
||||
|
||||
# Name of the single merged library
|
||||
COMBINED_LIB := libggmlall.a
|
||||
|
||||
# Instead of using the archives generated by GGML, use the object files directly to avoid overwriting objects with the same base name
|
||||
$(COMBINED_LIB): $(ALL_ARCHIVES)
|
||||
@echo "Merging all .o into $(COMBINED_LIB): $(ALL_OBJS)"
|
||||
rm -f $@
|
||||
ar -qc $@ $(ALL_OBJS)
|
||||
# Ensure we have a proper index
|
||||
ranlib $@
|
||||
|
||||
build/libstable-diffusion.a:
|
||||
@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
|
||||
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
|
||||
+bash -c "source $(ONEAPI_VARS); \
|
||||
mkdir -p build && \
|
||||
cd build && \
|
||||
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
|
||||
cmake --build . --config Release"
|
||||
else
|
||||
mkdir -p build && \
|
||||
cd build && \
|
||||
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
|
||||
cmake --build . --config Release
|
||||
endif
|
||||
$(MAKE) $(COMBINED_LIB)
|
||||
|
||||
gosd.o:
|
||||
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
|
||||
+bash -c "source $(ONEAPI_VARS); \
|
||||
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
|
||||
else
|
||||
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
|
||||
endif
|
||||
|
||||
## stablediffusion (ggml)
|
||||
sources/stablediffusion-ggml.cpp:
|
||||
git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
|
||||
cd sources/stablediffusion-ggml.cpp && \
|
||||
git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
|
||||
git submodule update --init --recursive --depth 1 --single-branch
|
||||
|
||||
libsd.a: sources/stablediffusion-ggml.cpp build/libstable-diffusion.a gosd.o
|
||||
cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
|
||||
$(AR) rcs libsd.a gosd.o
|
||||
libgosd.so: sources/stablediffusion-ggml.cpp CMakeLists.txt gosd.cpp gosd.h
|
||||
mkdir -p build && \
|
||||
cd build && \
|
||||
cmake .. $(CMAKE_ARGS) && \
|
||||
cmake --build . --config Release -j$(JOBS) && \
|
||||
cd .. && \
|
||||
mv build/libgosd.so ./
|
||||
|
||||
stablediffusion-ggml: libsd.a
|
||||
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
|
||||
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
|
||||
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
|
||||
stablediffusion-ggml: main.go gosd.go libgosd.so
|
||||
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
|
||||
|
||||
package:
|
||||
bash package.sh
|
||||
@@ -150,4 +80,4 @@ package:
|
||||
build: stablediffusion-ggml package
|
||||
|
||||
clean:
|
||||
rm -rf gosd.o libsd.a build $(COMBINED_LIB)
|
||||
rm -rf libgosd.o build stablediffusion-ggml
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
#include <cstdint>
|
||||
#define GGML_MAX_NAME 128
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <time.h>
|
||||
@@ -5,6 +8,7 @@
|
||||
#include <random>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <filesystem>
|
||||
#include "gosd.h"
|
||||
|
||||
// #include "preprocessing.hpp"
|
||||
@@ -54,7 +58,7 @@ sd_ctx_t* sd_c;
|
||||
sample_method_t sample_method;
|
||||
|
||||
// Copied from the upstream CLI
|
||||
void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
|
||||
static void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
|
||||
//SDParams* params = (SDParams*)data;
|
||||
const char* level_str;
|
||||
|
||||
@@ -85,31 +89,33 @@ void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
|
||||
fflush(stderr);
|
||||
}
|
||||
|
||||
int load_model(char *model, char* options[], int threads, int diff) {
|
||||
fprintf (stderr, "Loading model!\n");
|
||||
int load_model(const char *model, char *model_path, char* options[], int threads, int diff) {
|
||||
fprintf (stderr, "Loading model: %p=%s\n", model, model);
|
||||
|
||||
sd_set_log_callback(sd_log_cb, NULL);
|
||||
|
||||
char *stableDiffusionModel = "";
|
||||
const char *stableDiffusionModel = "";
|
||||
if (diff == 1 ) {
|
||||
stableDiffusionModel = model;
|
||||
model = "";
|
||||
}
|
||||
|
||||
// decode options. Options are in the form optname:optvalue, or just optname for booleans.
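// Example (illustrative values only): options = {"sampler:euler_a", "lora_dir:loras"}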
|
||||
char *clip_l_path = "";
|
||||
char *clip_g_path = "";
|
||||
char *t5xxl_path = "";
|
||||
char *vae_path = "";
|
||||
char *scheduler = "";
|
||||
char *sampler = "";
|
||||
const char *clip_l_path = "";
|
||||
const char *clip_g_path = "";
|
||||
const char *t5xxl_path = "";
|
||||
const char *vae_path = "";
|
||||
const char *scheduler = "";
|
||||
const char *sampler = "";
|
||||
char *lora_dir = model_path;
|
||||
bool lora_dir_allocated = false;
|
||||
|
||||
fprintf(stderr, "parsing options\n");
|
||||
fprintf(stderr, "parsing options: %p\n", options);
|
||||
|
||||
// If options is not NULL, parse options
|
||||
for (int i = 0; options[i] != NULL; i++) {
|
||||
char *optname = strtok(options[i], ":");
|
||||
char *optval = strtok(NULL, ":");
|
||||
const char *optname = strtok(options[i], ":");
|
||||
const char *optval = strtok(NULL, ":");
|
||||
if (optval == NULL) {
|
||||
optval = "true";
|
||||
}
|
||||
@@ -132,6 +138,21 @@ int load_model(char *model, char* options[], int threads, int diff) {
|
||||
if (!strcmp(optname, "sampler")) {
|
||||
sampler = optval;
|
||||
}
|
||||
if (!strcmp(optname, "lora_dir")) {
|
||||
// Path join with model dir
|
||||
if (model_path && strlen(model_path) > 0) {
|
||||
std::filesystem::path model_path_str(model_path);
|
||||
std::filesystem::path lora_path(optval);
|
||||
std::filesystem::path full_lora_path = model_path_str / lora_path;
|
||||
lora_dir = strdup(full_lora_path.string().c_str());
|
||||
lora_dir_allocated = true;
|
||||
fprintf(stderr, "Lora dir resolved to: %s\n", lora_dir);
|
||||
} else {
|
||||
lora_dir = strdup(optval);
|
||||
lora_dir_allocated = true;
|
||||
fprintf(stderr, "No model path provided, using lora dir as-is: %s\n", lora_dir);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr, "parsed options\n");
|
||||
@@ -176,7 +197,7 @@ int load_model(char *model, char* options[], int threads, int diff) {
|
||||
ctx_params.vae_path = vae_path;
|
||||
ctx_params.taesd_path = "";
|
||||
ctx_params.control_net_path = "";
|
||||
ctx_params.lora_model_dir = "";
|
||||
ctx_params.lora_model_dir = lora_dir;
|
||||
ctx_params.embedding_dir = "";
|
||||
ctx_params.stacked_id_embed_dir = "";
|
||||
ctx_params.vae_decode_only = false;
|
||||
@@ -189,16 +210,25 @@ int load_model(char *model, char* options[], int threads, int diff) {
|
||||
|
||||
if (sd_ctx == NULL) {
|
||||
fprintf (stderr, "failed loading model (generic error)\n");
|
||||
// Clean up allocated memory
|
||||
if (lora_dir_allocated && lora_dir) {
|
||||
free(lora_dir);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
fprintf (stderr, "Created context: OK\n");
|
||||
|
||||
sd_c = sd_ctx;
|
||||
|
||||
// Clean up allocated memory
|
||||
if (lora_dir_allocated && lora_dir) {
|
||||
free(lora_dir);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed , char *dst, float cfg_scale) {
|
||||
int gen_image(char *text, char *negativeText, int width, int height, int steps, int64_t seed, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count) {
|
||||
|
||||
sd_image_t* results;
|
||||
|
||||
@@ -221,15 +251,187 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
|
||||
p.seed = seed;
|
||||
p.input_id_images_path = "";
|
||||
|
||||
// Handle input image for img2img
|
||||
bool has_input_image = (src_image != NULL && strlen(src_image) > 0);
|
||||
bool has_mask_image = (mask_image != NULL && strlen(mask_image) > 0);
|
||||
|
||||
uint8_t* input_image_buffer = NULL;
|
||||
uint8_t* mask_image_buffer = NULL;
|
||||
std::vector<uint8_t> default_mask_image_vec;
|
||||
|
||||
if (has_input_image) {
|
||||
fprintf(stderr, "Loading input image: %s\n", src_image);
|
||||
|
||||
int c = 0;
|
||||
int img_width = 0;
|
||||
int img_height = 0;
|
||||
input_image_buffer = stbi_load(src_image, &img_width, &img_height, &c, 3);
|
||||
if (input_image_buffer == NULL) {
|
||||
fprintf(stderr, "Failed to load input image from '%s'\n", src_image);
|
||||
return 1;
|
||||
}
|
||||
if (c < 3) {
|
||||
fprintf(stderr, "Input image must have at least 3 channels, got %d\n", c);
|
||||
free(input_image_buffer);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Resize input image if dimensions don't match
|
||||
if (img_width != width || img_height != height) {
|
||||
fprintf(stderr, "Resizing input image from %dx%d to %dx%d\n", img_width, img_height, width, height);
|
||||
|
||||
uint8_t* resized_image_buffer = (uint8_t*)malloc(height * width * 3);
|
||||
if (resized_image_buffer == NULL) {
|
||||
fprintf(stderr, "Failed to allocate memory for resized image\n");
|
||||
free(input_image_buffer);
|
||||
return 1;
|
||||
}
|
||||
|
||||
stbir_resize(input_image_buffer, img_width, img_height, 0,
|
||||
resized_image_buffer, width, height, 0, STBIR_TYPE_UINT8,
|
||||
3, STBIR_ALPHA_CHANNEL_NONE, 0,
|
||||
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
|
||||
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
|
||||
STBIR_COLORSPACE_SRGB, nullptr);
|
||||
|
||||
free(input_image_buffer);
|
||||
input_image_buffer = resized_image_buffer;
|
||||
}
|
||||
|
||||
p.init_image = {(uint32_t)width, (uint32_t)height, 3, input_image_buffer};
|
||||
p.strength = strength;
|
||||
fprintf(stderr, "Using img2img with strength: %.2f\n", strength);
|
||||
} else {
|
||||
// No input image, use empty image for text-to-image
|
||||
p.init_image = {(uint32_t)width, (uint32_t)height, 3, NULL};
|
||||
p.strength = 0.0f;
|
||||
}
|
||||
|
||||
// Handle mask image for inpainting
|
||||
if (has_mask_image) {
|
||||
fprintf(stderr, "Loading mask image: %s\n", mask_image);
|
||||
|
||||
int c = 0;
|
||||
int mask_width = 0;
|
||||
int mask_height = 0;
|
||||
mask_image_buffer = stbi_load(mask_image, &mask_width, &mask_height, &c, 1);
|
||||
if (mask_image_buffer == NULL) {
|
||||
fprintf(stderr, "Failed to load mask image from '%s'\n", mask_image);
|
||||
if (input_image_buffer) free(input_image_buffer);
|
||||
return 1;
|
||||
}
|
||||
|
||||
// Resize mask if dimensions don't match
|
||||
if (mask_width != width || mask_height != height) {
|
||||
fprintf(stderr, "Resizing mask image from %dx%d to %dx%d\n", mask_width, mask_height, width, height);
|
||||
|
||||
uint8_t* resized_mask_buffer = (uint8_t*)malloc(height * width);
|
||||
if (resized_mask_buffer == NULL) {
|
||||
fprintf(stderr, "Failed to allocate memory for resized mask\n");
|
||||
free(mask_image_buffer);
|
||||
if (input_image_buffer) free(input_image_buffer);
|
||||
return 1;
|
||||
}
|
||||
|
||||
stbir_resize(mask_image_buffer, mask_width, mask_height, 0,
|
||||
resized_mask_buffer, width, height, 0, STBIR_TYPE_UINT8,
|
||||
1, STBIR_ALPHA_CHANNEL_NONE, 0,
|
||||
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
|
||||
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
|
||||
STBIR_COLORSPACE_SRGB, nullptr);
|
||||
|
||||
free(mask_image_buffer);
|
||||
mask_image_buffer = resized_mask_buffer;
|
||||
}
|
||||
|
||||
p.mask_image = {(uint32_t)width, (uint32_t)height, 1, mask_image_buffer};
|
||||
fprintf(stderr, "Using inpainting with mask\n");
|
||||
} else {
|
||||
// No mask image, create default full mask
|
||||
default_mask_image_vec.resize(width * height, 255);
|
||||
p.mask_image = {(uint32_t)width, (uint32_t)height, 1, default_mask_image_vec.data()};
|
||||
}
|
||||
|
||||
// Handle reference images
|
||||
std::vector<sd_image_t> ref_images_vec;
|
||||
std::vector<uint8_t*> ref_image_buffers;
|
||||
|
||||
if (ref_images_count > 0 && ref_images != NULL) {
|
||||
fprintf(stderr, "Loading %d reference images\n", ref_images_count);
|
||||
|
||||
for (int i = 0; i < ref_images_count; i++) {
|
||||
if (ref_images[i] == NULL || strlen(ref_images[i]) == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
fprintf(stderr, "Loading reference image %d: %s\n", i + 1, ref_images[i]);
|
||||
|
||||
int c = 0;
|
||||
int ref_width = 0;
|
||||
int ref_height = 0;
|
||||
uint8_t* ref_image_buffer = stbi_load(ref_images[i], &ref_width, &ref_height, &c, 3);
|
||||
if (ref_image_buffer == NULL) {
|
||||
fprintf(stderr, "Failed to load reference image from '%s'\n", ref_images[i]);
|
||||
continue;
|
||||
}
|
||||
if (c < 3) {
|
||||
fprintf(stderr, "Reference image must have at least 3 channels, got %d\n", c);
|
||||
free(ref_image_buffer);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Resize reference image if dimensions don't match
|
||||
if (ref_width != width || ref_height != height) {
|
||||
fprintf(stderr, "Resizing reference image from %dx%d to %dx%d\n", ref_width, ref_height, width, height);
|
||||
|
||||
uint8_t* resized_ref_buffer = (uint8_t*)malloc(height * width * 3);
|
||||
if (resized_ref_buffer == NULL) {
|
||||
fprintf(stderr, "Failed to allocate memory for resized reference image\n");
|
||||
free(ref_image_buffer);
|
||||
continue;
|
||||
}
|
||||
|
||||
stbir_resize(ref_image_buffer, ref_width, ref_height, 0,
|
||||
resized_ref_buffer, width, height, 0, STBIR_TYPE_UINT8,
|
||||
3, STBIR_ALPHA_CHANNEL_NONE, 0,
|
||||
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
|
||||
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
|
||||
STBIR_COLORSPACE_SRGB, nullptr);
|
||||
|
||||
free(ref_image_buffer);
|
||||
ref_image_buffer = resized_ref_buffer;
|
||||
}
|
||||
|
||||
ref_image_buffers.push_back(ref_image_buffer);
|
||||
ref_images_vec.push_back({(uint32_t)width, (uint32_t)height, 3, ref_image_buffer});
|
||||
}
|
||||
|
||||
if (!ref_images_vec.empty()) {
|
||||
p.ref_images = ref_images_vec.data();
|
||||
p.ref_images_count = ref_images_vec.size();
|
||||
fprintf(stderr, "Using %zu reference images\n", ref_images_vec.size());
|
||||
}
|
||||
}
|
||||
|
||||
results = generate_image(sd_c, &p);
|
||||
|
||||
if (results == NULL) {
|
||||
fprintf (stderr, "NO results\n");
|
||||
if (input_image_buffer) free(input_image_buffer);
|
||||
if (mask_image_buffer) free(mask_image_buffer);
|
||||
for (auto buffer : ref_image_buffers) {
|
||||
if (buffer) free(buffer);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (results[0].data == NULL) {
|
||||
fprintf (stderr, "Results with no data\n");
|
||||
if (input_image_buffer) free(input_image_buffer);
|
||||
if (mask_image_buffer) free(mask_image_buffer);
|
||||
for (auto buffer : ref_image_buffers) {
|
||||
if (buffer) free(buffer);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -245,17 +447,21 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
|
||||
results[0].data, 0, NULL);
|
||||
fprintf (stderr, "Saved resulting image to '%s'\n", dst);
|
||||
|
||||
// TODO: free results. Why does it crash?
|
||||
|
||||
// Clean up
|
||||
free(results[0].data);
|
||||
results[0].data = NULL;
|
||||
free(results);
|
||||
fprintf (stderr, "gen_image is done", dst);
|
||||
if (input_image_buffer) free(input_image_buffer);
|
||||
if (mask_image_buffer) free(mask_image_buffer);
|
||||
for (auto buffer : ref_image_buffers) {
|
||||
if (buffer) free(buffer);
|
||||
}
|
||||
fprintf (stderr, "gen_image is done: %s", dst);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int unload() {
|
||||
free_sd_ctx(sd_c);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,15 +1,10 @@
|
||||
package main
|
||||
|
||||
// #cgo CXXFLAGS: -I${SRCDIR}/sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/sources/stablediffusion-ggml.cpp -I${SRCDIR}/sources/stablediffusion-ggml.cpp/ggml/include
|
||||
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
|
||||
// #include <gosd.h>
|
||||
// #include <stdlib.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
@@ -25,20 +20,34 @@ type SDGGML struct {
|
||||
cfgScale float32
|
||||
}
|
||||
|
||||
var (
|
||||
LoadModel func(model, model_path string, options []uintptr, threads int32, diff int) int
|
||||
GenImage func(text, negativeText string, width, height, steps int, seed int64, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []string, refImagesCount int) int
|
||||
)
|
||||
|
||||
// Copied from Purego internal/strings
|
||||
// TODO: We should upstream sending []string
|
||||
func hasSuffix(s, suffix string) bool {
|
||||
return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
|
||||
}
|
||||
|
||||
func CString(name string) *byte {
|
||||
if hasSuffix(name, "\x00") {
|
||||
return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
|
||||
}
|
||||
b := make([]byte, len(name)+1)
|
||||
copy(b, name)
|
||||
return &b[0]
|
||||
}
|
||||
|
||||
func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
|
||||
|
||||
sd.threads = int(opts.Threads)
|
||||
|
||||
modelFile := C.CString(opts.ModelFile)
|
||||
defer C.free(unsafe.Pointer(modelFile))
|
||||
modelPath := opts.ModelPath
|
||||
|
||||
var options **C.char
|
||||
// prepare the options array to pass to C
|
||||
|
||||
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
|
||||
length := C.size_t(len(opts.Options))
|
||||
options = (**C.char)(C.malloc((length + 1) * size))
|
||||
view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0:len(opts.Options) + 1:len(opts.Options) + 1]
|
||||
modelFile := opts.ModelFile
|
||||
modelPathC := modelPath
|
||||
|
||||
var diffusionModel int
|
||||
|
||||
@@ -63,32 +72,55 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
|
||||
|
||||
fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
|
||||
|
||||
for i, x := range oo {
|
||||
view[i] = C.CString(x)
|
||||
// At the time of writing Purego doesn't recurse into slices and convert Go strings to pointers so we need to do that
|
||||
var keepAlive []any
|
||||
options := make([]uintptr, len(oo), len(oo)+1)
|
||||
for i, op := range oo {
|
||||
bytep := CString(op)
|
||||
options[i] = uintptr(unsafe.Pointer(bytep))
|
||||
keepAlive = append(keepAlive, bytep)
|
||||
}
|
||||
view[len(oo)] = nil
|
||||
|
||||
sd.cfgScale = opts.CFGScale
|
||||
|
||||
ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
|
||||
ret := LoadModel(modelFile, modelPathC, options, opts.Threads, diffusionModel)
|
||||
if ret != 0 {
|
||||
return fmt.Errorf("could not load model")
|
||||
}
|
||||
|
||||
runtime.KeepAlive(keepAlive)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
|
||||
t := C.CString(opts.PositivePrompt)
|
||||
defer C.free(unsafe.Pointer(t))
|
||||
t := opts.PositivePrompt
|
||||
dst := opts.Dst
|
||||
negative := opts.NegativePrompt
|
||||
srcImage := opts.Src
|
||||
|
||||
dst := C.CString(opts.Dst)
|
||||
defer C.free(unsafe.Pointer(dst))
|
||||
var maskImage string
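// EnableParameters may carry an inline inpainting mask in the form "mask:<path>", e.g. "mask:/tmp/mask.png" (illustrative path).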
|
||||
if opts.EnableParameters != "" {
|
||||
if strings.Contains(opts.EnableParameters, "mask:") {
|
||||
parts := strings.Split(opts.EnableParameters, "mask:")
|
||||
if len(parts) > 1 {
|
||||
maskPath := strings.TrimSpace(parts[1])
|
||||
if maskPath != "" {
|
||||
maskImage = maskPath
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
negative := C.CString(opts.NegativePrompt)
|
||||
defer C.free(unsafe.Pointer(negative))
|
||||
refImagesCount := len(opts.RefImages)
|
||||
refImages := make([]string, refImagesCount, refImagesCount+1)
|
||||
copy(refImages, opts.RefImages)
|
||||
*(*uintptr)(unsafe.Add(unsafe.Pointer(&refImages), refImagesCount)) = 0
|
||||
|
||||
ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
|
||||
// Default strength for img2img (0.75 is a good default)
|
||||
strength := float32(0.75)
|
||||
|
||||
ret := GenImage(t, negative, int(opts.Width), int(opts.Height), int(opts.Step), int64(opts.Seed), dst, sd.cfgScale, srcImage, strength, maskImage, refImages, refImagesCount)
|
||||
if ret != 0 {
|
||||
return fmt.Errorf("inference failed")
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
int load_model(char *model, char* options[], int threads, int diffusionModel);
|
||||
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
|
||||
int load_model(const char *model, char *model_path, char* options[], int threads, int diffusionModel);
|
||||
int gen_image(char *text, char *negativeText, int width, int height, int steps, int64_t seed, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count);
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
package main
|
||||
|
||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
||||
import (
|
||||
"flag"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
@@ -12,6 +12,14 @@ var (
|
||||
)
|
||||
|
||||
func main() {
|
||||
gosd, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
purego.RegisterLibFunc(&LoadModel, gosd, "load_model")
|
||||
purego.RegisterLibFunc(&GenImage, gosd, "gen_image")
|
||||
|
||||
flag.Parse()
|
||||
|
||||
if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {
|
||||
|
||||
@@ -10,6 +10,7 @@ CURDIR=$(dirname "$(realpath $0)")
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
|
||||
cp -avrf $CURDIR/libgosd.so $CURDIR/package/
|
||||
cp -avrf $CURDIR/stablediffusion-ggml $CURDIR/package/
|
||||
cp -rfv $CURDIR/run.sh $CURDIR/package/
|
||||
|
||||
@@ -47,6 +48,6 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Packaging completed successfully"
|
||||
echo "Packaging completed successfully"
|
||||
ls -liah $CURDIR/package/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
ls -liah $CURDIR/package/lib/
|
||||
|
||||
@@ -6,7 +6,7 @@ CMAKE_ARGS?=
|
||||
|
||||
# whisper.cpp version
|
||||
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
|
||||
WHISPER_CPP_VERSION?=7de8dd783f7b2eab56bff6bbc5d3369e34f0e77f
|
||||
WHISPER_CPP_VERSION?=fc45bb86251f774ef817e89878bb4c2636c8a58f
|
||||
|
||||
export WHISPER_CMAKE_ARGS?=-DBUILD_SHARED_LIBS=OFF
|
||||
export WHISPER_DIR=$(abspath ./sources/whisper.cpp)
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,29 +1,23 @@
|
||||
.PHONY: ttsbark
|
||||
ttsbark: protogen
|
||||
ttsbark:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: protogen
|
||||
run: ttsbark
|
||||
@echo "Running bark..."
|
||||
bash run.sh
|
||||
@echo "bark run."
|
||||
|
||||
.PHONY: test
|
||||
test: protogen
|
||||
test: ttsbark
|
||||
@echo "Testing bark..."
|
||||
bash test.sh
|
||||
@echo "bark tested."
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchaudio==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
bark==0.1.5
|
||||
grpcio==1.71.0
|
||||
grpcio==1.74.0
|
||||
protobuf
|
||||
certifi
|
||||
@@ -1,29 +1,23 @@
|
||||
.PHONY: coqui
|
||||
coqui: protogen
|
||||
.PHONY: chatterbox
|
||||
chatterbox:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: protogen
|
||||
run: chatterbox
|
||||
@echo "Running coqui..."
|
||||
bash run.sh
|
||||
@echo "coqui run."
|
||||
|
||||
.PHONY: test
|
||||
test: protogen
|
||||
test: chatterbox
|
||||
@echo "Testing coqui..."
|
||||
bash test.sh
|
||||
@echo "coqui tested."
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
@@ -41,7 +41,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
else:
|
||||
print("CUDA is not available", file=sys.stderr)
|
||||
device = "cpu"
|
||||
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
|
||||
# init handles the setup of the library
|
||||
#
|
||||
# use the library by adding the following line to a script:
|
||||
# source $(dirname $0)/../common/libbackend.sh
|
||||
@@ -17,29 +17,182 @@
|
||||
# LIMIT_TARGETS="cublas12"
|
||||
# source $(dirname $0)/../common/libbackend.sh
|
||||
#
|
||||
# You can switch between uv (conda-like) and pip installation methods by setting USE_PIP:
|
||||
# USE_PIP=true source $(dirname $0)/../common/libbackend.sh
|
||||
#
|
||||
# ===================== user-configurable defaults =====================
|
||||
PYTHON_VERSION="${PYTHON_VERSION:-3.10}" # e.g. 3.10 / 3.11 / 3.12 / 3.13
|
||||
PYTHON_PATCH="${PYTHON_PATCH:-18}" # e.g. 18 -> 3.10.18 ; 13 -> 3.11.13
|
||||
PY_STANDALONE_TAG="${PY_STANDALONE_TAG:-20250818}" # release tag date
|
||||
# Enable/disable bundling of a portable Python build
|
||||
PORTABLE_PYTHON="${PORTABLE_PYTHON:-false}"
|
||||
|
||||
PYTHON_VERSION="3.10"
|
||||
# If you want to fully pin the filename (including tuned CPU targets), set:
|
||||
# PORTABLE_PY_FILENAME="cpython-3.10.18+20250818-x86_64_v3-unknown-linux-gnu-install_only.tar.gz"
|
||||
: "${PORTABLE_PY_FILENAME:=}"
|
||||
: "${PORTABLE_PY_SHA256:=}" # optional; if set we verify the download
|
||||
# =====================================================================
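# Example override (values are illustrative; see the PYTHON_PATCH note above for 3.11.13):
#   PORTABLE_PYTHON=true PYTHON_VERSION=3.11 PYTHON_PATCH=13 source $(dirname $0)/../common/libbackend.sh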
|
||||
|
||||
# Default to uv if USE_PIP is not set
|
||||
if [ "x${USE_PIP:-}" == "x" ]; then
|
||||
USE_PIP=false
|
||||
fi
|
||||
|
||||
# ----------------------- helpers -----------------------
|
||||
function _is_musl() {
|
||||
# detect musl (Alpine, etc)
|
||||
if command -v ldd >/dev/null 2>&1; then
|
||||
ldd --version 2>&1 | grep -qi musl && return 0
|
||||
fi
|
||||
# busybox-ish fallback
|
||||
if command -v getconf >/dev/null 2>&1; then
|
||||
getconf GNU_LIBC_VERSION >/dev/null 2>&1 || return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
function _triple() {
|
||||
local os="" arch="" libc="gnu"
|
||||
case "$(uname -s)" in
|
||||
Linux*) os="unknown-linux" ;;
|
||||
Darwin*) os="apple-darwin" ;;
|
||||
MINGW*|MSYS*|CYGWIN*) os="pc-windows-msvc" ;; # best-effort for Git Bash
|
||||
*) echo "Unsupported OS $(uname -s)"; exit 1;;
|
||||
esac
|
||||
|
||||
case "$(uname -m)" in
|
||||
x86_64) arch="x86_64" ;;
|
||||
aarch64|arm64) arch="aarch64" ;;
|
||||
armv7l) arch="armv7" ;;
|
||||
i686|i386) arch="i686" ;;
|
||||
ppc64le) arch="ppc64le" ;;
|
||||
s390x) arch="s390x" ;;
|
||||
riscv64) arch="riscv64" ;;
|
||||
*) echo "Unsupported arch $(uname -m)"; exit 1;;
|
||||
esac
|
||||
|
||||
if [[ "$os" == "unknown-linux" ]]; then
|
||||
if _is_musl; then
|
||||
libc="musl"
|
||||
else
|
||||
libc="gnu"
|
||||
fi
|
||||
echo "${arch}-${os}-${libc}"
|
||||
else
|
||||
echo "${arch}-${os}"
|
||||
fi
|
||||
}
|
||||
|
||||
function _portable_dir() {
|
||||
echo "${EDIR}/python"
|
||||
}
|
||||
|
||||
function _portable_bin() {
|
||||
# python-build-standalone puts python in ./bin
|
||||
echo "$(_portable_dir)/bin"
|
||||
}
|
||||
|
||||
function _portable_python() {
|
||||
if [ -x "$(_portable_bin)/python3" ]; then
|
||||
echo "$(_portable_bin)/python3"
|
||||
else
|
||||
echo "$(_portable_bin)/python"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
# macOS loader env for the portable CPython
|
||||
_macosPortableEnv() {
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
export DYLD_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}"
|
||||
export DYLD_FALLBACK_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_FALLBACK_LIBRARY_PATH:+:${DYLD_FALLBACK_LIBRARY_PATH}}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Good hygiene on macOS for downloaded/extracted trees
|
||||
_unquarantinePortablePython() {
|
||||
if [ "$(uname -s)" = "Darwin" ]; then
|
||||
command -v xattr >/dev/null 2>&1 && xattr -dr com.apple.quarantine "$(_portable_dir)" || true
|
||||
fi
|
||||
}
|
||||
|
||||
# ------------------ ### PORTABLE PYTHON ------------------
|
||||
function ensurePortablePython() {
|
||||
local pdir="$(_portable_dir)"
|
||||
local pbin="$(_portable_bin)"
|
||||
local pyexe
|
||||
|
||||
if [ -x "${pbin}/python3" ] || [ -x "${pbin}/python" ]; then
|
||||
_macosPortableEnv
|
||||
return 0
|
||||
fi
|
||||
|
||||
mkdir -p "${pdir}"
|
||||
local triple="$(_triple)"
|
||||
|
||||
local full_ver="${PYTHON_VERSION}.${PYTHON_PATCH}"
|
||||
local fn=""
|
||||
if [ -n "${PORTABLE_PY_FILENAME}" ]; then
|
||||
fn="${PORTABLE_PY_FILENAME}"
|
||||
else
|
||||
# generic asset name: cpython-<full_ver>+<tag>-<triple>-install_only.tar.gz
|
||||
fn="cpython-${full_ver}+${PY_STANDALONE_TAG}-${triple}-install_only.tar.gz"
|
||||
fi
|
||||
|
||||
local url="https://github.com/astral-sh/python-build-standalone/releases/download/${PY_STANDALONE_TAG}/${fn}"
|
||||
local tmp="${pdir}/${fn}"
|
||||
echo "Downloading portable Python: ${fn}"
|
||||
# curl with retries; fall back to wget if needed
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -L --fail --retry 3 --retry-delay 1 -o "${tmp}" "${url}"
|
||||
else
|
||||
wget -O "${tmp}" "${url}"
|
||||
fi
|
||||
|
||||
if [ -n "${PORTABLE_PY_SHA256}" ]; then
|
||||
echo "${PORTABLE_PY_SHA256} ${tmp}" | sha256sum -c -
|
||||
fi
|
||||
|
||||
echo "Extracting ${fn} -> ${pdir}"
|
||||
# always a .tar.gz (we purposely choose install_only)
|
||||
tar -xzf "${tmp}" -C "${pdir}"
|
||||
rm -f "${tmp}"
|
||||
|
||||
# Some archives nest a directory; if so, flatten to ${pdir}
|
||||
# Find the first dir with a 'bin/python*'
|
||||
local inner
|
||||
inner="$(find "${pdir}" -type f -path "*/bin/python*" -maxdepth 3 2>/dev/null | head -n1 || true)"
|
||||
if [ -n "${inner}" ]; then
|
||||
local inner_root
|
||||
inner_root="$(dirname "$(dirname "${inner}")")" # .../bin -> root
|
||||
if [ "${inner_root}" != "${pdir}" ]; then
|
||||
# move contents up one level
|
||||
shopt -s dotglob
|
||||
mv "${inner_root}/"* "${pdir}/"
|
||||
rm -rf "${inner_root}"
|
||||
shopt -u dotglob
|
||||
fi
|
||||
fi
|
||||
|
||||
_unquarantinePortablePython
|
||||
_macosPortableEnv
|
||||
# Make sure it's runnable
|
||||
pyexe="$(_portable_python)"
|
||||
"${pyexe}" -V
|
||||
}
|
||||
|
||||
# init handles the setup of the library
|
||||
function init() {
|
||||
# Name of the backend (directory name)
|
||||
BACKEND_NAME=${PWD##*/}
|
||||
|
||||
# Path where all backends files are
|
||||
MY_DIR=$(realpath `dirname $0`)
|
||||
|
||||
# Build type
|
||||
MY_DIR=$(realpath "$(dirname "$0")")
|
||||
BUILD_PROFILE=$(getBuildProfile)
|
||||
|
||||
# Environment directory
|
||||
EDIR=${MY_DIR}
|
||||
|
||||
# Allow to specify a custom env dir for shared environments
|
||||
if [ "x${ENV_DIR}" != "x" ]; then
|
||||
if [ "x${ENV_DIR:-}" != "x" ]; then
|
||||
EDIR=${ENV_DIR}
|
||||
fi
|
||||
|
||||
# If a backend has defined a list of valid build profiles...
|
||||
if [ ! -z "${LIMIT_TARGETS}" ]; then
|
||||
if [ ! -z "${LIMIT_TARGETS:-}" ]; then
|
||||
isValidTarget=$(checkTargets ${LIMIT_TARGETS})
|
||||
if [ ${isValidTarget} != true ]; then
|
||||
echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}"
|
||||
@@ -50,6 +203,7 @@ function init() {
|
||||
echo "Initializing libbackend for ${BACKEND_NAME}"
|
||||
}
|
||||
|
||||
|
||||
# getBuildProfile will inspect the system to determine which build profile is appropriate:
|
||||
# returns one of the following:
|
||||
# - cublas11
|
||||
@@ -57,53 +211,139 @@ function init() {
|
||||
# - hipblas
|
||||
# - intel
|
||||
function getBuildProfile() {
|
||||
# First check if we are a cublas build, and if so report the correct build profile
|
||||
if [ x"${BUILD_TYPE}" == "xcublas" ]; then
|
||||
if [ ! -z ${CUDA_MAJOR_VERSION} ]; then
|
||||
# If we have been given a CUDA version, we trust it
|
||||
if [ x"${BUILD_TYPE:-}" == "xcublas" ]; then
|
||||
if [ ! -z "${CUDA_MAJOR_VERSION:-}" ]; then
|
||||
echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION}
|
||||
else
|
||||
# We don't know what version of cuda we are, so we report ourselves as a generic cublas
|
||||
echo ${BUILD_TYPE}
|
||||
fi
|
||||
return 0
|
||||
fi
|
||||
|
||||
# If /opt/intel exists, then we are doing an intel/ARC build
|
||||
if [ -d "/opt/intel" ]; then
|
||||
echo "intel"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# For any other value of BUILD_TYPE, no special handling/discovery is needed
|
||||
if [ ! -z ${BUILD_TYPE} ]; then
|
||||
if [ -n "${BUILD_TYPE:-}" ]; then
|
||||
echo ${BUILD_TYPE}
|
||||
return 0
|
||||
fi
|
||||
|
||||
# If there is no BUILD_TYPE set at all, set a build-profile value of CPU, we aren't building for any GPU targets
|
||||
echo "cpu"
|
||||
}
|
||||
|
||||
|
||||
# Make the venv relocatable:
|
||||
# - rewrite venv/bin/python{,3} to relative symlinks into $(_portable_dir)
|
||||
# - normalize entrypoint shebangs to /usr/bin/env python3
|
||||
_makeVenvPortable() {
|
||||
local venv_dir="${EDIR}/venv"
|
||||
local vbin="${venv_dir}/bin"
|
||||
|
||||
[ -d "${vbin}" ] || return 0
|
||||
|
||||
# 1) Replace python symlinks with relative ones to ../../python/bin/python3
|
||||
# (venv/bin -> venv -> EDIR -> python/bin)
|
||||
local rel_py='../../python/bin/python3'
|
||||
|
||||
for name in python3 python; do
|
||||
if [ -e "${vbin}/${name}" ] || [ -L "${vbin}/${name}" ]; then
|
||||
rm -f "${vbin}/${name}"
|
||||
fi
|
||||
done
|
||||
ln -s "${rel_py}" "${vbin}/python3"
|
||||
ln -s "python3" "${vbin}/python"
|
||||
|
||||
# 2) Rewrite shebangs of entry points to use env, so the venv is relocatable
|
||||
# Only touch text files that start with #! and reference the current venv.
|
||||
local ve_abs="${vbin}/python"
|
||||
local sed_i=(sed -i)
|
||||
# macOS/BSD sed needs a backup suffix; GNU sed doesn't. Make it portable:
|
||||
if sed --version >/dev/null 2>&1; then
|
||||
sed_i=(sed -i)
|
||||
else
|
||||
sed_i=(sed -i '')
|
||||
fi
|
||||
|
||||
for f in "${vbin}"/*; do
|
||||
[ -f "$f" ] || continue
|
||||
# Fast path: check first two bytes (#!)
|
||||
head -c2 "$f" 2>/dev/null | grep -q '^#!' || continue
|
||||
# Only rewrite if the shebang mentions the (absolute) venv python
|
||||
if head -n1 "$f" | grep -Fq "${ve_abs}"; then
|
||||
"${sed_i[@]}" '1s|^#!.*$|#!/usr/bin/env python3|' "$f"
|
||||
chmod +x "$f" 2>/dev/null || true
|
||||
fi
|
||||
done
|
||||
}
|
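A rough way to sanity-check the result once the function has run (commands are illustrative, with EDIR pointing at the backend's environment directory):

```bash
# Rough post-check after _makeVenvPortable:
readlink "${EDIR}/venv/bin/python3"          # expected: ../../python/bin/python3 (relative symlink)
head -n1 "${EDIR}/venv/bin/"* | grep '^#!'   # entry points should now start with #!/usr/bin/env python3
```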
||||
|
||||
|
||||
# ensureVenv makes sure that the venv for the backend both exists, and is activated.
|
||||
#
|
||||
# This function is idempotent, so you can call it as many times as you want and it will
|
||||
# always result in an activated virtual environment
|
||||
function ensureVenv() {
|
||||
local interpreter=""
|
||||
|
||||
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
|
||||
ensurePortablePython
|
||||
interpreter="$(_portable_python)"
|
||||
else
|
||||
# Prefer system python${PYTHON_VERSION}, else python3, else fall back to bundled
|
||||
if command -v python${PYTHON_VERSION} >/dev/null 2>&1; then
|
||||
interpreter="python${PYTHON_VERSION}"
|
||||
elif command -v python3 >/dev/null 2>&1; then
|
||||
interpreter="python3"
|
||||
else
|
||||
echo "No suitable system Python found, bootstrapping portable build..."
|
||||
ensurePortablePython
|
||||
interpreter="$(_portable_python)"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -d "${EDIR}/venv" ]; then
|
||||
uv venv --python ${PYTHON_VERSION} ${EDIR}/venv
|
||||
echo "virtualenv created"
|
||||
if [ "x${USE_PIP}" == "xtrue" ]; then
|
||||
"${interpreter}" -m venv --copies "${EDIR}/venv"
|
||||
source "${EDIR}/venv/bin/activate"
|
||||
"${interpreter}" -m pip install --upgrade pip
|
||||
else
|
||||
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
|
||||
uv venv --python "${interpreter}" "${EDIR}/venv"
|
||||
else
|
||||
uv venv --python "${PYTHON_VERSION}" "${EDIR}/venv"
|
||||
fi
|
||||
fi
|
||||
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
|
||||
_makeVenvPortable
|
||||
fi
|
||||
fi
|
||||
|
||||
# Source if we are not already in a Virtual env
|
||||
if [ "x${VIRTUAL_ENV}" != "x${EDIR}/venv" ]; then
|
||||
source ${EDIR}/venv/bin/activate
|
||||
echo "virtualenv activated"
|
||||
# We call it here to make sure that when we source a venv we can still use python as expected
|
||||
if [ -x "$(_portable_python)" ]; then
|
||||
_macosPortableEnv
|
||||
fi
|
||||
|
||||
echo "activated virtualenv has been ensured"
|
||||
if [ "x${VIRTUAL_ENV:-}" != "x${EDIR}/venv" ]; then
|
||||
source "${EDIR}/venv/bin/activate"
|
||||
fi
|
||||
}
|
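The knobs the function reacts to can be summarised with a short, illustrative invocation (values are examples, not defaults introduced by this change):

```bash
# Illustrative: opt into the bundled interpreter and pip-based venv creation.
# Both variables are read inside ensureVenv; left unset, it falls back to uv with a system python.
export PORTABLE_PYTHON=true   # bootstrap the portable interpreter instead of a system python
export USE_PIP=true           # build the venv with "python -m venv --copies" + pip instead of uv
ensureVenv
```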
||||
|
||||
|
||||
function runProtogen() {
|
||||
ensureVenv
|
||||
if [ "x${USE_PIP}" == "xtrue" ]; then
|
||||
pip install grpcio-tools
|
||||
else
|
||||
uv pip install grpcio-tools
|
||||
fi
|
||||
pushd "${EDIR}" >/dev/null
|
||||
# use the venv python (ensures correct interpreter & sys.path)
|
||||
python -m grpc_tools.protoc -I../../ -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
popd >/dev/null
|
||||
}
|
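This is what lets the per-backend protogen.sh scripts shrink to a thin wrapper, as the later hunks in this compare do; roughly:

```bash
#!/bin/bash
# Thin protogen.sh wrapper: locate the shared library next to (or above) this script, then delegate.
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

runProtogen
```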
||||
|
||||
|
||||
# installRequirements looks for several requirements files and if they exist runs the install for them in order
|
||||
#
|
||||
# - requirements-install.txt
|
||||
@@ -111,7 +351,7 @@ function ensureVenv() {
|
||||
# - requirements-${BUILD_TYPE}.txt
|
||||
# - requirements-${BUILD_PROFILE}.txt
|
||||
#
|
||||
# BUILD_PROFILE is a pore specific version of BUILD_TYPE, ex: cuda11 or cuda12
|
||||
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
|
||||
# it can also include some options that we do not have BUILD_TYPES for, ex: intel
|
||||
#
|
||||
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
|
||||
@@ -127,36 +367,36 @@ function ensureVenv() {
|
||||
# installRequirements
|
||||
function installRequirements() {
|
||||
ensureVenv
|
||||
|
||||
# These are the requirements files we will attempt to install, in order
|
||||
declare -a requirementFiles=(
|
||||
"${EDIR}/requirements-install.txt"
|
||||
"${EDIR}/requirements.txt"
|
||||
"${EDIR}/requirements-${BUILD_TYPE}.txt"
|
||||
"${EDIR}/requirements-${BUILD_TYPE:-}.txt"
|
||||
)
|
||||
|
||||
if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
|
||||
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
|
||||
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt")
|
||||
fi
|
||||
|
||||
# if BUILD_TYPE is empty, we are a CPU build, so we should try to install the CPU requirements
|
||||
if [ "x${BUILD_TYPE}" == "x" ]; then
|
||||
if [ "x${BUILD_TYPE:-}" == "x" ]; then
|
||||
requirementFiles+=("${EDIR}/requirements-cpu.txt")
|
||||
fi
|
||||
|
||||
requirementFiles+=("${EDIR}/requirements-after.txt")
|
||||
|
||||
if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
|
||||
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
|
||||
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt")
|
||||
fi
|
||||
|
||||
for reqFile in ${requirementFiles[@]}; do
|
||||
if [ -f ${reqFile} ]; then
|
||||
if [ -f "${reqFile}" ]; then
|
||||
echo "starting requirements install for ${reqFile}"
|
||||
uv pip install ${EXTRA_PIP_INSTALL_FLAGS} --requirement ${reqFile}
|
||||
if [ "x${USE_PIP}" == "xtrue" ]; then
|
||||
pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
|
||||
else
|
||||
uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
|
||||
fi
|
||||
echo "finished requirements install for ${reqFile}"
|
||||
fi
|
||||
done
|
||||
|
||||
runProtogen
|
||||
}
|
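For concreteness, a sketch of the lookup order for a CUDA 12 build (file names follow the rules above; which of them actually exist depends on the backend):

```bash
# With BUILD_TYPE=cublas and CUDA_MAJOR_VERSION=12 (so BUILD_PROFILE becomes cublas12 in init),
# the function tries these files in order and installs every one that exists:
#   ${EDIR}/requirements-install.txt
#   ${EDIR}/requirements.txt
#   ${EDIR}/requirements-cublas.txt
#   ${EDIR}/requirements-cublas12.txt
#   ${EDIR}/requirements-after.txt
#   ${EDIR}/requirements-cublas12-after.txt
installRequirements
```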
||||
|
||||
# startBackend discovers and runs the backend GRPC server
|
||||
@@ -174,18 +414,18 @@ function installRequirements() {
|
||||
# - ${BACKEND_NAME}.py
|
||||
function startBackend() {
|
||||
ensureVenv
|
||||
|
||||
if [ ! -z ${BACKEND_FILE} ]; then
|
||||
exec ${EDIR}/venv/bin/python ${BACKEND_FILE} $@
|
||||
if [ ! -z "${BACKEND_FILE:-}" ]; then
|
||||
exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@"
|
||||
elif [ -e "${MY_DIR}/server.py" ]; then
|
||||
exec ${EDIR}/venv/bin/python ${MY_DIR}/server.py $@
|
||||
exec "${EDIR}/venv/bin/python" "${MY_DIR}/server.py" "$@"
|
||||
elif [ -e "${MY_DIR}/backend.py" ]; then
|
||||
exec ${EDIR}/venv/bin/python ${MY_DIR}/backend.py $@
|
||||
exec "${EDIR}/venv/bin/python" "${MY_DIR}/backend.py" "$@"
|
||||
elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then
|
||||
exec ${EDIR}/venv/bin/python ${MY_DIR}/${BACKEND_NAME}.py $@
|
||||
exec "${EDIR}/venv/bin/python" "${MY_DIR}/${BACKEND_NAME}.py" "$@"
|
||||
fi
|
||||
}
|
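In practice every backend's run.sh reduces to the same wrapper (the kitten-tts run.sh added later in this compare is one example):

```bash
#!/bin/bash
# Minimal run.sh: source the shared library, then let startBackend pick server.py/backend.py/${BACKEND_NAME}.py.
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi

startBackend "$@"
```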
||||
|
||||
|
||||
# runUnittests discovers and runs python unittests
|
||||
#
|
||||
# You can specify a specific test file to use by setting TEST_FILE before calling runUnittests.
|
||||
@@ -198,41 +438,36 @@ function startBackend() {
|
||||
# by default a file named test.py in the backend's directory will be used
|
||||
function runUnittests() {
|
||||
ensureVenv
|
||||
|
||||
if [ ! -z ${TEST_FILE} ]; then
|
||||
testDir=$(dirname `realpath ${TEST_FILE}`)
|
||||
testFile=$(basename ${TEST_FILE})
|
||||
pushd ${testDir}
|
||||
python -m unittest ${testFile}
|
||||
popd
|
||||
if [ ! -z "${TEST_FILE:-}" ]; then
|
||||
testDir=$(dirname "$(realpath "${TEST_FILE}")")
|
||||
testFile=$(basename "${TEST_FILE}")
|
||||
pushd "${testDir}" >/dev/null
|
||||
python -m unittest "${testFile}"
|
||||
popd >/dev/null
|
||||
elif [ -f "${MY_DIR}/test.py" ]; then
|
||||
pushd ${MY_DIR}
|
||||
pushd "${MY_DIR}" >/dev/null
|
||||
python -m unittest test.py
|
||||
popd
|
||||
popd >/dev/null
|
||||
else
|
||||
echo "no tests defined for ${BACKEND_NAME}"
|
||||
fi
|
||||
}
|
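A sketch of how a backend's test.sh drives this, with the optional TEST_FILE override described above (the override path is illustrative):

```bash
#!/bin/bash
# Minimal test.sh: run ${MY_DIR}/test.py by default, or point TEST_FILE at another test module.
source "$(dirname "$0")/../common/libbackend.sh"

# TEST_FILE=./my_extra_tests.py   # optional override; path is illustrative
runUnittests
```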
||||
|
||||
|
||||
##################################################################################
|
||||
# Below here are helper functions not intended to be used outside of the library #
|
||||
##################################################################################
|
||||
|
||||
# checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets
|
||||
function checkTargets() {
|
||||
# Collect all provided targets into a variable and...
|
||||
targets=$@
|
||||
# ...convert it into an array
|
||||
declare -a targets=($targets)
|
||||
|
||||
for target in ${targets[@]}; do
|
||||
if [ "x${BUILD_TYPE}" == "x${target}" ]; then
|
||||
echo true
|
||||
return 0
|
||||
if [ "x${BUILD_TYPE:-}" == "x${target}" ]; then
|
||||
echo true; return 0
|
||||
fi
|
||||
if [ "x${BUILD_PROFILE}" == "x${target}" ]; then
|
||||
echo true
|
||||
return 0
|
||||
echo true; return 0
|
||||
fi
|
||||
done
|
||||
echo false
|
||||
|
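checkTargets is what backs the LIMIT_TARGETS guard in init(); a hedged sketch of a backend restricting itself to CUDA and ROCm builds, assuming init() is invoked when the library is sourced (that step is not shown in this hunk):

```bash
# Illustrative: restrict a backend to CUDA and ROCm builds.
# LIMIT_TARGETS must be set before init() runs so the guard can evaluate it.
LIMIT_TARGETS="cublas hipblas"
source "$(dirname "$0")/../common/libbackend.sh"
# init() then runs checkTargets ${LIMIT_TARGETS} and reports when the current build is not one of them.
```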
||||
@@ -3,18 +3,11 @@
|
||||
.PHONY: install
|
||||
install:
|
||||
bash install.sh
|
||||
$(MAKE) protogen
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
bash protogen.sh
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
@@ -8,4 +8,4 @@ else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
runProtogen
|
||||
@@ -1,5 +1,5 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
torch==2.8.0
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
optimum[openvino]
|
||||
@@ -1,3 +1,3 @@
|
||||
grpcio==1.71.0
|
||||
grpcio==1.74.0
|
||||
protobuf
|
||||
grpcio-tools
|
||||
@@ -1,29 +1,23 @@
|
||||
.PHONY: coqui
|
||||
coqui: protogen
|
||||
coqui:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: protogen
|
||||
run: coqui
|
||||
@echo "Running coqui..."
|
||||
bash run.sh
|
||||
@echo "coqui run."
|
||||
|
||||
.PHONY: test
|
||||
test: protogen
|
||||
test: coqui
|
||||
@echo "Testing coqui..."
|
||||
bash test.sh
|
||||
@echo "coqui tested."
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
@@ -40,7 +40,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
else:
|
||||
print("CUDA is not available", file=sys.stderr)
|
||||
device = "cpu"
|
||||
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
grpcio==1.71.0
|
||||
grpcio==1.74.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
@@ -12,28 +12,22 @@ export SKIP_CONDA=1
|
||||
endif
|
||||
|
||||
.PHONY: diffusers
|
||||
diffusers: protogen
|
||||
diffusers:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: protogen
|
||||
run: diffusers
|
||||
@echo "Running diffusers..."
|
||||
bash run.sh
|
||||
@echo "Diffusers run."
|
||||
|
||||
test: protogen
|
||||
test: diffusers
|
||||
bash test.sh
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
@@ -18,7 +18,7 @@ import backend_pb2_grpc
|
||||
import grpc
|
||||
|
||||
from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
|
||||
EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel
|
||||
EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, QwenImageEditPipeline
|
||||
from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
|
||||
from diffusers.pipelines.stable_diffusion import safety_checker
|
||||
from diffusers.utils import load_image, export_to_video
|
||||
@@ -65,6 +65,19 @@ from diffusers.schedulers import (
|
||||
UniPCMultistepScheduler,
|
||||
)
|
||||
|
||||
def is_float(s):
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def is_int(s):
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
# The scheduler list mapping was taken from here: https://github.com/neggles/animatediff-cli/blob/6f336f5f4b5e38e85d7f06f1744ef42d0a45f2a7/src/animatediff/schedulers.py#L39
|
||||
# Credits to https://github.com/neggles
|
||||
@@ -169,8 +182,24 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":")
|
||||
# if value is a number, convert it to the appropriate type
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
self.options[key] = value
|
||||
|
||||
# From options, extract if present "torch_dtype" and set it to the appropriate type
|
||||
if "torch_dtype" in self.options:
|
||||
if self.options["torch_dtype"] == "fp16":
|
||||
torchType = torch.float16
|
||||
elif self.options["torch_dtype"] == "bf16":
|
||||
torchType = torch.bfloat16
|
||||
elif self.options["torch_dtype"] == "fp32":
|
||||
torchType = torch.float32
|
||||
# remove it from options
|
||||
del self.options["torch_dtype"]
|
||||
|
||||
print(f"Options: {self.options}", file=sys.stderr)
|
||||
|
||||
local = False
|
||||
@@ -234,6 +263,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
elif request.PipelineType == "DiffusionPipeline":
|
||||
self.pipe = DiffusionPipeline.from_pretrained(request.Model,
|
||||
torch_dtype=torchType)
|
||||
elif request.PipelineType == "QwenImageEditPipeline":
|
||||
self.pipe = QwenImageEditPipeline.from_pretrained(request.Model,
|
||||
torch_dtype=torchType)
|
||||
elif request.PipelineType == "VideoDiffusionPipeline":
|
||||
self.txt2vid = True
|
||||
self.pipe = DiffusionPipeline.from_pretrained(request.Model,
|
||||
@@ -336,6 +368,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
device = "cpu" if not request.CUDA else "cuda"
|
||||
if XPU:
|
||||
device = "xpu"
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
self.device = device
|
||||
if request.LoraAdapter:
|
||||
# Check if its a local file and not a directory ( we load lora differently for a safetensor file )
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
diffusers
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
torchvision==0.22.1
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
torch==2.4.1
|
||||
torch==2.7.1
|
||||
optimum-quanto
|
||||
@@ -1,6 +1,7 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
diffusers
|
||||
torch==2.7.1+cu118
|
||||
torchvision==0.22.1+cu118
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
accelerate
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
torch==2.4.1
|
||||
diffusers
|
||||
torch==2.7.1
|
||||
torchvision==0.22.1
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
accelerate
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.3.1+rocm6.0
|
||||
torchvision==0.18.1+rocm6.0
|
||||
diffusers
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchvision==0.22.1+rocm6.3
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
accelerate
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
torchvision==0.18.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
torchvision==0.20.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
diffusers
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
accelerate
|
||||
|
||||
backend/python/diffusers/requirements-l4t.txt (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
|
||||
torch
|
||||
diffusers
|
||||
transformers
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
optimum-quanto
|
||||
numpy<2
|
||||
sentencepiece
|
||||
torchvision
|
||||
backend/python/diffusers/requirements-mps.txt (new file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
torch==2.7.1
|
||||
torchvision==0.22.1
|
||||
git+https://github.com/huggingface/diffusers
|
||||
opencv-python
|
||||
transformers
|
||||
accelerate
|
||||
compel
|
||||
peft
|
||||
sentencepiece
|
||||
optimum-quanto
|
||||
@@ -1,5 +1,5 @@
|
||||
setuptools
|
||||
grpcio==1.71.0
|
||||
grpcio==1.74.0
|
||||
pillow
|
||||
protobuf
|
||||
certifi
|
||||
|
||||
@@ -12,4 +12,6 @@ if [ -d "/opt/intel" ]; then
|
||||
export XPU=1
|
||||
fi
|
||||
|
||||
export PYTORCH_ENABLE_MPS_FALLBACK=1
|
||||
|
||||
startBackend $@
|
||||
|
||||
@@ -1,23 +1,17 @@
|
||||
.PHONY: exllama2
|
||||
exllama2: protogen
|
||||
exllama2:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: protogen
|
||||
run: exllama2
|
||||
@echo "Running exllama2..."
|
||||
bash run.sh
|
||||
@echo "exllama2 run."
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
$(RM) -r venv source __pycache__
|
||||
@@ -1,4 +1,4 @@
|
||||
grpcio==1.71.0
|
||||
grpcio==1.74.0
|
||||
protobuf
|
||||
certifi
|
||||
wheel
|
||||
|
||||
@@ -3,18 +3,11 @@
|
||||
.PHONY: install
|
||||
install:
|
||||
bash install.sh
|
||||
$(MAKE) protogen
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
bash protogen.sh
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
@@ -10,7 +10,7 @@ import sys
|
||||
import os
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import torch
|
||||
from faster_whisper import WhisperModel
|
||||
|
||||
import grpc
|
||||
@@ -35,7 +35,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
# device = "cuda" if request.CUDA else "cpu"
|
||||
if request.CUDA:
|
||||
device = "cuda"
|
||||
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
self.model = WhisperModel(request.Model, device=device, compute_type="float16")
|
||||
|
||||
backend/python/kitten-tts/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
.PHONY: kitten-tts
|
||||
kitten-tts:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: kitten-tts
|
||||
@echo "Running kitten-tts..."
|
||||
bash run.sh
|
||||
@echo "kitten-tts run."
|
||||
|
||||
.PHONY: test
|
||||
test: kitten-tts
|
||||
@echo "Testing kitten-tts..."
|
||||
bash test.sh
|
||||
@echo "kitten-tts tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
backend/python/kitten-tts/backend.py (new file, 109 lines)
@@ -0,0 +1,109 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Kitten TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import torch
|
||||
from kittentts import KittenTTS
|
||||
import soundfile as sf
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
KITTEN_LANGUAGE = os.environ.get('KITTEN_LANGUAGE', None)
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
def LoadModel(self, request, context):
|
||||
|
||||
self.AudioPath = None
|
||||
# List the available KittenTTS voices
|
||||
print("Available KittenTTS voices: expr-voice-2-m, expr-voice-2-f, expr-voice-3-m, expr-voice-3-f, expr-voice-4-m, expr-voice-4-f, expr-voice-5-m, expr-voice-5-f")
|
||||
if os.path.isabs(request.AudioPath):
|
||||
self.AudioPath = request.AudioPath
|
||||
elif request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
|
||||
# get base path of modelFile
|
||||
modelFileBase = os.path.dirname(request.ModelFile)
|
||||
# make AudioPath relative to modelFileBase
|
||||
self.AudioPath = os.path.join(modelFileBase, request.AudioPath)
|
||||
|
||||
try:
|
||||
print("Preparing KittenTTS model, please wait", file=sys.stderr)
|
||||
# Use the model name from request.Model, defaulting to "KittenML/kitten-tts-nano-0.1" if not specified
|
||||
model_name = request.Model if request.Model else "KittenML/kitten-tts-nano-0.1"
|
||||
self.tts = KittenTTS(model_name)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
# Implement your logic here for the LoadModel service
|
||||
# Replace this with your desired response
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def TTS(self, request, context):
|
||||
try:
|
||||
# KittenTTS doesn't use language parameter like TTS, so we ignore it
|
||||
# For multi-speaker models, use voice parameter
|
||||
voice = request.voice if request.voice else "expr-voice-2-f"
|
||||
|
||||
# Generate audio using KittenTTS
|
||||
audio = self.tts.generate(request.text, voice=voice)
|
||||
|
||||
# Save the audio using soundfile
|
||||
sf.write(request.dst, audio, 24000)
|
||||
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
backend/python/kitten-tts/install.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
|
||||
# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
|
||||
# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
|
||||
# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
|
||||
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
backend/python/kitten-tts/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging==24.1
|
||||
https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl
|
||||
backend/python/kitten-tts/run.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
#!/bin/bash
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
backend/python/kitten-tts/test.py (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
"""
|
||||
A test script to test the gRPC service
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
This method sets up the gRPC service by starting the server
|
||||
"""
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(30)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
"""
|
||||
This method tears down the gRPC service by terminating the server
|
||||
"""
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
"""
|
||||
This method tests if the server starts up successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
"""
|
||||
This method tests if the model is loaded successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
|
||||
print(response)
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_tts(self):
|
||||
"""
|
||||
This method tests if TTS audio is generated successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
|
||||
self.assertTrue(response.success)
|
||||
tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
|
||||
tts_response = stub.TTS(tts_request)
|
||||
self.assertIsNotNone(tts_response)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("TTS service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
backend/python/kokoro/protogen.sh → backend/python/kitten-tts/test.sh (renamed; mode Normal file → Executable file; 2 lines changed)
@@ -8,4 +8,4 @@ else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
|
||||
runUnittests
|
||||
@@ -1,20 +1,23 @@
|
||||
.DEFAULT_GOAL := install
|
||||
|
||||
.PHONY: install
|
||||
install:
|
||||
.PHONY: kokoro
|
||||
kokoro:
|
||||
bash install.sh
|
||||
$(MAKE) protogen
|
||||
|
||||
.PHONY: protogen
|
||||
protogen: backend_pb2_grpc.py backend_pb2.py
|
||||
.PHONY: run
|
||||
run: kokoro
|
||||
@echo "Running kokoro..."
|
||||
bash run.sh
|
||||
@echo "kokoro run."
|
||||
|
||||
.PHONY: test
|
||||
test: kokoro
|
||||
@echo "Testing kokoro..."
|
||||
bash test.sh
|
||||
@echo "kokoro tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
backend_pb2_grpc.py backend_pb2.py:
|
||||
bash protogen.sh
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
backend/python/kokoro/README.md (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# Kokoro TTS Backend for LocalAI
|
||||
|
||||
This is a gRPC server backend for LocalAI that uses the Kokoro TTS pipeline.
|
||||
|
||||
## Creating a separate environment for kokoro project
|
||||
|
||||
```bash
|
||||
make kokoro
|
||||
```
|
||||
|
||||
## Testing the gRPC server
|
||||
|
||||
```bash
|
||||
make test
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- Lightweight TTS model with 82 million parameters
|
||||
- Apache-licensed weights
|
||||
- Fast and cost-efficient
|
||||
- Multi-language support
|
||||
- Multiple voice options
|
||||
backend/python/kokoro/backend.py (mode Executable file → Normal file; 108 lines changed)
@@ -1,101 +1,81 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Extra gRPC server for Kokoro models.
|
||||
This is an extra gRPC server of LocalAI for Kokoro TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import time
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import torch
|
||||
from kokoro import KPipeline
|
||||
import soundfile as sf
|
||||
|
||||
import grpc
|
||||
|
||||
from models import build_model
|
||||
from kokoro import generate
|
||||
import torch
|
||||
|
||||
SAMPLE_RATE = 22050
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
KOKORO_LANG_CODE = os.environ.get('KOKORO_LANG_CODE', 'a')
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
A gRPC servicer for the backend service.
|
||||
|
||||
This class implements the gRPC methods for the backend service, including Health, LoadModel, and Embedding.
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
"""
|
||||
A gRPC method that returns the health status of the backend service.
|
||||
|
||||
Args:
|
||||
request: A HealthRequest object that contains the request parameters.
|
||||
context: A grpc.ServicerContext object that provides information about the RPC.
|
||||
|
||||
Returns:
|
||||
A Reply object that contains the health status of the backend service.
|
||||
"""
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
|
||||
def LoadModel(self, request, context):
|
||||
"""
|
||||
A gRPC method that loads a model into memory.
|
||||
|
||||
Args:
|
||||
request: A LoadModelRequest object that contains the request parameters.
|
||||
context: A grpc.ServicerContext object that provides information about the RPC.
|
||||
|
||||
Returns:
|
||||
A Result object that contains the result of the LoadModel operation.
|
||||
"""
|
||||
model_name = request.Model
|
||||
try:
|
||||
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
||||
self.MODEL = build_model(request.ModelFile, device)
|
||||
print("Preparing Kokoro TTS pipeline, please wait", file=sys.stderr)
|
||||
# empty dict
|
||||
self.options = {}
|
||||
options = request.Options
|
||||
# Find the voice from the options, options are a list of strings in this form optname:optvalue:
|
||||
VOICE_NAME = None
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We are storing all the options in a dict so we can use it later when
|
||||
# generating the images
|
||||
for opt in options:
|
||||
if opt.startswith("voice:"):
|
||||
VOICE_NAME = opt.split(":")[1]
|
||||
break
|
||||
if VOICE_NAME is None:
|
||||
return backend_pb2.Result(success=False, message=f"No voice specified in options")
|
||||
MODELPATH = request.ModelPath
|
||||
# If voice name contains a plus, split it and load the two models and combine them
|
||||
if "+" in VOICE_NAME:
|
||||
voice1, voice2 = VOICE_NAME.split("+")
|
||||
voice1 = torch.load(f'{MODELPATH}/{voice1}.pt', weights_only=True).to(device)
|
||||
voice2 = torch.load(f'{MODELPATH}/{voice2}.pt', weights_only=True).to(device)
|
||||
self.VOICEPACK = torch.mean(torch.stack([voice1, voice2]), dim=0)
|
||||
else:
|
||||
self.VOICEPACK = torch.load(f'{MODELPATH}/{VOICE_NAME}.pt', weights_only=True).to(device)
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":")
|
||||
self.options[key] = value
|
||||
|
||||
self.VOICE_NAME = VOICE_NAME
|
||||
|
||||
print(f'Loaded voice: {VOICE_NAME}')
|
||||
# Initialize Kokoro pipeline with language code
|
||||
lang_code = self.options.get("lang_code", KOKORO_LANG_CODE)
|
||||
self.pipeline = KPipeline(lang_code=lang_code)
|
||||
print(f"Kokoro TTS pipeline loaded with language code: {lang_code}", file=sys.stderr)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
return backend_pb2.Result(message="Kokoro TTS pipeline loaded successfully", success=True)
|
||||
|
||||
def TTS(self, request, context):
|
||||
model_name = request.model
|
||||
if model_name == "":
|
||||
return backend_pb2.Result(success=False, message="request.model is required")
|
||||
try:
|
||||
audio, out_ps = generate(self.MODEL, request.text, self.VOICEPACK, lang=self.VOICE_NAME)
|
||||
print(out_ps)
|
||||
sf.write(request.dst, audio, SAMPLE_RATE)
|
||||
# Get voice from request, default to 'af_heart' if not specified
|
||||
voice = request.voice if request.voice else 'af_heart'
|
||||
|
||||
# Generate audio using Kokoro pipeline
|
||||
generator = self.pipeline(request.text, voice=voice)
|
||||
|
||||
# Get the first (and typically only) audio segment
|
||||
for i, (gs, ps, audio) in enumerate(generator):
|
||||
# Save audio to the destination file
|
||||
sf.write(request.dst, audio, 24000)
|
||||
print(f"Generated audio segment {i}: gs={gs}, ps={ps}", file=sys.stderr)
|
||||
# For now, we only process the first segment
|
||||
# If you need to handle multiple segments, you might want to modify this
|
||||
break
|
||||
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def serve(address):
|
||||
@@ -108,11 +88,11 @@ def serve(address):
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("[Kokoro] Server started. Listening on: " + address, file=sys.stderr)
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("[Kokoro] Received termination signal. Shutting down...")
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
@@ -132,5 +112,5 @@ if __name__ == "__main__":
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
print(f"[Kokoro] startup: {args}", file=sys.stderr)
|
||||
|
||||
serve(args.addr)
|
||||
|
||||
@@ -1,524 +0,0 @@
|
||||
# https://huggingface.co/hexgrad/Kokoro-82M/blob/main/istftnet.py
|
||||
# https://github.com/yl4579/StyleTTS2/blob/main/Modules/istftnet.py
|
||||
from scipy.signal import get_window
|
||||
from torch.nn import Conv1d, ConvTranspose1d
|
||||
from torch.nn.utils import weight_norm, remove_weight_norm
|
||||
import numpy as np
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
# https://github.com/yl4579/StyleTTS2/blob/main/Modules/utils.py
|
||||
def init_weights(m, mean=0.0, std=0.01):
|
||||
classname = m.__class__.__name__
|
||||
if classname.find("Conv") != -1:
|
||||
m.weight.data.normal_(mean, std)
|
||||
|
||||
def get_padding(kernel_size, dilation=1):
|
||||
return int((kernel_size*dilation - dilation)/2)
|
||||
|
||||
LRELU_SLOPE = 0.1
|
||||
|
||||
class AdaIN1d(nn.Module):
|
||||
def __init__(self, style_dim, num_features):
|
||||
super().__init__()
|
||||
self.norm = nn.InstanceNorm1d(num_features, affine=False)
|
||||
self.fc = nn.Linear(style_dim, num_features*2)
|
||||
|
||||
def forward(self, x, s):
|
||||
h = self.fc(s)
|
||||
h = h.view(h.size(0), h.size(1), 1)
|
||||
gamma, beta = torch.chunk(h, chunks=2, dim=1)
|
||||
return (1 + gamma) * self.norm(x) + beta
|
||||
|
||||
class AdaINResBlock1(torch.nn.Module):
|
||||
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), style_dim=64):
|
||||
super(AdaINResBlock1, self).__init__()
|
||||
self.convs1 = nn.ModuleList([
|
||||
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
|
||||
padding=get_padding(kernel_size, dilation[0]))),
|
||||
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
|
||||
padding=get_padding(kernel_size, dilation[1]))),
|
||||
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
|
||||
padding=get_padding(kernel_size, dilation[2])))
|
||||
])
|
||||
self.convs1.apply(init_weights)
|
||||
|
||||
self.convs2 = nn.ModuleList([
|
||||
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
||||
padding=get_padding(kernel_size, 1))),
|
||||
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
||||
padding=get_padding(kernel_size, 1))),
|
||||
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
|
||||
padding=get_padding(kernel_size, 1)))
|
||||
])
|
||||
self.convs2.apply(init_weights)
|
||||
|
||||
self.adain1 = nn.ModuleList([
|
||||
AdaIN1d(style_dim, channels),
|
||||
AdaIN1d(style_dim, channels),
|
||||
AdaIN1d(style_dim, channels),
|
||||
])
|
||||
|
||||
self.adain2 = nn.ModuleList([
|
||||
AdaIN1d(style_dim, channels),
|
||||
AdaIN1d(style_dim, channels),
|
||||
AdaIN1d(style_dim, channels),
|
||||
])
|
||||
|
||||
self.alpha1 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs1))])
|
||||
self.alpha2 = nn.ParameterList([nn.Parameter(torch.ones(1, channels, 1)) for i in range(len(self.convs2))])
|
||||
|
||||
|
||||
def forward(self, x, s):
|
||||
for c1, c2, n1, n2, a1, a2 in zip(self.convs1, self.convs2, self.adain1, self.adain2, self.alpha1, self.alpha2):
|
||||
xt = n1(x, s)
|
||||
xt = xt + (1 / a1) * (torch.sin(a1 * xt) ** 2) # Snake1D
|
||||
xt = c1(xt)
|
||||
xt = n2(xt, s)
|
||||
xt = xt + (1 / a2) * (torch.sin(a2 * xt) ** 2) # Snake1D
|
||||
xt = c2(xt)
|
||||
x = xt + x
|
||||
return x
|
||||
|
||||
def remove_weight_norm(self):
|
||||
for l in self.convs1:
|
||||
remove_weight_norm(l)
|
||||
for l in self.convs2:
|
||||
remove_weight_norm(l)
|
||||
|
||||
class TorchSTFT(torch.nn.Module):
|
||||
def __init__(self, filter_length=800, hop_length=200, win_length=800, window='hann'):
|
||||
super().__init__()
|
||||
self.filter_length = filter_length
|
||||
self.hop_length = hop_length
|
||||
self.win_length = win_length
|
||||
self.window = torch.from_numpy(get_window(window, win_length, fftbins=True).astype(np.float32))
|
||||
|
||||
def transform(self, input_data):
|
||||
forward_transform = torch.stft(
|
||||
input_data,
|
||||
self.filter_length, self.hop_length, self.win_length, window=self.window.to(input_data.device),
|
||||
return_complex=True)
|
||||
|
||||
return torch.abs(forward_transform), torch.angle(forward_transform)
|
||||
|
||||
def inverse(self, magnitude, phase):
|
||||
inverse_transform = torch.istft(
|
||||
magnitude * torch.exp(phase * 1j),
|
||||
self.filter_length, self.hop_length, self.win_length, window=self.window.to(magnitude.device))
|
||||
|
||||
return inverse_transform.unsqueeze(-2) # unsqueeze to stay consistent with conv_transpose1d implementation
|
||||
|
||||
def forward(self, input_data):
|
||||
self.magnitude, self.phase = self.transform(input_data)
|
||||
reconstruction = self.inverse(self.magnitude, self.phase)
|
||||
return reconstruction
|
||||
|
||||
class SineGen(torch.nn.Module):
|
||||
""" Definition of sine generator
|
||||
SineGen(samp_rate, harmonic_num = 0,
|
||||
sine_amp = 0.1, noise_std = 0.003,
|
||||
voiced_threshold = 0,
|
||||
flag_for_pulse=False)
|
||||
samp_rate: sampling rate in Hz
|
||||
harmonic_num: number of harmonic overtones (default 0)
|
||||
sine_amp: amplitude of sine-wavefrom (default 0.1)
|
||||
noise_std: std of Gaussian noise (default 0.003)
|
||||
voiced_thoreshold: F0 threshold for U/V classification (default 0)
|
||||
flag_for_pulse: this SinGen is used inside PulseGen (default False)
|
||||
Note: when flag_for_pulse is True, the first time step of a voiced
|
||||
segment is always sin(np.pi) or cos(0)
|
||||
"""
|
||||
|
||||
def __init__(self, samp_rate, upsample_scale, harmonic_num=0,
|
||||
sine_amp=0.1, noise_std=0.003,
|
||||
voiced_threshold=0,
|
||||
flag_for_pulse=False):
|
||||
super(SineGen, self).__init__()
|
||||
self.sine_amp = sine_amp
|
||||
self.noise_std = noise_std
|
||||
self.harmonic_num = harmonic_num
|
||||
self.dim = self.harmonic_num + 1
|
||||
self.sampling_rate = samp_rate
|
||||
self.voiced_threshold = voiced_threshold
|
||||
self.flag_for_pulse = flag_for_pulse
|
||||
self.upsample_scale = upsample_scale
|
||||
|
||||
def _f02uv(self, f0):
|
||||
# generate uv signal
|
||||
uv = (f0 > self.voiced_threshold).type(torch.float32)
|
||||
return uv
|
||||
|
||||
def _f02sine(self, f0_values):
|
||||
""" f0_values: (batchsize, length, dim)
|
||||
where dim indicates fundamental tone and overtones
|
||||
"""
|
||||
# convert to F0 in rad. The integer part n can be ignored
|
||||
# because 2 * np.pi * n doesn't affect phase
|
||||
rad_values = (f0_values / self.sampling_rate) % 1
|
||||
|
||||
# initial phase noise (no noise for fundamental component)
|
||||
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
|
||||
device=f0_values.device)
|
||||
rand_ini[:, 0] = 0
|
||||
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
|
||||
|
||||
# instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
|
||||
if not self.flag_for_pulse:
|
||||
# # for normal case
|
||||
|
||||
# # To prevent torch.cumsum numerical overflow,
|
||||
# # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
|
||||
# # Buffer tmp_over_one_idx indicates the time step to add -1.
|
||||
# # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
|
||||
# tmp_over_one = torch.cumsum(rad_values, 1) % 1
|
||||
# tmp_over_one_idx = (padDiff(tmp_over_one)) < 0
|
||||
# cumsum_shift = torch.zeros_like(rad_values)
|
||||
# cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
||||
|
||||
# phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi
|
||||
rad_values = torch.nn.functional.interpolate(rad_values.transpose(1, 2),
|
||||
scale_factor=1/self.upsample_scale,
|
||||
mode="linear").transpose(1, 2)
|
||||
|
||||
# tmp_over_one = torch.cumsum(rad_values, 1) % 1
|
||||
# tmp_over_one_idx = (padDiff(tmp_over_one)) < 0
|
||||
# cumsum_shift = torch.zeros_like(rad_values)
|
||||
# cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
|
||||
|
||||
phase = torch.cumsum(rad_values, dim=1) * 2 * np.pi
|
||||
phase = torch.nn.functional.interpolate(phase.transpose(1, 2) * self.upsample_scale,
|
||||
scale_factor=self.upsample_scale, mode="linear").transpose(1, 2)
|
||||
sines = torch.sin(phase)
|
||||
|
||||
else:
|
||||
# If necessary, make sure that the first time step of every
|
||||
# voiced segments is sin(pi) or cos(0)
|
||||
# This is used for pulse-train generation
|
||||
|
||||
# identify the last time step in unvoiced segments
|
||||
uv = self._f02uv(f0_values)
|
||||
uv_1 = torch.roll(uv, shifts=-1, dims=1)
|
||||
uv_1[:, -1, :] = 1
|
||||
u_loc = (uv < 1) * (uv_1 > 0)
|
||||
|
||||
# get the instantanouse phase
|
||||
tmp_cumsum = torch.cumsum(rad_values, dim=1)
|
||||
# different batch needs to be processed differently
|
||||
for idx in range(f0_values.shape[0]):
|
||||
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
|
||||
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
|
||||
# stores the accumulation of i.phase within
|
||||
# each voiced segments
|
||||
tmp_cumsum[idx, :, :] = 0
|
||||
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
|
||||
|
||||
# rad_values - tmp_cumsum: remove the accumulation of i.phase
|
||||
# within the previous voiced segment.
|
||||
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
|
||||
|
||||
# get the sines
|
||||
sines = torch.cos(i_phase * 2 * np.pi)
|
||||
return sines
|
||||
|
||||
def forward(self, f0):
|
||||
""" sine_tensor, uv = forward(f0)
|
||||
input F0: tensor(batchsize=1, length, dim=1)
|
||||
f0 for unvoiced steps should be 0
|
||||
output sine_tensor: tensor(batchsize=1, length, dim)
|
||||
output uv: tensor(batchsize=1, length, 1)
|
||||
"""
|
||||
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
|
||||
device=f0.device)
|
||||
# fundamental component
|
||||
fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))
|
||||
|
||||
# generate sine waveforms
|
||||
sine_waves = self._f02sine(fn) * self.sine_amp
|
||||
|
||||
# generate uv signal
|
||||
# uv = torch.ones(f0.shape)
|
||||
# uv = uv * (f0 > self.voiced_threshold)
|
||||
uv = self._f02uv(f0)
|
||||
|
||||
# noise: for unvoiced should be similar to sine_amp
|
||||
# std = self.sine_amp/3 -> max value ~ self.sine_amp
|
||||
# . for voiced regions is self.noise_std
|
||||
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
|
||||
noise = noise_amp * torch.randn_like(sine_waves)
|
||||
|
||||
# first: set the unvoiced part to 0 by uv
|
||||
# then: additive noise
|
||||
sine_waves = sine_waves * uv + noise
|
||||
return sine_waves, uv, noise
|
||||
|
||||
|
||||
class SourceModuleHnNSF(torch.nn.Module):
|
||||
""" SourceModule for hn-nsf
|
||||
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
|
||||
add_noise_std=0.003, voiced_threshod=0)
|
||||
sampling_rate: sampling_rate in Hz
|
||||
harmonic_num: number of harmonic above F0 (default: 0)
|
||||
sine_amp: amplitude of sine source signal (default: 0.1)
|
||||
add_noise_std: std of additive Gaussian noise (default: 0.003)
|
||||
note that amplitude of noise in unvoiced is decided
|
||||
by sine_amp
|
||||
voiced_threshold: threhold to set U/V given F0 (default: 0)
|
||||
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
|
||||
F0_sampled (batchsize, length, 1)
|
||||
Sine_source (batchsize, length, 1)
|
||||
noise_source (batchsize, length 1)
|
||||
uv (batchsize, length, 1)
|
||||
"""
|
||||
|
||||
def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
|
||||
add_noise_std=0.003, voiced_threshod=0):
|
||||
super(SourceModuleHnNSF, self).__init__()
|
||||
|
||||
self.sine_amp = sine_amp
|
||||
self.noise_std = add_noise_std
|
||||
|
||||
# to produce sine waveforms
|
||||
self.l_sin_gen = SineGen(sampling_rate, upsample_scale, harmonic_num,
|
||||
sine_amp, add_noise_std, voiced_threshod)
|
||||
|
||||
# to merge source harmonics into a single excitation
|
||||
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
|
||||
self.l_tanh = torch.nn.Tanh()
|
||||
|
||||
def forward(self, x):
|
||||
"""
|
||||
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
|
||||
F0_sampled (batchsize, length, 1)
|
||||
Sine_source (batchsize, length, 1)
|
||||
noise_source (batchsize, length 1)
|
||||
"""
|
||||
# source for harmonic branch
|
||||
with torch.no_grad():
|
||||
sine_wavs, uv, _ = self.l_sin_gen(x)
|
||||
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
|
||||
|
||||
# source for noise branch, in the same shape as uv
|
||||
noise = torch.randn_like(uv) * self.sine_amp / 3
|
||||
return sine_merge, noise, uv
|
||||
def padDiff(x):
|
||||
return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0)
|
||||
|
||||
|
||||
class Generator(torch.nn.Module):
|
||||
def __init__(self, style_dim, resblock_kernel_sizes, upsample_rates, upsample_initial_channel, resblock_dilation_sizes, upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size):
|
||||
super(Generator, self).__init__()
|
||||
|
||||
self.num_kernels = len(resblock_kernel_sizes)
|
||||
self.num_upsamples = len(upsample_rates)
|
||||
resblock = AdaINResBlock1
|
||||
|
||||
self.m_source = SourceModuleHnNSF(
|
||||
sampling_rate=24000,
|
||||
upsample_scale=np.prod(upsample_rates) * gen_istft_hop_size,
|
||||
harmonic_num=8, voiced_threshod=10)
|
||||
self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * gen_istft_hop_size)
|
||||
self.noise_convs = nn.ModuleList()
|
||||
self.noise_res = nn.ModuleList()
|
||||
|
||||
self.ups = nn.ModuleList()
|
||||
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
|
||||
self.ups.append(weight_norm(
|
||||
ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)),
|
||||
k, u, padding=(k-u)//2)))
|
||||
|
||||
self.resblocks = nn.ModuleList()
|
||||
for i in range(len(self.ups)):
|
||||
ch = upsample_initial_channel//(2**(i+1))
|
||||
for j, (k, d) in enumerate(zip(resblock_kernel_sizes,resblock_dilation_sizes)):
|
||||
self.resblocks.append(resblock(ch, k, d, style_dim))
|
||||
|
||||
c_cur = upsample_initial_channel // (2 ** (i + 1))
|
||||
|
||||
if i + 1 < len(upsample_rates): #
|
||||
stride_f0 = np.prod(upsample_rates[i + 1:])
|
||||
self.noise_convs.append(Conv1d(
|
||||
gen_istft_n_fft + 2, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=(stride_f0+1) // 2))
|
||||
self.noise_res.append(resblock(c_cur, 7, [1,3,5], style_dim))
|
||||
else:
|
||||
self.noise_convs.append(Conv1d(gen_istft_n_fft + 2, c_cur, kernel_size=1))
|
||||
self.noise_res.append(resblock(c_cur, 11, [1,3,5], style_dim))
|
||||
|
||||
|
||||
self.post_n_fft = gen_istft_n_fft
|
||||
self.conv_post = weight_norm(Conv1d(ch, self.post_n_fft + 2, 7, 1, padding=3))
|
||||
self.ups.apply(init_weights)
|
||||
self.conv_post.apply(init_weights)
|
||||
self.reflection_pad = torch.nn.ReflectionPad1d((1, 0))
|
||||
self.stft = TorchSTFT(filter_length=gen_istft_n_fft, hop_length=gen_istft_hop_size, win_length=gen_istft_n_fft)
|
||||
|
||||
|
||||
def forward(self, x, s, f0):
|
||||
with torch.no_grad():
|
||||
f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
|
||||
|
||||
har_source, noi_source, uv = self.m_source(f0)
|
||||
har_source = har_source.transpose(1, 2).squeeze(1)
|
||||
har_spec, har_phase = self.stft.transform(har_source)
|
||||
har = torch.cat([har_spec, har_phase], dim=1)
|
||||
|
||||
for i in range(self.num_upsamples):
|
||||
x = F.leaky_relu(x, LRELU_SLOPE)
|
||||
x_source = self.noise_convs[i](har)
|
||||
x_source = self.noise_res[i](x_source, s)
|
||||
|
||||
x = self.ups[i](x)
|
||||
if i == self.num_upsamples - 1:
|
||||
x = self.reflection_pad(x)
|
||||
|
||||
x = x + x_source
|
||||
xs = None
|
||||
for j in range(self.num_kernels):
|
||||
if xs is None:
|
||||
xs = self.resblocks[i*self.num_kernels+j](x, s)
|
||||
else:
|
||||
xs += self.resblocks[i*self.num_kernels+j](x, s)
|
||||
x = xs / self.num_kernels
|
||||
x = F.leaky_relu(x)
|
||||
x = self.conv_post(x)
|
||||
spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :])
|
||||
phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])
|
||||
return self.stft.inverse(spec, phase)
|
||||
|
||||
def fw_phase(self, x, s):
|
||||
for i in range(self.num_upsamples):
|
||||
x = F.leaky_relu(x, LRELU_SLOPE)
|
||||
x = self.ups[i](x)
|
||||
xs = None
|
||||
for j in range(self.num_kernels):
|
||||
if xs is None:
|
||||
xs = self.resblocks[i*self.num_kernels+j](x, s)
|
||||
else:
|
||||
xs += self.resblocks[i*self.num_kernels+j](x, s)
|
||||
x = xs / self.num_kernels
|
||||
x = F.leaky_relu(x)
|
||||
x = self.reflection_pad(x)
|
||||
x = self.conv_post(x)
|
||||
spec = torch.exp(x[:,:self.post_n_fft // 2 + 1, :])
|
||||
phase = torch.sin(x[:, self.post_n_fft // 2 + 1:, :])
|
||||
return spec, phase
|
||||
|
||||
def remove_weight_norm(self):
|
||||
print('Removing weight norm...')
|
||||
for l in self.ups:
|
||||
remove_weight_norm(l)
|
||||
for l in self.resblocks:
|
||||
l.remove_weight_norm()
|
||||
remove_weight_norm(self.conv_pre)
|
||||
remove_weight_norm(self.conv_post)
|
||||
|
||||
|
||||
class AdainResBlk1d(nn.Module):
|
||||
def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2),
|
||||
upsample='none', dropout_p=0.0):
|
||||
super().__init__()
|
||||
self.actv = actv
|
||||
self.upsample_type = upsample
|
||||
self.upsample = UpSample1d(upsample)
|
||||
self.learned_sc = dim_in != dim_out
|
||||
self._build_weights(dim_in, dim_out, style_dim)
|
||||
self.dropout = nn.Dropout(dropout_p)
|
||||
|
||||
if upsample == 'none':
|
||||
self.pool = nn.Identity()
|
||||
else:
|
||||
self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1))
|
||||
|
||||
|
||||
def _build_weights(self, dim_in, dim_out, style_dim):
|
||||
self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
|
||||
self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1))
|
||||
self.norm1 = AdaIN1d(style_dim, dim_in)
|
||||
self.norm2 = AdaIN1d(style_dim, dim_out)
|
||||
if self.learned_sc:
|
||||
self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))
|
||||
|
||||
def _shortcut(self, x):
|
||||
x = self.upsample(x)
|
||||
if self.learned_sc:
|
||||
x = self.conv1x1(x)
|
||||
return x
|
||||
|
||||
def _residual(self, x, s):
|
||||
x = self.norm1(x, s)
|
||||
x = self.actv(x)
|
||||
x = self.pool(x)
|
||||
x = self.conv1(self.dropout(x))
|
||||
x = self.norm2(x, s)
|
||||
x = self.actv(x)
|
||||
x = self.conv2(self.dropout(x))
|
||||
return x
|
||||
|
||||
def forward(self, x, s):
|
||||
out = self._residual(x, s)
|
||||
out = (out + self._shortcut(x)) / np.sqrt(2)
|
||||
return out
|
||||
|
||||
class UpSample1d(nn.Module):
|
||||
def __init__(self, layer_type):
|
||||
super().__init__()
|
||||
self.layer_type = layer_type
|
||||
|
||||
def forward(self, x):
|
||||
if self.layer_type == 'none':
|
||||
return x
|
||||
else:
|
||||
return F.interpolate(x, scale_factor=2, mode='nearest')
|
||||
|
||||
class Decoder(nn.Module):
|
||||
def __init__(self, dim_in=512, F0_channel=512, style_dim=64, dim_out=80,
|
||||
resblock_kernel_sizes = [3,7,11],
|
||||
upsample_rates = [10, 6],
|
||||
upsample_initial_channel=512,
|
||||
resblock_dilation_sizes=[[1,3,5], [1,3,5], [1,3,5]],
|
||||
upsample_kernel_sizes=[20, 12],
|
||||
gen_istft_n_fft=20, gen_istft_hop_size=5):
|
||||
super().__init__()
|
||||
|
||||
self.decode = nn.ModuleList()
|
||||
|
||||
self.encode = AdainResBlk1d(dim_in + 2, 1024, style_dim)
|
||||
|
||||
self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
|
||||
self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
|
||||
self.decode.append(AdainResBlk1d(1024 + 2 + 64, 1024, style_dim))
|
||||
self.decode.append(AdainResBlk1d(1024 + 2 + 64, 512, style_dim, upsample=True))
|
||||
|
||||
self.F0_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1))
|
||||
|
||||
self.N_conv = weight_norm(nn.Conv1d(1, 1, kernel_size=3, stride=2, groups=1, padding=1))
|
||||
|
||||
self.asr_res = nn.Sequential(
|
||||
weight_norm(nn.Conv1d(512, 64, kernel_size=1)),
|
||||
)
|
||||
|
||||
|
||||
self.generator = Generator(style_dim, resblock_kernel_sizes, upsample_rates,
|
||||
upsample_initial_channel, resblock_dilation_sizes,
|
||||
upsample_kernel_sizes, gen_istft_n_fft, gen_istft_hop_size)
|
||||
|
||||
def forward(self, asr, F0_curve, N, s):
|
||||
F0 = self.F0_conv(F0_curve.unsqueeze(1))
|
||||
N = self.N_conv(N.unsqueeze(1))
|
||||
|
||||
x = torch.cat([asr, F0, N], axis=1)
|
||||
x = self.encode(x, s)
|
||||
|
||||
asr_res = self.asr_res(asr)
|
||||
|
||||
res = True
|
||||
for block in self.decode:
|
||||
if res:
|
||||
x = torch.cat([x, asr_res, F0, N], axis=1)
|
||||
x = block(x, s)
|
||||
if block.upsample_type != "none":
|
||||
res = False
|
||||
|
||||
x = self.generator(x, s, F0_curve)
|
||||
return x
|
||||
@@ -1,166 +0,0 @@
|
||||
# https://huggingface.co/hexgrad/Kokoro-82M/blob/main/kokoro.py
|
||||
import phonemizer
|
||||
import re
|
||||
import torch
|
||||
import numpy as np
|
||||
|
||||
def split_num(num):
|
||||
num = num.group()
|
||||
if '.' in num:
|
||||
return num
|
||||
elif ':' in num:
|
||||
h, m = [int(n) for n in num.split(':')]
|
||||
if m == 0:
|
||||
return f"{h} o'clock"
|
||||
elif m < 10:
|
||||
return f'{h} oh {m}'
|
||||
return f'{h} {m}'
|
||||
year = int(num[:4])
|
||||
if year < 1100 or year % 1000 < 10:
|
||||
return num
|
||||
left, right = num[:2], int(num[2:4])
|
||||
s = 's' if num.endswith('s') else ''
|
||||
if 100 <= year % 1000 <= 999:
|
||||
if right == 0:
|
||||
return f'{left} hundred{s}'
|
||||
elif right < 10:
|
||||
return f'{left} oh {right}{s}'
|
||||
return f'{left} {right}{s}'
|
||||
|
||||
def flip_money(m):
|
||||
m = m.group()
|
||||
bill = 'dollar' if m[0] == '$' else 'pound'
|
||||
if m[-1].isalpha():
|
||||
return f'{m[1:]} {bill}s'
|
||||
elif '.' not in m:
|
||||
s = '' if m[1:] == '1' else 's'
|
||||
return f'{m[1:]} {bill}{s}'
|
||||
b, c = m[1:].split('.')
|
||||
s = '' if b == '1' else 's'
|
||||
c = int(c.ljust(2, '0'))
|
||||
coins = f"cent{'' if c == 1 else 's'}" if m[0] == '$' else ('penny' if c == 1 else 'pence')
|
||||
return f'{b} {bill}{s} and {c} {coins}'
|
||||
|
||||
def point_num(num):
|
||||
a, b = num.group().split('.')
|
||||
return ' point '.join([a, ' '.join(b)])
|
||||
|
||||
def normalize_text(text):
|
||||
text = text.replace(chr(8216), "'").replace(chr(8217), "'")
|
||||
text = text.replace('«', chr(8220)).replace('»', chr(8221))
|
||||
text = text.replace(chr(8220), '"').replace(chr(8221), '"')
|
||||
text = text.replace('(', '«').replace(')', '»')
|
||||
for a, b in zip('、。!,:;?', ',.!,:;?'):
|
||||
text = text.replace(a, b+' ')
|
||||
text = re.sub(r'[^\S \n]', ' ', text)
|
||||
text = re.sub(r' +', ' ', text)
|
||||
text = re.sub(r'(?<=\n) +(?=\n)', '', text)
|
||||
text = re.sub(r'\bD[Rr]\.(?= [A-Z])', 'Doctor', text)
|
||||
text = re.sub(r'\b(?:Mr\.|MR\.(?= [A-Z]))', 'Mister', text)
|
||||
text = re.sub(r'\b(?:Ms\.|MS\.(?= [A-Z]))', 'Miss', text)
|
||||
text = re.sub(r'\b(?:Mrs\.|MRS\.(?= [A-Z]))', 'Mrs', text)
|
||||
text = re.sub(r'\betc\.(?! [A-Z])', 'etc', text)
|
||||
text = re.sub(r'(?i)\b(y)eah?\b', r"\1e'a", text)
|
||||
text = re.sub(r'\d*\.\d+|\b\d{4}s?\b|(?<!:)\b(?:[1-9]|1[0-2]):[0-5]\d\b(?!:)', split_num, text)
|
||||
text = re.sub(r'(?<=\d),(?=\d)', '', text)
|
||||
text = re.sub(r'(?i)[$£]\d+(?:\.\d+)?(?: hundred| thousand| (?:[bm]|tr)illion)*\b|[$£]\d+\.\d\d?\b', flip_money, text)
|
||||
text = re.sub(r'\d*\.\d+', point_num, text)
|
||||
text = re.sub(r'(?<=\d)-(?=\d)', ' to ', text)
|
||||
text = re.sub(r'(?<=\d)S', ' S', text)
|
||||
text = re.sub(r"(?<=[BCDFGHJ-NP-TV-Z])'?s\b", "'S", text)
|
||||
text = re.sub(r"(?<=X')S\b", 's', text)
|
||||
text = re.sub(r'(?:[A-Za-z]\.){2,} [a-z]', lambda m: m.group().replace('.', '-'), text)
|
||||
text = re.sub(r'(?i)(?<=[A-Z])\.(?=[A-Z])', '-', text)
|
||||
return text.strip()
|
||||
|
||||
def get_vocab():
|
||||
_pad = "$"
|
||||
_punctuation = ';:,.!?¡¿—…"«»“” '
|
||||
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
|
||||
_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
|
||||
symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
|
||||
dicts = {}
|
||||
for i in range(len((symbols))):
|
||||
dicts[symbols[i]] = i
|
||||
return dicts
|
||||
|
||||
VOCAB = get_vocab()
|
||||
def tokenize(ps):
|
||||
return [i for i in map(VOCAB.get, ps) if i is not None]
|
||||
|
||||
phonemizers = dict(
|
||||
a=phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True),
|
||||
b=phonemizer.backend.EspeakBackend(language='en-gb', preserve_punctuation=True, with_stress=True),
|
||||
)
|
||||
def phonemize(text, lang, norm=True):
|
||||
if norm:
|
||||
text = normalize_text(text)
|
||||
ps = phonemizers[lang].phonemize([text])
|
||||
ps = ps[0] if ps else ''
|
||||
# https://en.wiktionary.org/wiki/kokoro#English
|
||||
ps = ps.replace('kəkˈoːɹoʊ', 'kˈoʊkəɹoʊ').replace('kəkˈɔːɹəʊ', 'kˈəʊkəɹəʊ')
|
||||
ps = ps.replace('ʲ', 'j').replace('r', 'ɹ').replace('x', 'k').replace('ɬ', 'l')
|
||||
ps = re.sub(r'(?<=[a-zɹː])(?=hˈʌndɹɪd)', ' ', ps)
|
||||
ps = re.sub(r' z(?=[;:,.!?¡¿—…"«»“” ]|$)', 'z', ps)
|
||||
if lang == 'a':
|
||||
ps = re.sub(r'(?<=nˈaɪn)ti(?!ː)', 'di', ps)
|
||||
ps = ''.join(filter(lambda p: p in VOCAB, ps))
|
||||
return ps.strip()
|
||||
|
||||
def length_to_mask(lengths):
|
||||
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
|
||||
mask = torch.gt(mask+1, lengths.unsqueeze(1))
|
||||
return mask
|
||||
|
||||
@torch.no_grad()
|
||||
def forward(model, tokens, ref_s, speed):
|
||||
device = ref_s.device
|
||||
tokens = torch.LongTensor([[0, *tokens, 0]]).to(device)
|
||||
input_lengths = torch.LongTensor([tokens.shape[-1]]).to(device)
|
||||
text_mask = length_to_mask(input_lengths).to(device)
|
||||
bert_dur = model.bert(tokens, attention_mask=(~text_mask).int())
|
||||
d_en = model.bert_encoder(bert_dur).transpose(-1, -2)
|
||||
s = ref_s[:, 128:]
|
||||
d = model.predictor.text_encoder(d_en, s, input_lengths, text_mask)
|
||||
x, _ = model.predictor.lstm(d)
|
||||
duration = model.predictor.duration_proj(x)
|
||||
duration = torch.sigmoid(duration).sum(axis=-1) / speed
|
||||
pred_dur = torch.round(duration).clamp(min=1).long()
|
||||
pred_aln_trg = torch.zeros(input_lengths, pred_dur.sum().item())
|
||||
c_frame = 0
|
||||
for i in range(pred_aln_trg.size(0)):
|
||||
pred_aln_trg[i, c_frame:c_frame + pred_dur[0,i].item()] = 1
|
||||
c_frame += pred_dur[0,i].item()
|
||||
en = d.transpose(-1, -2) @ pred_aln_trg.unsqueeze(0).to(device)
|
||||
F0_pred, N_pred = model.predictor.F0Ntrain(en, s)
|
||||
t_en = model.text_encoder(tokens, input_lengths, text_mask)
|
||||
asr = t_en @ pred_aln_trg.unsqueeze(0).to(device)
|
||||
return model.decoder(asr, F0_pred, N_pred, ref_s[:, :128]).squeeze().cpu().numpy()
|
||||
|
||||
def generate(model, text, voicepack, lang='a', speed=1, ps=None):
|
||||
ps = ps or phonemize(text, lang)
|
||||
tokens = tokenize(ps)
|
||||
if not tokens:
|
||||
return None
|
||||
elif len(tokens) > 510:
|
||||
tokens = tokens[:510]
|
||||
print('Truncated to 510 tokens')
|
||||
ref_s = voicepack[len(tokens)]
|
||||
out = forward(model, tokens, ref_s, speed)
|
||||
ps = ''.join(next(k for k, v in VOCAB.items() if i == v) for i in tokens)
|
||||
return out, ps
|
||||
|
||||
def generate_full(model, text, voicepack, lang='a', speed=1, ps=None):
|
||||
ps = ps or phonemize(text, lang)
|
||||
tokens = tokenize(ps)
|
||||
if not tokens:
|
||||
return None
|
||||
outs = []
|
||||
loop_count = len(tokens)//510 + (1 if len(tokens) % 510 != 0 else 0)
|
||||
for i in range(loop_count):
|
||||
ref_s = voicepack[len(tokens[i*510:(i+1)*510])]
|
||||
out = forward(model, tokens[i*510:(i+1)*510], ref_s, speed)
|
||||
outs.append(out)
|
||||
outs = np.concatenate(outs)
|
||||
ps = ''.join(next(k for k, v in VOCAB.items() if i == v) for i in tokens)
|
||||
return outs, ps
|
||||
@@ -1,373 +0,0 @@
|
||||
# https://github.com/yl4579/StyleTTS2/blob/main/models.py
|
||||
# https://huggingface.co/hexgrad/Kokoro-82M/blob/main/models.py
|
||||
from istftnet import AdaIN1d, Decoder
|
||||
from munch import Munch
|
||||
from pathlib import Path
|
||||
from plbert import load_plbert
|
||||
from torch.nn.utils import weight_norm, spectral_norm
|
||||
import json
|
||||
import numpy as np
|
||||
import os
|
||||
import os.path as osp
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
|
||||
class LinearNorm(torch.nn.Module):
|
||||
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
|
||||
super(LinearNorm, self).__init__()
|
||||
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
|
||||
|
||||
torch.nn.init.xavier_uniform_(
|
||||
self.linear_layer.weight,
|
||||
gain=torch.nn.init.calculate_gain(w_init_gain))
|
||||
|
||||
def forward(self, x):
|
||||
return self.linear_layer(x)
|
||||
|
||||
class LayerNorm(nn.Module):
|
||||
def __init__(self, channels, eps=1e-5):
|
||||
super().__init__()
|
||||
self.channels = channels
|
||||
self.eps = eps
|
||||
|
||||
self.gamma = nn.Parameter(torch.ones(channels))
|
||||
self.beta = nn.Parameter(torch.zeros(channels))
|
||||
|
||||
def forward(self, x):
|
||||
x = x.transpose(1, -1)
|
||||
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
|
||||
return x.transpose(1, -1)
|
||||
|
||||
class TextEncoder(nn.Module):
|
||||
def __init__(self, channels, kernel_size, depth, n_symbols, actv=nn.LeakyReLU(0.2)):
|
||||
super().__init__()
|
||||
self.embedding = nn.Embedding(n_symbols, channels)
|
||||
|
||||
padding = (kernel_size - 1) // 2
|
||||
self.cnn = nn.ModuleList()
|
||||
for _ in range(depth):
|
||||
self.cnn.append(nn.Sequential(
|
||||
weight_norm(nn.Conv1d(channels, channels, kernel_size=kernel_size, padding=padding)),
|
||||
LayerNorm(channels),
|
||||
actv,
|
||||
nn.Dropout(0.2),
|
||||
))
|
||||
# self.cnn = nn.Sequential(*self.cnn)
|
||||
|
||||
self.lstm = nn.LSTM(channels, channels//2, 1, batch_first=True, bidirectional=True)
|
||||
|
||||
def forward(self, x, input_lengths, m):
|
||||
x = self.embedding(x) # [B, T, emb]
|
||||
x = x.transpose(1, 2) # [B, emb, T]
|
||||
m = m.to(input_lengths.device).unsqueeze(1)
|
||||
x.masked_fill_(m, 0.0)
|
||||
|
||||
for c in self.cnn:
|
||||
x = c(x)
|
||||
x.masked_fill_(m, 0.0)
|
||||
|
||||
x = x.transpose(1, 2) # [B, T, chn]
|
||||
|
||||
input_lengths = input_lengths.cpu().numpy()
|
||||
x = nn.utils.rnn.pack_padded_sequence(
|
||||
x, input_lengths, batch_first=True, enforce_sorted=False)
|
||||
|
||||
self.lstm.flatten_parameters()
|
||||
x, _ = self.lstm(x)
|
||||
x, _ = nn.utils.rnn.pad_packed_sequence(
|
||||
x, batch_first=True)
|
||||
|
||||
x = x.transpose(-1, -2)
|
||||
x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
|
||||
|
||||
x_pad[:, :, :x.shape[-1]] = x
|
||||
x = x_pad.to(x.device)
|
||||
|
||||
x.masked_fill_(m, 0.0)
|
||||
|
||||
return x
|
||||
|
||||
def inference(self, x):
|
||||
x = self.embedding(x)
|
||||
x = x.transpose(1, 2)
|
||||
x = self.cnn(x)
|
||||
x = x.transpose(1, 2)
|
||||
self.lstm.flatten_parameters()
|
||||
x, _ = self.lstm(x)
|
||||
return x
|
||||
|
||||
def length_to_mask(self, lengths):
|
||||
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
|
||||
mask = torch.gt(mask+1, lengths.unsqueeze(1))
|
||||
return mask
|
||||
|
||||
|
||||
class UpSample1d(nn.Module):
|
||||
def __init__(self, layer_type):
|
||||
super().__init__()
|
||||
self.layer_type = layer_type
|
||||
|
||||
def forward(self, x):
|
||||
if self.layer_type == 'none':
|
||||
return x
|
||||
else:
|
||||
return F.interpolate(x, scale_factor=2, mode='nearest')
|
||||
|
||||
class AdainResBlk1d(nn.Module):
|
||||
def __init__(self, dim_in, dim_out, style_dim=64, actv=nn.LeakyReLU(0.2),
|
||||
upsample='none', dropout_p=0.0):
|
||||
super().__init__()
|
||||
self.actv = actv
|
||||
self.upsample_type = upsample
|
||||
self.upsample = UpSample1d(upsample)
|
||||
self.learned_sc = dim_in != dim_out
|
||||
self._build_weights(dim_in, dim_out, style_dim)
|
||||
self.dropout = nn.Dropout(dropout_p)
|
||||
|
||||
if upsample == 'none':
|
||||
self.pool = nn.Identity()
|
||||
else:
|
||||
self.pool = weight_norm(nn.ConvTranspose1d(dim_in, dim_in, kernel_size=3, stride=2, groups=dim_in, padding=1, output_padding=1))
|
||||
|
||||
|
||||
def _build_weights(self, dim_in, dim_out, style_dim):
|
||||
self.conv1 = weight_norm(nn.Conv1d(dim_in, dim_out, 3, 1, 1))
|
||||
self.conv2 = weight_norm(nn.Conv1d(dim_out, dim_out, 3, 1, 1))
|
||||
self.norm1 = AdaIN1d(style_dim, dim_in)
|
||||
self.norm2 = AdaIN1d(style_dim, dim_out)
|
||||
if self.learned_sc:
|
||||
self.conv1x1 = weight_norm(nn.Conv1d(dim_in, dim_out, 1, 1, 0, bias=False))
|
||||
|
||||
def _shortcut(self, x):
|
||||
x = self.upsample(x)
|
||||
if self.learned_sc:
|
||||
x = self.conv1x1(x)
|
||||
return x
|
||||
|
||||
def _residual(self, x, s):
|
||||
x = self.norm1(x, s)
|
||||
x = self.actv(x)
|
||||
x = self.pool(x)
|
||||
x = self.conv1(self.dropout(x))
|
||||
x = self.norm2(x, s)
|
||||
x = self.actv(x)
|
||||
x = self.conv2(self.dropout(x))
|
||||
return x
|
||||
|
||||
def forward(self, x, s):
|
||||
out = self._residual(x, s)
|
||||
out = (out + self._shortcut(x)) / np.sqrt(2)
|
||||
return out
|
||||
|
||||
class AdaLayerNorm(nn.Module):
|
||||
def __init__(self, style_dim, channels, eps=1e-5):
|
||||
super().__init__()
|
||||
self.channels = channels
|
||||
self.eps = eps
|
||||
|
||||
self.fc = nn.Linear(style_dim, channels*2)
|
||||
|
||||
def forward(self, x, s):
|
||||
x = x.transpose(-1, -2)
|
||||
x = x.transpose(1, -1)
|
||||
|
||||
h = self.fc(s)
|
||||
h = h.view(h.size(0), h.size(1), 1)
|
||||
gamma, beta = torch.chunk(h, chunks=2, dim=1)
|
||||
gamma, beta = gamma.transpose(1, -1), beta.transpose(1, -1)
|
||||
|
||||
|
||||
x = F.layer_norm(x, (self.channels,), eps=self.eps)
|
||||
x = (1 + gamma) * x + beta
|
||||
return x.transpose(1, -1).transpose(-1, -2)
|
||||
|
||||
class ProsodyPredictor(nn.Module):
|
||||
|
||||
def __init__(self, style_dim, d_hid, nlayers, max_dur=50, dropout=0.1):
|
||||
super().__init__()
|
||||
|
||||
self.text_encoder = DurationEncoder(sty_dim=style_dim,
|
||||
d_model=d_hid,
|
||||
nlayers=nlayers,
|
||||
dropout=dropout)
|
||||
|
||||
self.lstm = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
|
||||
self.duration_proj = LinearNorm(d_hid, max_dur)
|
||||
|
||||
self.shared = nn.LSTM(d_hid + style_dim, d_hid // 2, 1, batch_first=True, bidirectional=True)
|
||||
self.F0 = nn.ModuleList()
|
||||
self.F0.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
|
||||
self.F0.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
|
||||
self.F0.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
|
||||
|
||||
self.N = nn.ModuleList()
|
||||
self.N.append(AdainResBlk1d(d_hid, d_hid, style_dim, dropout_p=dropout))
|
||||
self.N.append(AdainResBlk1d(d_hid, d_hid // 2, style_dim, upsample=True, dropout_p=dropout))
|
||||
self.N.append(AdainResBlk1d(d_hid // 2, d_hid // 2, style_dim, dropout_p=dropout))
|
||||
|
||||
self.F0_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)
|
||||
self.N_proj = nn.Conv1d(d_hid // 2, 1, 1, 1, 0)
|
||||
|
||||
|
||||
def forward(self, texts, style, text_lengths, alignment, m):
|
||||
d = self.text_encoder(texts, style, text_lengths, m)
|
||||
|
||||
batch_size = d.shape[0]
|
||||
text_size = d.shape[1]
|
||||
|
||||
# predict duration
|
||||
input_lengths = text_lengths.cpu().numpy()
|
||||
x = nn.utils.rnn.pack_padded_sequence(
|
||||
d, input_lengths, batch_first=True, enforce_sorted=False)
|
||||
|
||||
m = m.to(text_lengths.device).unsqueeze(1)
|
||||
|
||||
self.lstm.flatten_parameters()
|
||||
x, _ = self.lstm(x)
|
||||
x, _ = nn.utils.rnn.pad_packed_sequence(
|
||||
x, batch_first=True)
|
||||
|
||||
x_pad = torch.zeros([x.shape[0], m.shape[-1], x.shape[-1]])
|
||||
|
||||
x_pad[:, :x.shape[1], :] = x
|
||||
x = x_pad.to(x.device)
|
||||
|
||||
duration = self.duration_proj(nn.functional.dropout(x, 0.5, training=self.training))
|
||||
|
||||
en = (d.transpose(-1, -2) @ alignment)
|
||||
|
||||
return duration.squeeze(-1), en
|
||||
|
||||
def F0Ntrain(self, x, s):
|
||||
x, _ = self.shared(x.transpose(-1, -2))
|
||||
|
||||
F0 = x.transpose(-1, -2)
|
||||
for block in self.F0:
|
||||
F0 = block(F0, s)
|
||||
F0 = self.F0_proj(F0)
|
||||
|
||||
N = x.transpose(-1, -2)
|
||||
for block in self.N:
|
||||
N = block(N, s)
|
||||
N = self.N_proj(N)
|
||||
|
||||
return F0.squeeze(1), N.squeeze(1)
|
||||
|
||||
def length_to_mask(self, lengths):
|
||||
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
|
||||
mask = torch.gt(mask+1, lengths.unsqueeze(1))
|
||||
return mask
|
||||
|
||||
class DurationEncoder(nn.Module):
|
||||
|
||||
def __init__(self, sty_dim, d_model, nlayers, dropout=0.1):
|
||||
super().__init__()
|
||||
self.lstms = nn.ModuleList()
|
||||
for _ in range(nlayers):
|
||||
self.lstms.append(nn.LSTM(d_model + sty_dim,
|
||||
d_model // 2,
|
||||
num_layers=1,
|
||||
batch_first=True,
|
||||
bidirectional=True,
|
||||
dropout=dropout))
|
||||
self.lstms.append(AdaLayerNorm(sty_dim, d_model))
|
||||
|
||||
|
||||
self.dropout = dropout
|
||||
self.d_model = d_model
|
||||
self.sty_dim = sty_dim
|
||||
|
||||
def forward(self, x, style, text_lengths, m):
|
||||
masks = m.to(text_lengths.device)
|
||||
|
||||
x = x.permute(2, 0, 1)
|
||||
s = style.expand(x.shape[0], x.shape[1], -1)
|
||||
x = torch.cat([x, s], axis=-1)
|
||||
x.masked_fill_(masks.unsqueeze(-1).transpose(0, 1), 0.0)
|
||||
|
||||
x = x.transpose(0, 1)
|
||||
input_lengths = text_lengths.cpu().numpy()
|
||||
x = x.transpose(-1, -2)
|
||||
|
||||
for block in self.lstms:
|
||||
if isinstance(block, AdaLayerNorm):
|
||||
x = block(x.transpose(-1, -2), style).transpose(-1, -2)
|
||||
x = torch.cat([x, s.permute(1, -1, 0)], axis=1)
|
||||
x.masked_fill_(masks.unsqueeze(-1).transpose(-1, -2), 0.0)
|
||||
else:
|
||||
x = x.transpose(-1, -2)
|
||||
x = nn.utils.rnn.pack_padded_sequence(
|
||||
x, input_lengths, batch_first=True, enforce_sorted=False)
|
||||
block.flatten_parameters()
|
||||
x, _ = block(x)
|
||||
x, _ = nn.utils.rnn.pad_packed_sequence(
|
||||
x, batch_first=True)
|
||||
x = F.dropout(x, p=self.dropout, training=self.training)
|
||||
x = x.transpose(-1, -2)
|
||||
|
||||
x_pad = torch.zeros([x.shape[0], x.shape[1], m.shape[-1]])
|
||||
|
||||
x_pad[:, :, :x.shape[-1]] = x
|
||||
x = x_pad.to(x.device)
|
||||
|
||||
return x.transpose(-1, -2)
|
||||
|
||||
def inference(self, x, style):
|
||||
x = self.embedding(x.transpose(-1, -2)) * np.sqrt(self.d_model)
|
||||
style = style.expand(x.shape[0], x.shape[1], -1)
|
||||
x = torch.cat([x, style], axis=-1)
|
||||
src = self.pos_encoder(x)
|
||||
output = self.transformer_encoder(src).transpose(0, 1)
|
||||
return output
|
||||
|
||||
def length_to_mask(self, lengths):
|
||||
mask = torch.arange(lengths.max()).unsqueeze(0).expand(lengths.shape[0], -1).type_as(lengths)
|
||||
mask = torch.gt(mask+1, lengths.unsqueeze(1))
|
||||
return mask
|
||||
|
||||
# https://github.com/yl4579/StyleTTS2/blob/main/utils.py
|
||||
def recursive_munch(d):
|
||||
if isinstance(d, dict):
|
||||
return Munch((k, recursive_munch(v)) for k, v in d.items())
|
||||
elif isinstance(d, list):
|
||||
return [recursive_munch(v) for v in d]
|
||||
else:
|
||||
return d
|
||||
|
||||
def build_model(path, device):
|
||||
config = Path(__file__).parent / 'config.json'
|
||||
assert config.exists(), f'Config path incorrect: config.json not found at {config}'
|
||||
with open(config, 'r') as r:
|
||||
args = recursive_munch(json.load(r))
|
||||
assert args.decoder.type == 'istftnet', f'Unknown decoder type: {args.decoder.type}'
|
||||
decoder = Decoder(dim_in=args.hidden_dim, style_dim=args.style_dim, dim_out=args.n_mels,
|
||||
resblock_kernel_sizes = args.decoder.resblock_kernel_sizes,
|
||||
upsample_rates = args.decoder.upsample_rates,
|
||||
upsample_initial_channel=args.decoder.upsample_initial_channel,
|
||||
resblock_dilation_sizes=args.decoder.resblock_dilation_sizes,
|
||||
upsample_kernel_sizes=args.decoder.upsample_kernel_sizes,
|
||||
gen_istft_n_fft=args.decoder.gen_istft_n_fft, gen_istft_hop_size=args.decoder.gen_istft_hop_size)
|
||||
text_encoder = TextEncoder(channels=args.hidden_dim, kernel_size=5, depth=args.n_layer, n_symbols=args.n_token)
|
||||
predictor = ProsodyPredictor(style_dim=args.style_dim, d_hid=args.hidden_dim, nlayers=args.n_layer, max_dur=args.max_dur, dropout=args.dropout)
|
||||
bert = load_plbert()
|
||||
bert_encoder = nn.Linear(bert.config.hidden_size, args.hidden_dim)
|
||||
for parent in [bert, bert_encoder, predictor, decoder, text_encoder]:
|
||||
for child in parent.children():
|
||||
if isinstance(child, nn.RNNBase):
|
||||
child.flatten_parameters()
|
||||
model = Munch(
|
||||
bert=bert.to(device).eval(),
|
||||
bert_encoder=bert_encoder.to(device).eval(),
|
||||
predictor=predictor.to(device).eval(),
|
||||
decoder=decoder.to(device).eval(),
|
||||
text_encoder=text_encoder.to(device).eval(),
|
||||
)
|
||||
for key, state_dict in torch.load(path, map_location='cpu', weights_only=True)['net'].items():
|
||||
assert key in model, key
|
||||
try:
|
||||
model[key].load_state_dict(state_dict)
|
||||
except:
|
||||
state_dict = {k[7:]: v for k, v in state_dict.items()}
|
||||
model[key].load_state_dict(state_dict, strict=False)
|
||||
return model
|
||||
@@ -1,16 +0,0 @@
|
||||
# https://huggingface.co/hexgrad/Kokoro-82M/blob/main/plbert.py
|
||||
# https://github.com/yl4579/StyleTTS2/blob/main/Utils/PLBERT/util.py
|
||||
from transformers import AlbertConfig, AlbertModel
|
||||
|
||||
class CustomAlbert(AlbertModel):
|
||||
def forward(self, *args, **kwargs):
|
||||
# Call the original forward method
|
||||
outputs = super().forward(*args, **kwargs)
|
||||
# Only return the last_hidden_state
|
||||
return outputs.last_hidden_state
|
||||
|
||||
def load_plbert():
|
||||
plbert_config = {'vocab_size': 178, 'hidden_size': 768, 'num_attention_heads': 12, 'intermediate_size': 2048, 'max_position_embeddings': 512, 'num_hidden_layers': 12, 'dropout': 0.1}
|
||||
albert_base_configuration = AlbertConfig(**plbert_config)
|
||||
bert = CustomAlbert(albert_base_configuration)
|
||||
return bert
|
||||
@@ -1,2 +1,6 @@
|
||||
torch==2.4.1
|
||||
transformers
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
transformers
|
||||
accelerate
|
||||
torch
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,3 +1,7 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cu118
|
||||
torch==2.4.1+cu118
|
||||
transformers
|
||||
torch==2.7.1+cu118
|
||||
torchaudio==2.7.1+cu118
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,2 +1,6 @@
|
||||
torch==2.4.1
|
||||
transformers
|
||||
torch==2.7.1
|
||||
torchaudio==2.7.1
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,3 +1,7 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.0
|
||||
torch==2.4.1+rocm6.0
|
||||
transformers
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.7.1+rocm6.3
|
||||
torchaudio==2.7.1+rocm6.3
|
||||
transformers
|
||||
accelerate
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,5 +1,11 @@
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
intel-extension-for-pytorch==2.3.110+xpu
|
||||
torch==2.3.1+cxx11.abi
|
||||
oneccl_bind_pt==2.3.100+xpu
|
||||
transformers
|
||||
intel-extension-for-pytorch==2.8.10+xpu
|
||||
torch==2.5.1+cxx11.abi
|
||||
oneccl_bind_pt==2.8.0+xpu
|
||||
torchaudio==2.5.1+cxx11.abi
|
||||
optimum[openvino]
|
||||
setuptools
|
||||
transformers==4.48.3
|
||||
accelerate
|
||||
kokoro
|
||||
soundfile
|
||||
@@ -1,7 +1,6 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
phonemizer
|
||||
scipy
|
||||
munch
|
||||
setuptools
|
||||
soundfile
|
||||
certifi
|
||||
packaging==24.1
|
||||
pip
|
||||
chardet
|
||||
87
backend/python/kokoro/test.py
Normal file
@@ -0,0 +1,87 @@
|
||||
"""
|
||||
A test script to test the gRPC service
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
This method sets up the gRPC service by starting the server
|
||||
"""
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(30)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
"""
|
||||
This method tears down the gRPC service by terminating the server
|
||||
"""
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
"""
|
||||
This method tests if the server starts up successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
"""
|
||||
This method tests if the Kokoro pipeline is loaded successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(language="a"))
|
||||
print(response)
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Kokoro TTS pipeline loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_tts(self):
|
||||
"""
|
||||
This method tests if the TTS generation works successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(language="a"))
|
||||
self.assertTrue(response.success)
|
||||
tts_request = backend_pb2.TTSRequest(
|
||||
text="Kokoro is an open-weight TTS model with 82 million parameters.",
|
||||
voice="af_heart",
|
||||
dst="test_output.wav"
|
||||
)
|
||||
tts_response = stub.TTS(tts_request)
|
||||
self.assertIsNotNone(tts_response)
|
||||
self.assertTrue(tts_response.success)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("TTS service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
23
backend/python/mlx-vlm/Makefile
Normal file
@@ -0,0 +1,23 @@
|
||||
.PHONY: mlx-vlm
|
||||
mlx-vlm:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: mlx-vlm
|
||||
@echo "Running mlx-vlm..."
|
||||
bash run.sh
|
||||
@echo "mlx run."
|
||||
|
||||
.PHONY: test
|
||||
test: mlx-vlm
|
||||
@echo "Testing mlx-vlm..."
|
||||
bash test.sh
|
||||
@echo "mlx tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
477
backend/python/mlx-vlm/backend.py
Normal file
@@ -0,0 +1,477 @@
|
||||
#!/usr/bin/env python3
|
||||
import asyncio
|
||||
from concurrent import futures
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
from typing import List
|
||||
import time
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
from mlx_vlm import load, generate, stream_generate
|
||||
from mlx_vlm.prompt_utils import apply_chat_template
|
||||
from mlx_vlm.utils import load_config, load_image
|
||||
import mlx.core as mx
|
||||
import base64
|
||||
import io
|
||||
from PIL import Image
|
||||
import tempfile
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
A gRPC servicer that implements the Backend service defined in backend.proto.
|
||||
"""
|
||||
|
||||
def _is_float(self, s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def _is_int(self, s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def Health(self, request, context):
|
||||
"""
|
||||
Returns a health check message.
|
||||
|
||||
Args:
|
||||
request: The health check request.
|
||||
context: The gRPC context.
|
||||
|
||||
Returns:
|
||||
backend_pb2.Reply: The health check reply.
|
||||
"""
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
async def LoadModel(self, request, context):
|
||||
"""
|
||||
Loads a multimodal vision-language model using MLX-VLM.
|
||||
|
||||
Args:
|
||||
request: The load model request.
|
||||
context: The gRPC context.
|
||||
|
||||
Returns:
|
||||
backend_pb2.Result: The load model result.
|
||||
"""
|
||||
try:
|
||||
print(f"Loading MLX-VLM model: {request.Model}", file=sys.stderr)
|
||||
print(f"Request: {request}", file=sys.stderr)
|
||||
|
||||
# Parse options like in the diffusers backend
|
||||
options = request.Options
|
||||
self.options = {}
|
||||
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We store all the options in a dict for later use
|
||||
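# Illustrative example (hypothetical values, not taken from a real request): an Options list such as
# ["temperature:0.7", "max_tokens:256", "trust_remote_code:true"] is parsed by the loop below into
# {"temperature": 0.7, "max_tokens": 256, "trust_remote_code": True}.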
for opt in options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1) # Split only on first colon to handle values with colons
|
||||
|
||||
# Convert numeric values to appropriate types
|
||||
if self._is_float(value):
|
||||
value = float(value)
|
||||
elif self._is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
|
||||
self.options[key] = value
|
||||
|
||||
print(f"Options: {self.options}", file=sys.stderr)
|
||||
|
||||
# Load model and processor using MLX-VLM
|
||||
# mlx-vlm load function returns (model, processor) instead of (model, tokenizer)
|
||||
self.model, self.processor = load(request.Model)
|
||||
|
||||
# Load model config for chat template support
|
||||
self.config = load_config(request.Model)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error loading MLX-VLM model {err=}, {type(err)=}", file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"Error loading MLX-VLM model: {err}")
|
||||
|
||||
print("MLX-VLM model loaded successfully", file=sys.stderr)
|
||||
return backend_pb2.Result(message="MLX-VLM model loaded successfully", success=True)
|
||||
|
||||
async def Predict(self, request, context):
|
||||
"""
|
||||
Generates text based on the given prompt and sampling parameters using MLX-VLM with multimodal support.
|
||||
|
||||
Args:
|
||||
request: The predict request.
|
||||
context: The gRPC context.
|
||||
|
||||
Returns:
|
||||
backend_pb2.Reply: The predict result.
|
||||
"""
|
||||
temp_files = []
|
||||
try:
|
||||
# Process images and audios from request
|
||||
image_paths = []
|
||||
audio_paths = []
|
||||
|
||||
# Process images
|
||||
if request.Images:
|
||||
for img_data in request.Images:
|
||||
img_path = self.load_image_from_base64(img_data)
|
||||
if img_path:
|
||||
image_paths.append(img_path)
|
||||
temp_files.append(img_path)
|
||||
|
||||
# Process audios
|
||||
if request.Audios:
|
||||
for audio_data in request.Audios:
|
||||
audio_path = self.load_audio_from_base64(audio_data)
|
||||
if audio_path:
|
||||
audio_paths.append(audio_path)
|
||||
temp_files.append(audio_path)
|
||||
|
||||
# Prepare the prompt with multimodal information
|
||||
prompt = self._prepare_prompt(request, num_images=len(image_paths), num_audios=len(audio_paths))
|
||||
|
||||
# Build generation parameters using request attributes and options
|
||||
max_tokens, generation_params = self._build_generation_params(request)
|
||||
|
||||
print(f"Generating text with MLX-VLM - max_tokens: {max_tokens}, params: {generation_params}", file=sys.stderr)
|
||||
print(f"Images: {len(image_paths)}, Audios: {len(audio_paths)}", file=sys.stderr)
|
||||
|
||||
# Generate text using MLX-VLM with multimodal inputs
|
||||
response = generate(
|
||||
model=self.model,
|
||||
processor=self.processor,
|
||||
prompt=prompt,
|
||||
image=image_paths if image_paths else None,
|
||||
audio=audio_paths if audio_paths else None,
|
||||
max_tokens=max_tokens,
|
||||
temperature=generation_params.get('temp', 0.6),
|
||||
top_p=generation_params.get('top_p', 1.0),
|
||||
verbose=False
|
||||
)
|
||||
|
||||
return backend_pb2.Reply(message=bytes(response, encoding='utf-8'))
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error in MLX-VLM Predict: {e}", file=sys.stderr)
|
||||
context.set_code(grpc.StatusCode.INTERNAL)
|
||||
context.set_details(f"Generation failed: {str(e)}")
|
||||
return backend_pb2.Reply(message=bytes("", encoding='utf-8'))
|
||||
finally:
|
||||
# Clean up temporary files
|
||||
self.cleanup_temp_files(temp_files)
|
||||
|
||||
def Embedding(self, request, context):
|
||||
"""
|
||||
A gRPC method that calculates embeddings for a given sentence.
|
||||
|
||||
Note: MLX-VLM doesn't support embeddings directly. This method returns an error.
|
||||
|
||||
Args:
|
||||
request: An EmbeddingRequest object that contains the request parameters.
|
||||
context: A grpc.ServicerContext object that provides information about the RPC.
|
||||
|
||||
Returns:
|
||||
An EmbeddingResult object that contains the calculated embeddings.
|
||||
"""
|
||||
print("Embeddings not supported in MLX-VLM backend", file=sys.stderr)
|
||||
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
|
||||
context.set_details("Embeddings are not supported in the MLX-VLM backend.")
|
||||
return backend_pb2.EmbeddingResult()
|
||||
|
||||
async def PredictStream(self, request, context):
|
||||
"""
|
||||
Generates text based on the given prompt and sampling parameters, and streams the results using MLX-VLM with multimodal support.
|
||||
|
||||
Args:
|
||||
request: The predict stream request.
|
||||
context: The gRPC context.
|
||||
|
||||
Yields:
|
||||
backend_pb2.Reply: Streaming predict results.
|
||||
"""
|
||||
temp_files = []
|
||||
try:
|
||||
# Process images and audios from request
|
||||
image_paths = []
|
||||
audio_paths = []
|
||||
|
||||
# Process images
|
||||
if request.Images:
|
||||
for img_data in request.Images:
|
||||
img_path = self.load_image_from_base64(img_data)
|
||||
if img_path:
|
||||
image_paths.append(img_path)
|
||||
temp_files.append(img_path)
|
||||
|
||||
# Process audios
|
||||
if request.Audios:
|
||||
for audio_data in request.Audios:
|
||||
audio_path = self.load_audio_from_base64(audio_data)
|
||||
if audio_path:
|
||||
audio_paths.append(audio_path)
|
||||
temp_files.append(audio_path)
|
||||
|
||||
# Prepare the prompt with multimodal information
|
||||
prompt = self._prepare_prompt(request, num_images=len(image_paths), num_audios=len(audio_paths))
|
||||
|
||||
# Build generation parameters using request attributes and options
|
||||
max_tokens, generation_params = self._build_generation_params(request, default_max_tokens=512)
|
||||
|
||||
print(f"Streaming text with MLX-VLM - max_tokens: {max_tokens}, params: {generation_params}", file=sys.stderr)
|
||||
print(f"Images: {len(image_paths)}, Audios: {len(audio_paths)}", file=sys.stderr)
|
||||
|
||||
# Stream text generation using MLX-VLM with multimodal inputs
|
||||
for response in stream_generate(
|
||||
model=self.model,
|
||||
processor=self.processor,
|
||||
prompt=prompt,
|
||||
image=image_paths if image_paths else None,
|
||||
audio=audio_paths if audio_paths else None,
|
||||
max_tokens=max_tokens,
|
||||
temperature=generation_params.get('temp', 0.6),
|
||||
top_p=generation_params.get('top_p', 1.0),
|
||||
):
|
||||
yield backend_pb2.Reply(message=bytes(response.text, encoding='utf-8'))
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error in MLX-VLM PredictStream: {e}", file=sys.stderr)
|
||||
context.set_code(grpc.StatusCode.INTERNAL)
|
||||
context.set_details(f"Streaming generation failed: {str(e)}")
|
||||
yield backend_pb2.Reply(message=bytes("", encoding='utf-8'))
|
||||
finally:
|
||||
# Clean up temporary files
|
||||
self.cleanup_temp_files(temp_files)
|
||||
|
||||
def _prepare_prompt(self, request, num_images=0, num_audios=0):
|
||||
"""
|
||||
Prepare the prompt for MLX-VLM generation, handling chat templates and multimodal inputs.
|
||||
|
||||
Args:
|
||||
request: The gRPC request containing prompt and message information.
|
||||
num_images: Number of images in the request.
|
||||
num_audios: Number of audio files in the request.
|
||||
|
||||
Returns:
|
||||
str: The prepared prompt.
|
||||
"""
|
||||
# If tokenizer template is enabled and messages are provided instead of prompt, apply the tokenizer template
|
||||
if not request.Prompt and request.UseTokenizerTemplate and request.Messages:
|
||||
# Convert gRPC messages to the format expected by apply_chat_template
|
||||
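# For example, a user turn followed by an assistant turn (contents are placeholders) becomes:
# [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]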
messages = []
|
||||
for msg in request.Messages:
|
||||
messages.append({"role": msg.role, "content": msg.content})
|
||||
|
||||
# Use mlx-vlm's apply_chat_template which handles multimodal inputs
|
||||
prompt = apply_chat_template(
|
||||
self.processor,
|
||||
self.config,
|
||||
messages,
|
||||
num_images=num_images,
|
||||
num_audios=num_audios
|
||||
)
|
||||
return prompt
|
||||
elif request.Prompt:
|
||||
# If we have a direct prompt but also have images/audio, we need to format it properly
|
||||
if num_images > 0 or num_audios > 0:
|
||||
# Create a simple message structure for multimodal prompt
|
||||
messages = [{"role": "user", "content": request.Prompt}]
|
||||
prompt = apply_chat_template(
|
||||
self.processor,
|
||||
self.config,
|
||||
messages,
|
||||
num_images=num_images,
|
||||
num_audios=num_audios
|
||||
)
|
||||
return prompt
|
||||
else:
|
||||
return request.Prompt
|
||||
else:
|
||||
# Fallback to empty prompt with multimodal template if we have media
|
||||
if num_images > 0 or num_audios > 0:
|
||||
messages = [{"role": "user", "content": ""}]
|
||||
prompt = apply_chat_template(
|
||||
self.processor,
|
||||
self.config,
|
||||
messages,
|
||||
num_images=num_images,
|
||||
num_audios=num_audios
|
||||
)
|
||||
return prompt
|
||||
else:
|
||||
return ""
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def _build_generation_params(self, request, default_max_tokens=200):
|
||||
"""
|
||||
Build generation parameters from request attributes and options for MLX-VLM.
|
||||
|
||||
Args:
|
||||
request: The gRPC request.
|
||||
default_max_tokens: Default max_tokens if not specified.
|
||||
|
||||
Returns:
|
||||
tuple: (max_tokens, generation_params dict)
|
||||
"""
|
||||
# Extract max_tokens
|
||||
max_tokens = getattr(request, 'Tokens', default_max_tokens)
|
||||
if max_tokens == 0:
|
||||
max_tokens = default_max_tokens
|
||||
|
||||
# Extract generation parameters from request attributes
|
||||
temp = getattr(request, 'Temperature', 0.0)
|
||||
if temp == 0.0:
|
||||
temp = 0.6 # Default temperature
|
||||
|
||||
top_p = getattr(request, 'TopP', 0.0)
|
||||
if top_p == 0.0:
|
||||
top_p = 1.0 # Default top_p
|
||||
|
||||
# Initialize generation parameters for MLX-VLM
|
||||
generation_params = {
|
||||
'temp': temp,
|
||||
'top_p': top_p,
|
||||
}
|
||||
|
||||
# Add seed if specified
|
||||
seed = getattr(request, 'Seed', 0)
|
||||
if seed != 0:
|
||||
mx.random.seed(seed)
|
||||
|
||||
# Override with options if available
|
||||
if hasattr(self, 'options'):
|
||||
# Max tokens from options
|
||||
if 'max_tokens' in self.options:
|
||||
max_tokens = self.options['max_tokens']
|
||||
|
||||
# Generation parameters from options
|
||||
param_option_mapping = {
|
||||
'temp': 'temp',
|
||||
'temperature': 'temp', # alias
|
||||
'top_p': 'top_p',
|
||||
}
|
||||
|
||||
for option_key, param_key in param_option_mapping.items():
|
||||
if option_key in self.options:
|
||||
generation_params[param_key] = self.options[option_key]
|
||||
|
||||
# Handle seed from options
|
||||
if 'seed' in self.options:
|
||||
mx.random.seed(self.options['seed'])
|
||||
|
||||
return max_tokens, generation_params
|
||||
|
||||
def load_image_from_base64(self, image_data: str):
|
||||
"""
|
||||
Load an image from base64 encoded data.
|
||||
|
||||
Args:
|
||||
image_data (str): Base64 encoded image data.
|
||||
|
||||
Returns:
|
||||
PIL.Image or str: The loaded image or path to the image.
|
||||
"""
|
||||
try:
|
||||
decoded_data = base64.b64decode(image_data)
|
||||
image = Image.open(io.BytesIO(decoded_data))
|
||||
|
||||
# Save to temporary file for mlx-vlm
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
|
||||
image.save(tmp_file.name, format='JPEG')
|
||||
return tmp_file.name
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error loading image from base64: {e}", file=sys.stderr)
|
||||
return None
|
||||
|
||||
def load_audio_from_base64(self, audio_data: str):
|
||||
"""
|
||||
Load audio from base64 encoded data.
|
||||
|
||||
Args:
|
||||
audio_data (str): Base64 encoded audio data.
|
||||
|
||||
Returns:
|
||||
str: Path to the loaded audio file.
|
||||
"""
|
||||
try:
|
||||
decoded_data = base64.b64decode(audio_data)
|
||||
|
||||
# Save to temporary file for mlx-vlm
|
||||
with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
|
||||
tmp_file.write(decoded_data)
|
||||
return tmp_file.name
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error loading audio from base64: {e}", file=sys.stderr)
|
||||
return None
|
||||
|
||||
def cleanup_temp_files(self, file_paths: List[str]):
|
||||
"""
|
||||
Clean up temporary files.
|
||||
|
||||
Args:
|
||||
file_paths (List[str]): List of file paths to clean up.
|
||||
"""
|
||||
for file_path in file_paths:
|
||||
try:
|
||||
if file_path and os.path.exists(file_path):
|
||||
os.remove(file_path)
|
||||
except Exception as e:
|
||||
print(f"Error removing temporary file {file_path}: {e}", file=sys.stderr)
|
||||
|
||||
async def serve(address):
|
||||
# Start asyncio gRPC server
|
||||
server = grpc.aio.server(migration_thread_pool=futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
# Add the servicer to the server
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
# Bind the server to the address
|
||||
server.add_insecure_port(address)
|
||||
|
||||
# Gracefully shutdown the server on SIGTERM or SIGINT
|
||||
loop = asyncio.get_event_loop()
|
||||
for sig in (signal.SIGINT, signal.SIGTERM):
|
||||
loop.add_signal_handler(
|
||||
sig, lambda: asyncio.ensure_future(server.stop(5))
|
||||
)
|
||||
|
||||
# Start the server
|
||||
await server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
# Wait for the server to be terminated
|
||||
await server.wait_for_termination()
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
asyncio.run(serve(args.addr))
|
||||
14
backend/python/mlx-vlm/install.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
USE_PIP=true
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
installRequirements
|
||||
1
backend/python/mlx-vlm/requirements-mps.txt
Normal file
@@ -0,0 +1 @@
|
||||
git+https://github.com/Blaizzy/mlx-vlm
|
||||
4
backend/python/mlx-vlm/requirements.txt
Normal file
@@ -0,0 +1,4 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
setuptools
|
||||
11
backend/python/mlx-vlm/run.sh
Executable file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
startBackend $@
|
||||
146
backend/python/mlx-vlm/test.py
Normal file
@@ -0,0 +1,146 @@
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service.
|
||||
|
||||
This class contains methods to test the startup and shutdown of the gRPC service.
|
||||
"""
|
||||
def setUp(self):
|
||||
self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(10)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
def test_load_model(self):
|
||||
"""
|
||||
This method tests if the model is loaded successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_text(self):
|
||||
"""
|
||||
This method tests if text is generated successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
|
||||
self.assertTrue(response.success)
|
||||
req = backend_pb2.PredictOptions(Prompt="The capital of France is")
|
||||
resp = stub.Predict(req)
|
||||
self.assertIsNotNone(resp.message)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("text service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_sampling_params(self):
|
||||
"""
|
||||
This method tests if all sampling parameters are correctly processed
|
||||
NOTE: this does NOT test for correctness, just that we received a compatible response
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
|
||||
self.assertTrue(response.success)
|
||||
|
||||
req = backend_pb2.PredictOptions(
|
||||
Prompt="The capital of France is",
|
||||
TopP=0.8,
|
||||
Tokens=50,
|
||||
Temperature=0.7,
|
||||
TopK=40,
|
||||
PresencePenalty=0.1,
|
||||
FrequencyPenalty=0.2,
|
||||
RepetitionPenalty=1.1,
|
||||
MinP=0.05,
|
||||
Seed=42,
|
||||
StopPrompts=["\n"],
|
||||
StopTokenIds=[50256],
|
||||
BadWords=["badword"],
|
||||
IncludeStopStrInOutput=True,
|
||||
IgnoreEOS=True,
|
||||
MinTokens=5,
|
||||
Logprobs=5,
|
||||
PromptLogprobs=5,
|
||||
SkipSpecialTokens=True,
|
||||
SpacesBetweenSpecialTokens=True,
|
||||
TruncatePromptTokens=10,
|
||||
GuidedDecoding=True,
|
||||
N=2,
|
||||
)
|
||||
resp = stub.Predict(req)
|
||||
self.assertIsNotNone(resp.message)
|
||||
self.assertIsNotNone(resp.logprobs)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("sampling params service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
|
||||
def test_embedding(self):
|
||||
"""
|
||||
This method tests if the embeddings are generated successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="intfloat/e5-mistral-7b-instruct"))
|
||||
self.assertTrue(response.success)
|
||||
embedding_request = backend_pb2.PredictOptions(Embeddings="This is a test sentence.")
|
||||
embedding_response = stub.Embedding(embedding_request)
|
||||
self.assertIsNotNone(embedding_response.embeddings)
|
||||
# assert that is a list of floats
|
||||
self.assertIsInstance(embedding_response.embeddings, list)
|
||||
# assert that the list is not empty
|
||||
self.assertTrue(len(embedding_response.embeddings) > 0)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Embedding service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
12
backend/python/mlx-vlm/test.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
runUnittests
|
||||
23
backend/python/mlx/Makefile
Normal file
@@ -0,0 +1,23 @@
|
||||
.PHONY: mlx
|
||||
mlx:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run:
|
||||
@echo "Running mlx..."
|
||||
bash run.sh
|
||||
@echo "mlx run."
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "Testing mlx..."
|
||||
bash test.sh
|
||||
@echo "mlx tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
376
backend/python/mlx/backend.py
Normal file
@@ -0,0 +1,376 @@
|
||||
#!/usr/bin/env python3
|
||||
import asyncio
|
||||
from concurrent import futures
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
from typing import List
|
||||
import time
|
||||
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
from mlx_lm import load, generate, stream_generate
|
||||
from mlx_lm.sample_utils import make_sampler
|
||||
from mlx_lm.models.cache import make_prompt_cache
|
||||
import mlx.core as mx
|
||||
import base64
|
||||
import io
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
A gRPC servicer that implements the Backend service defined in backend.proto.
|
||||
"""
|
||||
|
||||
def _is_float(self, s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def _is_int(self, s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def Health(self, request, context):
|
||||
"""
|
||||
Returns a health check message.
|
||||
|
||||
Args:
|
||||
request: The health check request.
|
||||
context: The gRPC context.
|
||||
|
||||
Returns:
|
||||
backend_pb2.Reply: The health check reply.
|
||||
"""
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
|
||||
async def LoadModel(self, request, context):
|
||||
"""
|
||||
Loads a language model using MLX.
|
||||
|
||||
Args:
|
||||
request: The load model request.
|
||||
context: The gRPC context.
|
||||
|
||||
Returns:
|
||||
backend_pb2.Result: The load model result.
|
||||
"""
|
||||
try:
|
||||
print(f"Loading MLX model: {request.Model}", file=sys.stderr)
|
||||
print(f"Request: {request}", file=sys.stderr)
|
||||
|
||||
# Parse options like in the diffusers backend
|
||||
options = request.Options
|
||||
self.options = {}
|
||||
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We store all the options in a dict for later use
|
||||
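# Example with hypothetical values: ["temp:0.8", "max_kv_size:4096", "trust_remote_code:true"]
# becomes {"temp": 0.8, "max_kv_size": 4096, "trust_remote_code": True} after the conversions below.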
for opt in options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":", 1) # Split only on first colon to handle values with colons
|
||||
|
||||
# Convert numeric values to appropriate types
|
||||
if self._is_float(value):
|
||||
value = float(value)
|
||||
elif self._is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
|
||||
self.options[key] = value
|
||||
|
||||
print(f"Options: {self.options}", file=sys.stderr)
|
||||
|
||||
# Build tokenizer config for MLX using options
|
||||
tokenizer_config = {}
|
||||
|
||||
# Handle trust_remote_code from request or options
|
||||
if request.TrustRemoteCode or self.options.get("trust_remote_code", False):
|
||||
tokenizer_config["trust_remote_code"] = True
|
||||
|
||||
# Handle EOS token from options
|
||||
if "eos_token" in self.options:
|
||||
tokenizer_config["eos_token"] = self.options["eos_token"]
|
||||
|
||||
# Handle other tokenizer config options
|
||||
for key in ["pad_token", "bos_token", "unk_token", "sep_token", "cls_token", "mask_token"]:
|
||||
if key in self.options:
|
||||
tokenizer_config[key] = self.options[key]
|
||||
|
||||
# Load model and tokenizer using MLX
|
||||
if tokenizer_config:
|
||||
print(f"Loading with tokenizer_config: {tokenizer_config}", file=sys.stderr)
|
||||
self.model, self.tokenizer = load(request.Model, tokenizer_config=tokenizer_config)
|
||||
else:
|
||||
self.model, self.tokenizer = load(request.Model)
|
||||
|
||||
# Initialize prompt cache for efficient generation
|
||||
max_kv_size = self.options.get("max_kv_size", None)
|
||||
self.prompt_cache = make_prompt_cache(self.model, max_kv_size)
|
||||
|
||||
except Exception as err:
|
||||
print(f"Error loading MLX model {err=}, {type(err)=}", file=sys.stderr)
|
||||
return backend_pb2.Result(success=False, message=f"Error loading MLX model: {err}")
|
||||
|
||||
print("MLX model loaded successfully", file=sys.stderr)
|
||||
return backend_pb2.Result(message="MLX model loaded successfully", success=True)
|
||||
|
||||
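
    # Example (illustrative): the Options parsing in LoadModel above turns entries
    # of the form "name:value" into typed Python values. A hypothetical request with
    #     Options=["max_kv_size:1024", "trust_remote_code:true", "temp:0.7"]
    # would produce
    #     self.options == {"max_kv_size": 1024, "trust_remote_code": True, "temp": 0.7}
    # Values containing extra colons (for example an "eos_token:..." option) keep
    # everything after the first colon as a plain string.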

    async def Predict(self, request, context):
        """
        Generates text based on the given prompt and sampling parameters using MLX.

        Args:
            request: The predict request.
            context: The gRPC context.

        Returns:
            backend_pb2.Reply: The predict result.
        """
        try:
            # Prepare the prompt
            prompt = self._prepare_prompt(request)

            # Build generation parameters using request attributes and options
            max_tokens, sampler_params = self._build_generation_params(request)

            print(f"Generating text with MLX - max_tokens: {max_tokens}, sampler_params: {sampler_params}", file=sys.stderr)

            # Create sampler with parameters
            sampler = make_sampler(**sampler_params)

            # Generate text using MLX with proper parameters
            response = generate(
                self.model,
                self.tokenizer,
                prompt=prompt,
                max_tokens=max_tokens,
                sampler=sampler,
                prompt_cache=self.prompt_cache,
                verbose=False
            )

            return backend_pb2.Reply(message=bytes(response, encoding='utf-8'))

        except Exception as e:
            print(f"Error in MLX Predict: {e}", file=sys.stderr)
            context.set_code(grpc.StatusCode.INTERNAL)
            context.set_details(f"Generation failed: {str(e)}")
            return backend_pb2.Reply(message=bytes("", encoding='utf-8'))
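
    # Minimal standalone sketch of the mlx_lm calls Predict relies on (load,
    # make_sampler and generate are used exactly as above; the model id below is
    # only a placeholder, assuming any MLX-format model directory or HF repo id):
    #
    #     from mlx_lm import load, generate
    #     from mlx_lm.sample_utils import make_sampler
    #
    #     model, tokenizer = load("some-mlx-model")    # placeholder id
    #     sampler = make_sampler(temp=0.6, top_p=1.0)  # defaults used by this backend
    #     text = generate(model, tokenizer, prompt="Hello", max_tokens=200,
    #                     sampler=sampler, verbose=False)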

    def Embedding(self, request, context):
        """
        A gRPC method that calculates embeddings for a given sentence.

        Note: MLX-LM doesn't support embeddings directly. This method returns an error.

        Args:
            request: An EmbeddingRequest object that contains the request parameters.
            context: A grpc.ServicerContext object that provides information about the RPC.

        Returns:
            An empty backend_pb2.EmbeddingResult (embeddings are not supported).
        """
        print("Embeddings not supported in MLX backend", file=sys.stderr)
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Embeddings are not supported in the MLX backend.")
        return backend_pb2.EmbeddingResult()

    async def PredictStream(self, request, context):
        """
        Generates text based on the given prompt and sampling parameters, and streams the results using MLX.

        Args:
            request: The predict stream request.
            context: The gRPC context.

        Yields:
            backend_pb2.Reply: Streaming predict results.
        """
        try:
            # Prepare the prompt
            prompt = self._prepare_prompt(request)

            # Build generation parameters using request attributes and options
            max_tokens, sampler_params = self._build_generation_params(request, default_max_tokens=512)

            print(f"Streaming text with MLX - max_tokens: {max_tokens}, sampler_params: {sampler_params}", file=sys.stderr)

            # Create sampler with parameters
            sampler = make_sampler(**sampler_params)

            # Stream text generation using MLX with proper parameters
            for response in stream_generate(
                self.model,
                self.tokenizer,
                prompt=prompt,
                max_tokens=max_tokens,
                sampler=sampler,
                prompt_cache=self.prompt_cache,
            ):
                yield backend_pb2.Reply(message=bytes(response.text, encoding='utf-8'))

        except Exception as e:
            print(f"Error in MLX PredictStream: {e}", file=sys.stderr)
            context.set_code(grpc.StatusCode.INTERNAL)
            context.set_details(f"Streaming generation failed: {str(e)}")
            yield backend_pb2.Reply(message=bytes("", encoding='utf-8'))

    def _prepare_prompt(self, request):
        """
        Prepare the prompt for MLX generation, handling chat templates if needed.

        Args:
            request: The gRPC request containing prompt and message information.

        Returns:
            str: The prepared prompt.
        """
        # If the tokenizer template is enabled and messages are provided instead of a prompt,
        # apply the tokenizer's chat template
        if not request.Prompt and request.UseTokenizerTemplate and request.Messages:
            # Convert gRPC messages to the format expected by apply_chat_template
            messages = []
            for msg in request.Messages:
                messages.append({"role": msg.role, "content": msg.content})

            prompt = self.tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
            return prompt
        else:
            return request.Prompt
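
    # Example (illustrative): with UseTokenizerTemplate set and no raw Prompt,
    # _prepare_prompt above converts the gRPC messages into the list form expected
    # by apply_chat_template, e.g.
    #     [{"role": "user", "content": "What is MLX?"}]
    # and the tokenizer renders it with the loaded model's own chat template, so the
    # exact output string (including special tokens) depends on the model.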

    def _build_generation_params(self, request, default_max_tokens=200):
        """
        Build generation parameters from request attributes and options.

        Args:
            request: The gRPC request.
            default_max_tokens: Default max_tokens if not specified.

        Returns:
            tuple: (max_tokens, sampler_params dict)
        """
        # Extract max_tokens
        max_tokens = getattr(request, 'Tokens', default_max_tokens)
        if max_tokens == 0:
            max_tokens = default_max_tokens

        # Extract sampler parameters from request attributes
        temp = getattr(request, 'Temperature', 0.0)
        if temp == 0.0:
            temp = 0.6  # Default temperature

        top_p = getattr(request, 'TopP', 0.0)
        if top_p == 0.0:
            top_p = 1.0  # Default top_p

        # Initialize sampler parameters
        sampler_params = {
            'temp': temp,
            'top_p': top_p,
            'xtc_threshold': 0.0,
            'xtc_probability': 0.0,
        }

        # Add seed if specified
        seed = getattr(request, 'Seed', 0)
        if seed != 0:
            mx.random.seed(seed)

        # Override with options if available
        if hasattr(self, 'options'):
            # Max tokens from options
            if 'max_tokens' in self.options:
                max_tokens = self.options['max_tokens']

            # Sampler parameters from options
            sampler_option_mapping = {
                'temp': 'temp',
                'temperature': 'temp',  # alias
                'top_p': 'top_p',
                'xtc_threshold': 'xtc_threshold',
                'xtc_probability': 'xtc_probability',
            }

            for option_key, param_key in sampler_option_mapping.items():
                if option_key in self.options:
                    sampler_params[param_key] = self.options[option_key]

            # Handle seed from options
            if 'seed' in self.options:
                mx.random.seed(self.options['seed'])

        # Special tokens for XTC sampling (if the tokenizer has eos_token_ids)
        xtc_special_tokens = []
        if hasattr(self.tokenizer, 'eos_token_ids') and self.tokenizer.eos_token_ids:
            xtc_special_tokens = list(self.tokenizer.eos_token_ids)
        elif hasattr(self.tokenizer, 'eos_token_id') and self.tokenizer.eos_token_id is not None:
            xtc_special_tokens = [self.tokenizer.eos_token_id]

        # Add newline token if available
        try:
            newline_tokens = self.tokenizer.encode("\n")
            xtc_special_tokens.extend(newline_tokens)
        except Exception:
            pass  # Skip if encoding fails

        sampler_params['xtc_special_tokens'] = xtc_special_tokens

        return max_tokens, sampler_params
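
# Worked example (illustrative) of how _build_generation_params resolves defaults:
# a request with Tokens=0, Temperature=0.0 and TopP=0.0 and no overriding options
# ends up with max_tokens=200 (512 when called from PredictStream), temp=0.6 and
# top_p=1.0; non-zero request values are used as-is, and any matching keys in
# self.options (for example "temperature:0.2") take precedence over both.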

async def serve(address):
    # Start asyncio gRPC server
    server = grpc.aio.server(migration_thread_pool=futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
                             options=[
                                 ('grpc.max_message_length', 50 * 1024 * 1024),  # 50MB
                                 ('grpc.max_send_message_length', 50 * 1024 * 1024),  # 50MB
                                 ('grpc.max_receive_message_length', 50 * 1024 * 1024),  # 50MB
                             ])
    # Add the servicer to the server
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    # Bind the server to the address
    server.add_insecure_port(address)

    # Gracefully shutdown the server on SIGTERM or SIGINT
    loop = asyncio.get_event_loop()
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(
            sig, lambda: asyncio.ensure_future(server.stop(5))
        )

    # Start the server
    await server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)
    # Wait for the server to be terminated
    await server.wait_for_termination()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the gRPC server.")
    parser.add_argument(
        "--addr", default="localhost:50051", help="The address to bind the server to."
    )
    args = parser.parse_args()

    asyncio.run(serve(args.addr))
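
For context, a minimal client sketch against this backend could look as follows. It is a hedged example rather than part of the change: it assumes the backend_pb2/backend_pb2_grpc stubs generated from LocalAI's backend.proto are importable, that the request messages are named HealthMessage, ModelOptions and PredictOptions (the fields Model, Options, Prompt, Tokens and Temperature are the ones the servicer above reads), and that the model id is a placeholder.

# client_sketch.py (illustrative; assumptions noted above)
import grpc
import backend_pb2
import backend_pb2_grpc

def main():
    # Connect to the address the backend was started with (--addr).
    channel = grpc.insecure_channel("localhost:50051")
    stub = backend_pb2_grpc.BackendStub(channel)

    # Health check: the servicer replies with b"OK".
    print(stub.Health(backend_pb2.HealthMessage()).message)

    # Load a model; Options uses the "name:value" format parsed by LoadModel.
    result = stub.LoadModel(backend_pb2.ModelOptions(
        Model="some-mlx-model",          # placeholder model id
        Options=["max_kv_size:1024"],
    ))
    print(result.success, result.message)

    # Non-streaming generation via Predict.
    reply = stub.Predict(backend_pb2.PredictOptions(
        Prompt="Hello from the MLX backend",
        Tokens=64,
        Temperature=0.7,
    ))
    print(reply.message.decode("utf-8"))

if __name__ == "__main__":
    main()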
backend/python/mlx/install.sh (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/bin/bash
set -e

USE_PIP=true
PYTHON_VERSION=""

backend_dir=$(dirname $0)

if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

installRequirements
Some files were not shown because too many files have changed in this diff.