Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-07 13:14:19 -05:00)

Compare commits: llama_cpp/ ... v3.6.0 (366 commits)
[Commit table: 366 commits, 8fb95686af through b7b3164736. Only the SHA1 column survived extraction; the Author and Date columns are missing.]
@@ -6,6 +6,10 @@ models
 backends
 examples/chatbot-ui/models
 backend/go/image/stablediffusion-ggml/build/
+backend/go/*/build
+backend/go/*/.cache
+backend/go/*/sources
+backend/go/*/package
 examples/rwkv/models
 examples/**/models
 Dockerfile*
.github/workflows/backend.yml (vendored): 470 changed lines
@@ -87,6 +87,42 @@ jobs:
           backend: "diffusers"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
+        - build-type: 'l4t'
+          cuda-major-version: "12"
+          cuda-minor-version: "0"
+          platforms: 'linux/arm64'
+          tag-latest: 'auto'
+          tag-suffix: '-gpu-nvidia-l4t-diffusers'
+          runs-on: 'ubuntu-24.04-arm'
+          base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
+          skip-drivers: 'true'
+          backend: "diffusers"
+          dockerfile: "./backend/Dockerfile.python"
+          context: "./backend"
+        - build-type: ''
+          cuda-major-version: ""
+          cuda-minor-version: ""
+          platforms: 'linux/amd64'
+          tag-latest: 'auto'
+          tag-suffix: '-cpu-diffusers'
+          runs-on: 'ubuntu-latest'
+          base-image: "ubuntu:22.04"
+          skip-drivers: 'true'
+          backend: "diffusers"
+          dockerfile: "./backend/Dockerfile.python"
+          context: "./backend"
+        - build-type: ''
+          cuda-major-version: ""
+          cuda-minor-version: ""
+          platforms: 'linux/amd64'
+          tag-latest: 'auto'
+          tag-suffix: '-cpu-chatterbox'
+          runs-on: 'ubuntu-latest'
+          base-image: "ubuntu:22.04"
+          skip-drivers: 'true'
+          backend: "chatterbox"
+          dockerfile: "./backend/Dockerfile.python"
+          context: "./backend"
         # CUDA 11 additional backends
         - build-type: 'cublas'
           cuda-major-version: "11"
@@ -179,7 +215,7 @@ jobs:
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-vllm'
-          runs-on: 'ubuntu-latest'
+          runs-on: 'arc-runner-set'
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "vllm"
@@ -206,7 +242,7 @@ jobs:
           runs-on: 'ubuntu-latest'
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "diffusers"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
         # CUDA 12 additional backends
@@ -278,7 +314,7 @@ jobs:
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-rerankers'
           runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "rerankers"
           dockerfile: "./backend/Dockerfile.python"
@@ -290,7 +326,7 @@ jobs:
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-llama-cpp'
           runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "llama-cpp"
           dockerfile: "./backend/Dockerfile.llama-cpp"
@@ -301,8 +337,8 @@ jobs:
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-vllm'
-          runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          runs-on: 'arc-runner-set'
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "vllm"
           dockerfile: "./backend/Dockerfile.python"
@@ -313,8 +349,8 @@ jobs:
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-transformers'
-          runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          runs-on: 'arc-runner-set'
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "transformers"
           dockerfile: "./backend/Dockerfile.python"
@@ -325,8 +361,8 @@ jobs:
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-diffusers'
-          runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          runs-on: 'arc-runner-set'
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "diffusers"
           dockerfile: "./backend/Dockerfile.python"
@@ -338,8 +374,8 @@ jobs:
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-kokoro'
-          runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          runs-on: 'arc-runner-set'
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "kokoro"
           dockerfile: "./backend/Dockerfile.python"
@@ -351,7 +387,7 @@ jobs:
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-faster-whisper'
           runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "faster-whisper"
           dockerfile: "./backend/Dockerfile.python"
@@ -363,7 +399,7 @@ jobs:
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-coqui'
           runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "coqui"
           dockerfile: "./backend/Dockerfile.python"
@@ -374,31 +410,19 @@ jobs:
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-rocm-hipblas-bark'
-          runs-on: 'ubuntu-latest'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          runs-on: 'arc-runner-set'
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           skip-drivers: 'false'
           backend: "bark"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
         # sycl builds
-        - build-type: 'sycl_f32'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-rerankers'
-          runs-on: 'ubuntu-latest'
-          base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-          skip-drivers: 'false'
-          backend: "rerankers"
-          dockerfile: "./backend/Dockerfile.python"
-          context: "./backend"
-        - build-type: 'sycl_f16'
-          cuda-major-version: ""
-          cuda-minor-version: ""
-          platforms: 'linux/amd64'
-          tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f16-rerankers'
+          tag-suffix: '-gpu-intel-rerankers'
           runs-on: 'ubuntu-latest'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
@@ -429,157 +453,97 @@ jobs:
           backend: "llama-cpp"
           dockerfile: "./backend/Dockerfile.llama-cpp"
           context: "./"
-        - build-type: 'sycl_f32'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-vllm'
-          runs-on: 'ubuntu-latest'
+          tag-suffix: '-gpu-intel-vllm'
+          runs-on: 'arc-runner-set'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "vllm"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
-        - build-type: 'sycl_f16'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f16-vllm'
-          runs-on: 'ubuntu-latest'
-          base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-          skip-drivers: 'false'
-          backend: "vllm"
-          dockerfile: "./backend/Dockerfile.python"
-          context: "./backend"
-        - build-type: 'sycl_f32'
-          cuda-major-version: ""
-          cuda-minor-version: ""
-          platforms: 'linux/amd64'
-          tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-transformers'
+          tag-suffix: '-gpu-intel-transformers'
           runs-on: 'ubuntu-latest'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "transformers"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
-        - build-type: 'sycl_f16'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f16-transformers'
-          runs-on: 'ubuntu-latest'
-          base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-          skip-drivers: 'false'
-          backend: "transformers"
-          dockerfile: "./backend/Dockerfile.python"
-          context: "./backend"
-        - build-type: 'sycl_f32'
-          cuda-major-version: ""
-          cuda-minor-version: ""
-          platforms: 'linux/amd64'
-          tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-diffusers'
+          tag-suffix: '-gpu-intel-diffusers'
           runs-on: 'ubuntu-latest'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "diffusers"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
+        - build-type: 'l4t'
+          cuda-major-version: "12"
+          cuda-minor-version: "0"
+          platforms: 'linux/arm64'
+          tag-latest: 'auto'
+          tag-suffix: '-gpu-nvidia-l4t-kokoro'
+          runs-on: 'ubuntu-24.04-arm'
+          base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
+          skip-drivers: 'true'
+          backend: "kokoro"
+          dockerfile: "./backend/Dockerfile.python"
+          context: "./backend"
         # SYCL additional backends
-        - build-type: 'sycl_f32'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-kokoro'
+          tag-suffix: '-gpu-intel-kokoro'
           runs-on: 'ubuntu-latest'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "kokoro"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
-        - build-type: 'sycl_f16'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f16-kokoro'
-          runs-on: 'ubuntu-latest'
-          base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-          skip-drivers: 'false'
-          backend: "kokoro"
-          dockerfile: "./backend/Dockerfile.python"
-          context: "./backend"
-        - build-type: 'sycl_f32'
-          cuda-major-version: ""
-          cuda-minor-version: ""
-          platforms: 'linux/amd64'
-          tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-faster-whisper'
+          tag-suffix: '-gpu-intel-faster-whisper'
           runs-on: 'ubuntu-latest'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "faster-whisper"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
-        - build-type: 'sycl_f16'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f16-faster-whisper'
-          runs-on: 'ubuntu-latest'
-          base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-          skip-drivers: 'false'
-          backend: "faster-whisper"
-          dockerfile: "./backend/Dockerfile.python"
-          context: "./backend"
-        - build-type: 'sycl_f32'
-          cuda-major-version: ""
-          cuda-minor-version: ""
-          platforms: 'linux/amd64'
-          tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-coqui'
+          tag-suffix: '-gpu-intel-coqui'
           runs-on: 'ubuntu-latest'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "coqui"
           dockerfile: "./backend/Dockerfile.python"
           context: "./backend"
-        - build-type: 'sycl_f16'
+        - build-type: 'intel'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f16-coqui'
-          runs-on: 'ubuntu-latest'
-          base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-          skip-drivers: 'false'
-          backend: "coqui"
-          dockerfile: "./backend/Dockerfile.python"
-          context: "./backend"
-        - build-type: 'sycl_f32'
-          cuda-major-version: ""
-          cuda-minor-version: ""
-          platforms: 'linux/amd64'
-          tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f32-bark'
-          runs-on: 'ubuntu-latest'
-          base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-          skip-drivers: 'false'
-          backend: "bark"
-          dockerfile: "./backend/Dockerfile.python"
-          context: "./backend"
-        - build-type: 'sycl_f16'
-          cuda-major-version: ""
-          cuda-minor-version: ""
-          platforms: 'linux/amd64'
-          tag-latest: 'auto'
-          tag-suffix: '-gpu-intel-sycl-f16-bark'
+          tag-suffix: '-gpu-intel-bark'
           runs-on: 'ubuntu-latest'
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
@@ -597,7 +561,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "piper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         # bark-cpp
         - build-type: ''
@@ -610,7 +574,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "bark-cpp"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: ''
           cuda-major-version: ""
@@ -659,7 +623,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "stablediffusion-ggml"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
@@ -671,7 +635,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "stablediffusion-ggml"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "11"
@@ -683,7 +647,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "stablediffusion-ggml"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'sycl_f32'
           cuda-major-version: ""
@@ -695,7 +659,7 @@ jobs:
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "stablediffusion-ggml"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'sycl_f16'
           cuda-major-version: ""
@@ -707,7 +671,7 @@ jobs:
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "stablediffusion-ggml"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'vulkan'
           cuda-major-version: ""
@@ -719,7 +683,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "stablediffusion-ggml"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
@@ -731,7 +695,7 @@ jobs:
           base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
           runs-on: 'ubuntu-24.04-arm'
           backend: "stablediffusion-ggml"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         # whisper
         - build-type: ''
@@ -744,7 +708,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
@@ -756,7 +720,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "11"
@@ -768,7 +732,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'sycl_f32'
           cuda-major-version: ""
@@ -780,7 +744,7 @@ jobs:
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'sycl_f16'
           cuda-major-version: ""
@@ -792,7 +756,7 @@ jobs:
           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
           skip-drivers: 'false'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'vulkan'
           cuda-major-version: ""
@@ -804,7 +768,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
@@ -816,19 +780,19 @@ jobs:
           base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
           runs-on: 'ubuntu-24.04-arm'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         - build-type: 'hipblas'
           cuda-major-version: ""
           cuda-minor-version: ""
           platforms: 'linux/amd64'
           tag-latest: 'auto'
-          tag-suffix: '-gpu-hipblas-whisper'
-          base-image: "rocm/dev-ubuntu-22.04:6.1"
+          tag-suffix: '-gpu-rocm-hipblas-whisper'
+          base-image: "rocm/dev-ubuntu-22.04:6.4.3"
           runs-on: 'ubuntu-latest'
           skip-drivers: 'false'
           backend: "whisper"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         #silero-vad
         - build-type: ''
@@ -841,7 +805,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "silero-vad"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         # local-store
         - build-type: ''
@@ -854,7 +818,7 @@ jobs:
           base-image: "ubuntu:22.04"
           skip-drivers: 'false'
           backend: "local-store"
-          dockerfile: "./backend/Dockerfile.go"
+          dockerfile: "./backend/Dockerfile.golang"
           context: "./"
         # huggingface
         - build-type: ''
|
|||||||
base-image: "ubuntu:22.04"
|
base-image: "ubuntu:22.04"
|
||||||
skip-drivers: 'false'
|
skip-drivers: 'false'
|
||||||
backend: "huggingface"
|
backend: "huggingface"
|
||||||
dockerfile: "./backend/Dockerfile.go"
|
dockerfile: "./backend/Dockerfile.golang"
|
||||||
context: "./"
|
context: "./"
|
||||||
|
# rfdetr
|
||||||
|
- build-type: ''
|
||||||
|
cuda-major-version: ""
|
||||||
|
cuda-minor-version: ""
|
||||||
|
platforms: 'linux/amd64,linux/arm64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-cpu-rfdetr'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "ubuntu:22.04"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "rfdetr"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'cublas'
|
||||||
|
cuda-major-version: "12"
|
||||||
|
cuda-minor-version: "0"
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "ubuntu:22.04"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "rfdetr"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'cublas'
|
||||||
|
cuda-major-version: "11"
|
||||||
|
cuda-minor-version: "7"
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-nvidia-cuda-11-rfdetr'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "ubuntu:22.04"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "rfdetr"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'intel'
|
||||||
|
cuda-major-version: ""
|
||||||
|
cuda-minor-version: ""
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-intel-rfdetr'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "rfdetr"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'l4t'
|
||||||
|
cuda-major-version: "12"
|
||||||
|
cuda-minor-version: "0"
|
||||||
|
platforms: 'linux/arm64'
|
||||||
|
skip-drivers: 'true'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-nvidia-l4t-arm64-rfdetr'
|
||||||
|
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||||
|
runs-on: 'ubuntu-24.04-arm'
|
||||||
|
backend: "rfdetr"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
# exllama2
|
||||||
|
- build-type: ''
|
||||||
|
cuda-major-version: ""
|
||||||
|
cuda-minor-version: ""
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-cpu-exllama2'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "ubuntu:22.04"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "exllama2"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'cublas'
|
||||||
|
cuda-major-version: "12"
|
||||||
|
cuda-minor-version: "0"
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "ubuntu:22.04"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "exllama2"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'cublas'
|
||||||
|
cuda-major-version: "11"
|
||||||
|
cuda-minor-version: "7"
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-nvidia-cuda-11-exllama2'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "ubuntu:22.04"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "exllama2"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'intel'
|
||||||
|
cuda-major-version: ""
|
||||||
|
cuda-minor-version: ""
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-intel-exllama2'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "exllama2"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'hipblas'
|
||||||
|
cuda-major-version: ""
|
||||||
|
cuda-minor-version: ""
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
skip-drivers: 'true'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-hipblas-exllama2'
|
||||||
|
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
backend: "exllama2"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
- build-type: 'l4t'
|
||||||
|
cuda-major-version: "12"
|
||||||
|
cuda-minor-version: "0"
|
||||||
|
platforms: 'linux/arm64'
|
||||||
|
skip-drivers: 'true'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-gpu-nvidia-l4t-arm64-chatterbox'
|
||||||
|
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||||
|
runs-on: 'ubuntu-24.04-arm'
|
||||||
|
backend: "chatterbox"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
# runs out of space on the runner
|
||||||
|
# - build-type: 'hipblas'
|
||||||
|
# cuda-major-version: ""
|
||||||
|
# cuda-minor-version: ""
|
||||||
|
# platforms: 'linux/amd64'
|
||||||
|
# tag-latest: 'auto'
|
||||||
|
# tag-suffix: '-gpu-hipblas-rfdetr'
|
||||||
|
# base-image: "rocm/dev-ubuntu-22.04:6.4.3"
|
||||||
|
# runs-on: 'ubuntu-latest'
|
||||||
|
# skip-drivers: 'false'
|
||||||
|
# backend: "rfdetr"
|
||||||
|
# dockerfile: "./backend/Dockerfile.python"
|
||||||
|
# context: "./backend"
|
||||||
|
# kitten-tts
|
||||||
|
- build-type: ''
|
||||||
|
cuda-major-version: ""
|
||||||
|
cuda-minor-version: ""
|
||||||
|
platforms: 'linux/amd64,linux/arm64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
tag-suffix: '-kitten-tts'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
base-image: "ubuntu:22.04"
|
||||||
|
skip-drivers: 'false'
|
||||||
|
backend: "kitten-tts"
|
||||||
|
dockerfile: "./backend/Dockerfile.python"
|
||||||
|
context: "./backend"
|
||||||
|
backend-jobs-darwin:
|
||||||
|
uses: ./.github/workflows/backend_build_darwin.yml
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
include:
|
||||||
|
- backend: "diffusers"
|
||||||
|
tag-suffix: "-metal-darwin-arm64-diffusers"
|
||||||
|
build-type: "mps"
|
||||||
|
- backend: "mlx"
|
||||||
|
tag-suffix: "-metal-darwin-arm64-mlx"
|
||||||
|
build-type: "mps"
|
||||||
|
- backend: "chatterbox"
|
||||||
|
tag-suffix: "-metal-darwin-arm64-chatterbox"
|
||||||
|
build-type: "mps"
|
||||||
|
- backend: "mlx-vlm"
|
||||||
|
tag-suffix: "-metal-darwin-arm64-mlx-vlm"
|
||||||
|
build-type: "mps"
|
||||||
|
- backend: "mlx-audio"
|
||||||
|
tag-suffix: "-metal-darwin-arm64-mlx-audio"
|
||||||
|
build-type: "mps"
|
||||||
|
- backend: "stablediffusion-ggml"
|
||||||
|
tag-suffix: "-metal-darwin-arm64-stablediffusion-ggml"
|
||||||
|
build-type: "metal"
|
||||||
|
lang: "go"
|
||||||
|
- backend: "whisper"
|
||||||
|
tag-suffix: "-metal-darwin-arm64-whisper"
|
||||||
|
build-type: "metal"
|
||||||
|
lang: "go"
|
||||||
|
with:
|
||||||
|
backend: ${{ matrix.backend }}
|
||||||
|
build-type: ${{ matrix.build-type }}
|
||||||
|
go-version: "1.24.x"
|
||||||
|
tag-suffix: ${{ matrix.tag-suffix }}
|
||||||
|
lang: ${{ matrix.lang || 'python' }}
|
||||||
|
use-pip: ${{ matrix.backend == 'diffusers' }}
|
||||||
|
runs-on: "macOS-14"
|
||||||
|
secrets:
|
||||||
|
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||||
|
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||||
|
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||||
llama-cpp-darwin:
|
llama-cpp-darwin:
|
||||||
runs-on: macOS-14
|
runs-on: macOS-14
|
||||||
strategy:
|
strategy:
|
||||||
@@ -876,7 +1041,7 @@ jobs:
         go-version: ['1.21.x']
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          submodules: true
      - name: Setup Go ${{ matrix.go-version }}
@@ -893,21 +1058,19 @@ jobs:
       - name: Build llama-cpp-darwin
         run: |
           make protogen-go
-          make build
-          bash scripts/build-llama-cpp-darwin.sh
-          ls -la build/darwin.tar
-          mv build/darwin.tar build/llama-cpp.tar
+          make backends/llama-cpp-darwin
       - name: Upload llama-cpp.tar
         uses: actions/upload-artifact@v4
         with:
           name: llama-cpp-tar
-          path: build/llama-cpp.tar
+          path: backend-images/llama-cpp.tar
   llama-cpp-darwin-publish:
     needs: llama-cpp-darwin
+    if: github.event_name != 'pull_request'
     runs-on: ubuntu-latest
     steps:
       - name: Download llama-cpp.tar
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
         with:
           name: llama-cpp-tar
           path: .
@@ -964,7 +1127,7 @@ jobs:
         go-version: ['1.21.x']
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
        with:
          submodules: true
      - name: Setup Go ${{ matrix.go-version }}
@@ -983,20 +1146,19 @@ jobs:
           make protogen-go
           make build
           export PLATFORMARCH=darwin/amd64
-          bash scripts/build-llama-cpp-darwin.sh
-          ls -la build/darwin.tar
-          mv build/darwin.tar build/llama-cpp.tar
+          make backends/llama-cpp-darwin
       - name: Upload llama-cpp.tar
         uses: actions/upload-artifact@v4
         with:
           name: llama-cpp-tar-x86
-          path: build/llama-cpp.tar
+          path: backend-images/llama-cpp.tar
   llama-cpp-darwin-x86-publish:
+    if: github.event_name != 'pull_request'
     needs: llama-cpp-darwin-x86
     runs-on: ubuntu-latest
     steps:
       - name: Download llama-cpp.tar
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
         with:
           name: llama-cpp-tar-x86
           path: .
@@ -1045,4 +1207,4 @@ jobs:
         run: |
           for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
             crane push llama-cpp.tar $tag
           done
.github/workflows/backend_build.yml (vendored): 14 changed lines
@@ -55,9 +55,9 @@ on:
         type: string
     secrets:
       dockerUsername:
-        required: true
+        required: false
       dockerPassword:
-        required: true
+        required: false
       quayUsername:
         required: true
       quayPassword:
@@ -66,6 +66,8 @@ on:
 jobs:
   backend-build:
     runs-on: ${{ inputs.runs-on }}
+    env:
+      quay_username: ${{ secrets.quayUsername }}
     steps:

@@ -95,7 +97,7 @@ jobs:
           && sudo apt-get install -y git

       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Release space from worker
         if: inputs.runs-on == 'ubuntu-latest'
@@ -187,7 +189,7 @@ jobs:
           password: ${{ secrets.dockerPassword }}

       - name: Login to Quay.io
-        # if: github.event_name != 'pull_request'
+        if: ${{ env.quay_username != '' }}
         uses: docker/login-action@v3
         with:
           registry: quay.io
@@ -230,7 +232,7 @@ jobs:
           file: ${{ inputs.dockerfile }}
           cache-from: type=gha
           platforms: ${{ inputs.platforms }}
-          push: true
+          push: ${{ env.quay_username != '' }}
           tags: ${{ steps.meta_pull_request.outputs.tags }}
           labels: ${{ steps.meta_pull_request.outputs.labels }}
@@ -238,4 +240,4 @@ jobs:

       - name: job summary
         run: |
           echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY
.github/workflows/backend_build_darwin.yml (vendored, new file): 144 lines
@@ -0,0 +1,144 @@
+---
+name: 'build darwin python backend container images (reusable)'
+
+on:
+  workflow_call:
+    inputs:
+      backend:
+        description: 'Backend to build'
+        required: true
+        type: string
+      build-type:
+        description: 'Build type (e.g., mps)'
+        default: ''
+        type: string
+      use-pip:
+        description: 'Use pip to install dependencies'
+        default: false
+        type: boolean
+      lang:
+        description: 'Programming language (e.g. go)'
+        default: 'python'
+        type: string
+      go-version:
+        description: 'Go version to use'
+        default: '1.24.x'
+        type: string
+      tag-suffix:
+        description: 'Tag suffix for the built image'
+        required: true
+        type: string
+      runs-on:
+        description: 'Runner to use'
+        default: 'macOS-14'
+        type: string
+    secrets:
+      dockerUsername:
+        required: false
+      dockerPassword:
+        required: false
+      quayUsername:
+        required: true
+      quayPassword:
+        required: true
+
+jobs:
+  darwin-backend-build:
+    runs-on: ${{ inputs.runs-on }}
+    strategy:
+      matrix:
+        go-version: ['${{ inputs.go-version }}']
+    steps:
+      - name: Clone
+        uses: actions/checkout@v5
+        with:
+          submodules: true
+
+      - name: Setup Go ${{ matrix.go-version }}
+        uses: actions/setup-go@v5
+        with:
+          go-version: ${{ matrix.go-version }}
+          cache: false
+
+      # You can test your matrix by printing the current Go version
+      - name: Display Go version
+        run: go version
+
+      - name: Dependencies
+        run: |
+          brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
+
+      - name: Build ${{ inputs.backend }}-darwin
+        run: |
+          make protogen-go
+          BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend
+
+      - name: Upload ${{ inputs.backend }}.tar
+        uses: actions/upload-artifact@v4
+        with:
+          name: ${{ inputs.backend }}-tar
+          path: backend-images/${{ inputs.backend }}.tar
+
+  darwin-backend-publish:
+    needs: darwin-backend-build
+    if: github.event_name != 'pull_request'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Download ${{ inputs.backend }}.tar
+        uses: actions/download-artifact@v5
+        with:
+          name: ${{ inputs.backend }}-tar
+          path: .
+
+      - name: Install crane
+        run: |
+          curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
+          sudo mv crane /usr/local/bin/
+
+      - name: Log in to DockerHub
+        run: |
+          echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin
+
+      - name: Log in to quay.io
+        run: |
+          echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin
+
+      - name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: |
+            localai/localai-backends
+          tags: |
+            type=ref,event=branch
+            type=semver,pattern={{raw}}
+            type=sha
+          flavor: |
+            latest=auto
+            suffix=${{ inputs.tag-suffix }},onlatest=true
+
+      - name: Docker meta
+        id: quaymeta
+        uses: docker/metadata-action@v5
+        with:
+          images: |
+            quay.io/go-skynet/local-ai-backends
+          tags: |
+            type=ref,event=branch
+            type=semver,pattern={{raw}}
+            type=sha
+          flavor: |
+            latest=auto
+            suffix=${{ inputs.tag-suffix }},onlatest=true
+
+      - name: Push Docker image (DockerHub)
+        run: |
+          for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
+            crane push ${{ inputs.backend }}.tar $tag
+          done
+
+      - name: Push Docker image (Quay)
+        run: |
+          for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
+            crane push ${{ inputs.backend }}.tar $tag
+          done
.github/workflows/backend_pr.yml (vendored, new file): 78 lines
@@ -0,0 +1,78 @@
|
|||||||
|
name: 'build backend container images (PR-filtered)'
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
|
||||||
|
concurrency:
|
||||||
|
group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
|
||||||
|
cancel-in-progress: true
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
generate-matrix:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
outputs:
|
||||||
|
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||||
|
matrix-darwin: ${{ steps.set-matrix.outputs.matrix-darwin }}
|
||||||
|
has-backends: ${{ steps.set-matrix.outputs.has-backends }}
|
||||||
|
has-backends-darwin: ${{ steps.set-matrix.outputs.has-backends-darwin }}
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v5
|
||||||
|
|
||||||
|
- name: Setup Bun
|
||||||
|
uses: oven-sh/setup-bun@v2
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
bun add js-yaml
|
||||||
|
bun add @octokit/core
|
||||||
|
|
||||||
|
# filters the matrix in backend.yml
|
||||||
|
- name: Filter matrix for changed backends
|
||||||
|
id: set-matrix
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
GITHUB_EVENT_PATH: ${{ github.event_path }}
|
||||||
|
run: bun run scripts/changed-backends.js
|
||||||
|
|
||||||
|
backend-jobs:
|
||||||
|
needs: generate-matrix
|
||||||
|
uses: ./.github/workflows/backend_build.yml
|
||||||
|
if: needs.generate-matrix.outputs.has-backends == 'true'
|
||||||
|
with:
|
||||||
|
tag-latest: ${{ matrix.tag-latest }}
|
||||||
|
tag-suffix: ${{ matrix.tag-suffix }}
|
||||||
|
build-type: ${{ matrix.build-type }}
|
||||||
|
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||||
|
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||||
|
platforms: ${{ matrix.platforms }}
|
||||||
|
runs-on: ${{ matrix.runs-on }}
|
||||||
|
base-image: ${{ matrix.base-image }}
|
||||||
|
backend: ${{ matrix.backend }}
|
||||||
|
dockerfile: ${{ matrix.dockerfile }}
|
||||||
|
skip-drivers: ${{ matrix.skip-drivers }}
|
||||||
|
context: ${{ matrix.context }}
|
||||||
|
secrets:
|
||||||
|
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||||
|
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
|
||||||
|
backend-jobs-darwin:
|
||||||
|
needs: generate-matrix
|
||||||
|
uses: ./.github/workflows/backend_build_darwin.yml
|
||||||
|
if: needs.generate-matrix.outputs.has-backends-darwin == 'true'
|
||||||
|
with:
|
||||||
|
backend: ${{ matrix.backend }}
|
||||||
|
build-type: ${{ matrix.build-type }}
|
||||||
|
go-version: "1.24.x"
|
||||||
|
tag-suffix: ${{ matrix.tag-suffix }}
|
||||||
|
lang: ${{ matrix.lang || 'python' }}
|
||||||
|
use-pip: ${{ matrix.backend == 'diffusers' }}
|
||||||
|
runs-on: "macOS-14"
|
||||||
|
secrets:
|
||||||
|
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
|
||||||
|
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: true
|
||||||
|
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix-darwin) }}
|
||||||
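The filter step delegates to scripts/changed-backends.js, run with Bun against the GitHub API, so that pull requests only rebuild the backends they actually touch. As a rough shell sketch of the same idea (assuming backend definitions live under backend/ and the base branch is master; the real script uses Octokit and the PR event payload, not git):

```bash
# Approximate the PR filter locally: list backend directories that
# differ from the base branch (illustrative only).
git fetch origin master
git diff --name-only origin/master...HEAD \
  | grep '^backend/' \
  | cut -d/ -f1-3 \
  | sort -u
```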
46 .github/workflows/build-test.yaml vendored
@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
       - name: Set up Go
@@ -21,3 +21,47 @@ jobs:
       - name: Run GoReleaser
         run: |
           make dev-dist
+
+  launcher-build-darwin:
+    runs-on: macos-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.23
+      - name: Build launcher for macOS ARM64
+        run: |
+          make build-launcher-darwin
+          ls -liah dist
+      - name: Upload macOS launcher artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: launcher-macos
+          path: dist/
+          retention-days: 30
+
+  launcher-build-linux:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.23
+      - name: Build launcher for Linux
+        run: |
+          sudo apt-get update
+          sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
+          make build-launcher-linux
+      - name: Upload Linux launcher artifacts
+        uses: actions/upload-artifact@v4
+        with:
+          name: launcher-linux
+          path: local-ai-launcher-linux.tar.xz
+          retention-days: 30
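Both launcher jobs publish workflow artifacts rather than release assets, so each run's binaries can be inspected individually. One way to fetch them locally, assuming the GitHub CLI is available (RUN_ID is a placeholder for an actual run id):

```bash
# Download the launcher artifacts from a specific workflow run.
gh run download RUN_ID --name launcher-macos
gh run download RUN_ID --name launcher-linux
```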
4 .github/workflows/bump_deps.yaml vendored
@@ -21,7 +21,7 @@ jobs:
           variable: "BARKCPP_VERSION"
           branch: "main"
           file: "Makefile"
-        - repository: "richiejp/stable-diffusion.cpp"
+        - repository: "leejet/stable-diffusion.cpp"
           variable: "STABLEDIFFUSION_GGML_VERSION"
           branch: "master"
           file: "backend/go/stablediffusion-ggml/Makefile"
@@ -31,7 +31,7 @@ jobs:
           file: "backend/go/piper/Makefile"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Bump dependencies 🔧
        id: bump
        run: |
2 .github/workflows/bump_docs.yaml vendored
@@ -12,7 +12,7 @@ jobs:
         - repository: "mudler/LocalAI"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Bump dependencies 🔧
        run: |
          bash .github/bump_docs.sh ${{ matrix.repository }}
2 .github/workflows/checksum_checker.yaml vendored
@@ -15,7 +15,7 @@ jobs:
           && sudo add-apt-repository -y ppa:git-core/ppa \
           && sudo apt-get update \
           && sudo apt-get install -y git
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Install dependencies
        run: |
          sudo apt-get update
2 .github/workflows/dependabot_auto.yml vendored
@@ -20,7 +20,7 @@ jobs:
           skip-commit-verification: true
 
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Approve a PR if not already approved
         run: |
2 .github/workflows/deploy-explorer.yaml vendored
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - uses: actions/setup-go@v5
2 .github/workflows/generate_grpc_cache.yaml vendored
@@ -73,7 +73,7 @@ jobs:
         uses: docker/setup-buildx-action@master
 
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Cache GRPC
         uses: docker/build-push-action@v6
2 .github/workflows/generate_intel_image.yaml vendored
@@ -43,7 +43,7 @@ jobs:
         uses: docker/setup-buildx-action@master
 
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Cache Intel images
         uses: docker/build-push-action@v6
8 .github/workflows/image-pr.yml vendored
@@ -39,7 +39,7 @@ jobs:
            cuda-minor-version: "0"
            platforms: 'linux/amd64'
            tag-latest: 'false'
-           tag-suffix: '-gpu-nvidia-cuda12'
+           tag-suffix: '-gpu-nvidia-cuda-12'
            runs-on: 'ubuntu-latest'
            base-image: "ubuntu:22.04"
            makeflags: "--jobs=3 --output-sync=target"
@@ -47,16 +47,16 @@ jobs:
            platforms: 'linux/amd64'
            tag-latest: 'false'
            tag-suffix: '-hipblas'
-           base-image: "rocm/dev-ubuntu-22.04:6.1"
+           base-image: "rocm/dev-ubuntu-22.04:6.4.3"
            grpc-base-image: "ubuntu:22.04"
            runs-on: 'ubuntu-latest'
            makeflags: "--jobs=3 --output-sync=target"
-         - build-type: 'sycl_f16'
+         - build-type: 'sycl'
            platforms: 'linux/amd64'
            tag-latest: 'false'
            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
            grpc-base-image: "ubuntu:22.04"
-           tag-suffix: 'sycl-f16'
+           tag-suffix: 'sycl'
            runs-on: 'ubuntu-latest'
            makeflags: "--jobs=3 --output-sync=target"
          - build-type: 'vulkan'
23 .github/workflows/image.yml vendored
@@ -39,7 +39,7 @@ jobs:
            platforms: 'linux/amd64'
            tag-latest: 'auto'
            tag-suffix: '-gpu-hipblas'
-           base-image: "rocm/dev-ubuntu-22.04:6.1"
+           base-image: "rocm/dev-ubuntu-22.04:6.4.3"
            grpc-base-image: "ubuntu:22.04"
            runs-on: 'ubuntu-latest'
            makeflags: "--jobs=3 --output-sync=target"
@@ -83,7 +83,7 @@ jobs:
            cuda-minor-version: "7"
            platforms: 'linux/amd64'
            tag-latest: 'auto'
-           tag-suffix: '-gpu-nvidia-cuda11'
+           tag-suffix: '-gpu-nvidia-cuda-11'
            runs-on: 'ubuntu-latest'
            base-image: "ubuntu:22.04"
            makeflags: "--jobs=4 --output-sync=target"
@@ -94,7 +94,7 @@ jobs:
            cuda-minor-version: "0"
            platforms: 'linux/amd64'
            tag-latest: 'auto'
-           tag-suffix: '-gpu-nvidia-cuda12'
+           tag-suffix: '-gpu-nvidia-cuda-12'
            runs-on: 'ubuntu-latest'
            base-image: "ubuntu:22.04"
            skip-drivers: 'false'
@@ -103,30 +103,21 @@ jobs:
          - build-type: 'vulkan'
            platforms: 'linux/amd64'
            tag-latest: 'auto'
-           tag-suffix: '-vulkan'
+           tag-suffix: '-gpu-vulkan'
            runs-on: 'ubuntu-latest'
            base-image: "ubuntu:22.04"
            skip-drivers: 'false'
            makeflags: "--jobs=4 --output-sync=target"
            aio: "-aio-gpu-vulkan"
-         - build-type: 'sycl_f16'
+         - build-type: 'intel'
            platforms: 'linux/amd64'
            tag-latest: 'auto'
            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
            grpc-base-image: "ubuntu:22.04"
-           tag-suffix: '-gpu-intel-f16'
+           tag-suffix: '-gpu-intel'
            runs-on: 'ubuntu-latest'
            makeflags: "--jobs=3 --output-sync=target"
-           aio: "-aio-gpu-intel-f16"
-         - build-type: 'sycl_f32'
-           platforms: 'linux/amd64'
-           tag-latest: 'auto'
-           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-           grpc-base-image: "ubuntu:22.04"
-           tag-suffix: '-gpu-intel-f32'
-           runs-on: 'ubuntu-latest'
-           makeflags: "--jobs=3 --output-sync=target"
-           aio: "-aio-gpu-intel-f32"
+           aio: "-aio-gpu-intel"
 
   gh-runner:
     uses: ./.github/workflows/image_build.yml
2 .github/workflows/image_build.yml vendored
@@ -94,7 +94,7 @@ jobs:
           && sudo apt-get update \
           && sudo apt-get install -y git
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Release space from worker
         if: inputs.runs-on == 'ubuntu-latest'
2 .github/workflows/labeler.yml vendored
@@ -9,4 +9,4 @@ jobs:
       pull-requests: write
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/labeler@v5
+    - uses: actions/labeler@v6
5 .github/workflows/localaibot_automerge.yml vendored
@@ -6,14 +6,15 @@ permissions:
   contents: write
   pull-requests: write
   packages: read
+  issues: write # for Homebrew/actions/post-comment
+  actions: write # to dispatch publish workflow
 jobs:
   dependabot:
     runs-on: ubuntu-latest
     if: ${{ github.actor == 'localai-bot' }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Approve a PR if not already approved
         run: |
4 .github/workflows/notify-models.yaml vendored
@@ -11,7 +11,7 @@ jobs:
       MODEL_NAME: gemma-3-12b-it
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          fetch-depth: 0 # needed to checkout all branches for this Action to work
      - uses: mudler/localai-github-action@v1
@@ -90,7 +90,7 @@ jobs:
       MODEL_NAME: gemma-3-12b-it
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          fetch-depth: 0 # needed to checkout all branches for this Action to work
      - name: Start LocalAI
42 .github/workflows/release.yaml vendored
@@ -10,7 +10,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
       - name: Set up Go
@@ -23,4 +23,42 @@ jobs:
           version: v2.11.0
           args: release --clean
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  launcher-build-darwin:
+    runs-on: macos-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.23
+      - name: Build launcher for macOS ARM64
+        run: |
+          make build-launcher-darwin
+      - name: Upload DMG to Release
+        uses: softprops/action-gh-release@v2
+        with:
+          files: ./dist/LocalAI.dmg
+
+  launcher-build-linux:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.23
+      - name: Build launcher for Linux
+        run: |
+          sudo apt-get update
+          sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
+          make build-launcher-linux
+      - name: Upload Linux launcher artifacts
+        uses: softprops/action-gh-release@v2
+        with:
+          files: ./local-ai-launcher-linux.tar.xz
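On tagged releases the launcher binaries are attached as release assets instead of workflow artifacts, so they are reachable from the stable latest-release URL. The DMG link below is the same one the README later in this diff starts advertising; the Linux filename assumes the asset keeps the uploaded name:

```bash
# Fetch the launcher binaries from the latest GitHub release.
curl -LO https://github.com/mudler/LocalAI/releases/latest/download/LocalAI.dmg
curl -LO https://github.com/mudler/LocalAI/releases/latest/download/local-ai-launcher-linux.tar.xz
```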
4 .github/workflows/secscan.yaml vendored
@@ -14,11 +14,11 @@ jobs:
       GO111MODULE: on
     steps:
       - name: Checkout Source
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         if: ${{ github.actor != 'dependabot[bot]' }}
       - name: Run Gosec Security Scanner
         if: ${{ github.actor != 'dependabot[bot]' }}
-        uses: securego/gosec@v2.22.7
+        uses: securego/gosec@v2.22.9
         with:
           # we let the report trigger content trigger a failure using the GitHub Security features.
           args: '-no-fail -fmt sarif -out results.sarif ./...'
2 .github/workflows/stalebot.yml vendored
@@ -10,7 +10,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
+      - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v9
        with:
          stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
          stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
18 .github/workflows/test-extra.yml vendored
@@ -19,7 +19,7 @@ jobs:
 #    runs-on: ubuntu-latest
 #    steps:
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -40,7 +40,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -61,7 +61,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -83,7 +83,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -104,7 +104,7 @@ jobs:
 #    runs-on: ubuntu-latest
 #    steps:
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -124,7 +124,7 @@ jobs:
 #    runs-on: ubuntu-latest
 #    steps:
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -186,7 +186,7 @@ jobs:
 #          sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
 #          df -h
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -211,7 +211,7 @@ jobs:
 #    runs-on: ubuntu-latest
 #    steps:
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -232,7 +232,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
26 .github/workflows/test.yml vendored
@@ -23,6 +23,20 @@ jobs:
       matrix:
         go-version: ['1.21.x']
     steps:
+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@main
+        with:
+          # this might remove tools that are actually needed,
+          # if set to "true" but frees about 6 GB
+          tool-cache: true
+          # all of these default to true, but feel free to set to
+          # "false" if necessary for your workflow
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: true
+          docker-images: true
+          swap-storage: true
       - name: Release space from worker
         run: |
           echo "Listing top largest packages"
@@ -56,7 +70,7 @@ jobs:
           sudo rm -rfv build || true
           df -h
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Setup Go ${{ matrix.go-version }}
@@ -152,7 +166,7 @@ jobs:
           sudo rm -rfv build || true
           df -h
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -182,7 +196,7 @@ jobs:
         go-version: ['1.21.x']
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Setup Go ${{ matrix.go-version }}
@@ -200,11 +214,7 @@ jobs:
       - name: Build llama-cpp-darwin
         run: |
           make protogen-go
-          make build
-          bash scripts/build-llama-cpp-darwin.sh
-          ls -la build/darwin.tar
-          mv build/darwin.tar build/llama-cpp.tar
-          ./local-ai backends install "ocifile://$PWD/build/llama-cpp.tar"
+          make backends/llama-cpp-darwin
       - name: Test
         run: |
           export C_INCLUDE_PATH=/usr/local/include
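The five inline build steps are collapsed into the new `make backends/llama-cpp-darwin` target. Per the Makefile changes later in this diff, that target is roughly equivalent to:

```bash
# What `make backends/llama-cpp-darwin` runs under the hood,
# after the main binary has been built.
bash ./scripts/build/llama-cpp-darwin.sh
./local-ai backends install "ocifile://$PWD/backend-images/llama-cpp.tar"
```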
2 .github/workflows/update_swagger.yaml vendored
@@ -9,7 +9,7 @@ jobs:
       fail-fast: false
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - uses: actions/setup-go@v5
         with:
           go-version: 'stable'
3 .gitignore vendored
@@ -12,6 +12,7 @@ prepare-sources
 /backends
 /backend-images
 /result.yaml
+protoc
 
 *.log
 
@@ -23,7 +24,7 @@ go-bert
 
 # LocalAI build binary
 LocalAI
-local-ai
+/local-ai
 # prevent above rules from omitting the helm chart
 !charts/*
 # prevent above rules from omitting the api/localai folder
@@ -8,7 +8,7 @@ source:
   enabled: true
   name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
 builds:
-  -
+  - main: ./cmd/local-ai
     env:
       - CGO_ENABLED=0
     ldflags:
24 Dockerfile
@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
     ca-certificates curl wget espeak-ng libgomp1 \
-    python3 python-is-python3 ffmpeg && \
+    ffmpeg libopenblas-base libopenblas-dev && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
@@ -72,6 +72,22 @@ RUN <<EOT bash
   fi
 EOT
 
+RUN <<EOT bash
+  if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
+    echo "nvidia-l4t" > /run/localai/capability
+  fi
+EOT
+
+# https://github.com/NVIDIA/Isaac-GR00T/issues/343
+RUN <<EOT bash
+  if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
+    wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
+    dpkg -i cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
+    cp /var/cudss-local-tegra-repo-ubuntu2204-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
+    apt-get update && apt-get -y install cudss
+  fi
+EOT
+
 # If we are building with clblas support, we need the libraries for the builds
 RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
     apt-get update && \
@@ -94,6 +110,12 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
     ldconfig \
     ; fi
 
+RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
+    ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
+    ; fi
+
+RUN expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
+
 # Cuda
 ENV PATH=/usr/local/cuda/bin:${PATH}
 
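The image now records a capability hint in /run/localai/capability ("nvidia-l4t" for CUDA builds on arm64, "intel" for Intel builds) that downstream tooling can read to pick a matching backend. A quick sanity check against a built image (IMAGE is a placeholder for a locally built tag):

```bash
# Print the capability hint baked into an image at build time.
docker run --rm --entrypoint cat IMAGE /run/localai/capability
```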
288 Makefile
@@ -2,11 +2,10 @@ GOCMD=go
 GOTEST=$(GOCMD) test
 GOVET=$(GOCMD) vet
 BINARY_NAME=local-ai
+LAUNCHER_BINARY_NAME=local-ai-launcher
 
 GORELEASER?=
 
-ONEAPI_VERSION?=2025.2
-
 export BUILD_TYPE?=
 
 GO_TAGS?=
@@ -92,7 +91,17 @@ build: protogen-go install-go-tools ## Build the project
 	$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
 	$(info ${GREEN}I UPX: ${YELLOW}$(UPX)${RESET})
 	rm -rf $(BINARY_NAME) || true
-	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
+	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./cmd/local-ai
+
+build-launcher: ## Build the launcher application
+	$(info ${GREEN}I local-ai launcher build info:${RESET})
+	$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
+	$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
+	$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
+	rm -rf $(LAUNCHER_BINARY_NAME) || true
+	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(LAUNCHER_BINARY_NAME) ./cmd/launcher
+
+build-all: build build-launcher ## Build both server and launcher
 
 dev-dist:
 	$(GORELEASER) build --snapshot --clean
@@ -108,8 +117,8 @@ run: ## run local-ai
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./
 
 test-models/testmodel.ggml:
-	mkdir test-models
-	mkdir test-dir
+	mkdir -p test-models
+	mkdir -p test-dir
 	wget -q https://huggingface.co/mradermacher/gpt2-alpaca-gpt4-GGUF/resolve/main/gpt2-alpaca-gpt4.Q4_K_M.gguf -O test-models/testmodel.ggml
 	wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
 	wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
@@ -134,27 +143,6 @@ test: test-models/testmodel.ggml protogen-go
 	$(MAKE) test-tts
 	$(MAKE) test-stablediffusion
 
-backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
-	./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
-
-backends/piper: docker-build-piper docker-save-piper build
-	./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
-
-backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
-	./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
-
-backends/whisper: docker-build-whisper docker-save-whisper build
-	./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
-
-backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
-	./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
-
-backends/local-store: docker-build-local-store docker-save-local-store build
-	./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
-
-backends/huggingface: docker-build-huggingface docker-save-huggingface build
-	./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
-
 ########################################################
 ## AIO tests
 ########################################################
@@ -242,10 +230,7 @@ help: ## Show this help.
 ########################################################
 
 .PHONY: protogen
-protogen: protogen-go protogen-python
+protogen: protogen-go
 
-.PHONY: protogen-clean
-protogen-clean: protogen-go-clean protogen-python-clean
-
 protoc:
 	@OS_NAME=$$(uname -s | tr '[:upper:]' '[:lower:]'); \
@@ -290,93 +275,6 @@ protogen-go-clean:
 	$(RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go
 	$(RM) bin/*
 
-.PHONY: protogen-python
-protogen-python: bark-protogen coqui-protogen chatterbox-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen
-
-.PHONY: protogen-python-clean
-protogen-python-clean: bark-protogen-clean coqui-protogen-clean chatterbox-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean
-
-.PHONY: bark-protogen
-bark-protogen:
-	$(MAKE) -C backend/python/bark protogen
-
-.PHONY: bark-protogen-clean
-bark-protogen-clean:
-	$(MAKE) -C backend/python/bark protogen-clean
-
-.PHONY: coqui-protogen
-coqui-protogen:
-	$(MAKE) -C backend/python/coqui protogen
-
-.PHONY: coqui-protogen-clean
-coqui-protogen-clean:
-	$(MAKE) -C backend/python/coqui protogen-clean
-
-.PHONY: diffusers-protogen
-diffusers-protogen:
-	$(MAKE) -C backend/python/diffusers protogen
-
-.PHONY: chatterbox-protogen
-chatterbox-protogen:
-	$(MAKE) -C backend/python/chatterbox protogen
-
-.PHONY: diffusers-protogen-clean
-diffusers-protogen-clean:
-	$(MAKE) -C backend/python/diffusers protogen-clean
-
-.PHONY: chatterbox-protogen-clean
-chatterbox-protogen-clean:
-	$(MAKE) -C backend/python/chatterbox protogen-clean
-
-.PHONY: faster-whisper-protogen
-faster-whisper-protogen:
-	$(MAKE) -C backend/python/faster-whisper protogen
-
-.PHONY: faster-whisper-protogen-clean
-faster-whisper-protogen-clean:
-	$(MAKE) -C backend/python/faster-whisper protogen-clean
-
-.PHONY: exllama2-protogen
-exllama2-protogen:
-	$(MAKE) -C backend/python/exllama2 protogen
-
-.PHONY: exllama2-protogen-clean
-exllama2-protogen-clean:
-	$(MAKE) -C backend/python/exllama2 protogen-clean
-
-.PHONY: rerankers-protogen
-rerankers-protogen:
-	$(MAKE) -C backend/python/rerankers protogen
-
-.PHONY: rerankers-protogen-clean
-rerankers-protogen-clean:
-	$(MAKE) -C backend/python/rerankers protogen-clean
-
-.PHONY: transformers-protogen
-transformers-protogen:
-	$(MAKE) -C backend/python/transformers protogen
-
-.PHONY: transformers-protogen-clean
-transformers-protogen-clean:
-	$(MAKE) -C backend/python/transformers protogen-clean
-
-.PHONY: kokoro-protogen
-kokoro-protogen:
-	$(MAKE) -C backend/python/kokoro protogen
-
-.PHONY: kokoro-protogen-clean
-kokoro-protogen-clean:
-	$(MAKE) -C backend/python/kokoro protogen-clean
-
-.PHONY: vllm-protogen
-vllm-protogen:
-	$(MAKE) -C backend/python/vllm protogen
-
-.PHONY: vllm-protogen-clean
-vllm-protogen-clean:
-	$(MAKE) -C backend/python/vllm protogen-clean
-
-
 prepare-test-extra: protogen-python
 	$(MAKE) -C backend/python/transformers
 	$(MAKE) -C backend/python/diffusers
@@ -412,7 +310,7 @@ docker-cuda11:
 		--build-arg GO_TAGS="$(GO_TAGS)" \
 		--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
 		--build-arg BUILD_TYPE=$(BUILD_TYPE) \
-		-t $(DOCKER_IMAGE)-cuda11 .
+		-t $(DOCKER_IMAGE)-cuda-11 .
 
 docker-aio:
 	@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
@@ -427,41 +325,121 @@ docker-aio-all:
 
 docker-image-intel:
 	docker build \
-		--build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu24.04 \
+		--build-arg BASE_IMAGE=quay.io/go-skynet/intel-oneapi-base:latest \
 		--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
 		--build-arg GO_TAGS="$(GO_TAGS)" \
 		--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
-		--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
+		--build-arg BUILD_TYPE=intel -t $(DOCKER_IMAGE) .
 
-docker-image-intel-xpu:
-	docker build \
-		--build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu22.04 \
-		--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
-		--build-arg GO_TAGS="$(GO_TAGS)" \
-		--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
-		--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
-
 ########################################################
 ## Backends
 ########################################################
 
+backends/diffusers: docker-build-diffusers docker-save-diffusers build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
+
+backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
+
+backends/piper: docker-build-piper docker-save-piper build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
+
+backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
+
+backends/whisper: docker-build-whisper docker-save-whisper build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
+
+backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
+
+backends/local-store: docker-build-local-store docker-save-local-store build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
+
+backends/huggingface: docker-build-huggingface docker-save-huggingface build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
+
+backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
+
+backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
+
+backends/kokoro: docker-build-kokoro docker-save-kokoro build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
+
+backends/chatterbox: docker-build-chatterbox docker-save-chatterbox build
+	./local-ai backends install "ocifile://$(abspath ./backend-images/chatterbox.tar)"
+
+backends/llama-cpp-darwin: build
+	bash ./scripts/build/llama-cpp-darwin.sh
+	./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
+
+build-darwin-python-backend: build
+	bash ./scripts/build/python-darwin.sh
+
+build-darwin-go-backend: build
+	bash ./scripts/build/golang-darwin.sh
+
+backends/mlx:
+	BACKEND=mlx $(MAKE) build-darwin-python-backend
+	./local-ai backends install "ocifile://$(abspath ./backend-images/mlx.tar)"
+
+backends/diffuser-darwin:
+	BACKEND=diffusers $(MAKE) build-darwin-python-backend
+	./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
+
+backends/mlx-vlm:
+	BACKEND=mlx-vlm $(MAKE) build-darwin-python-backend
+	./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-vlm.tar)"
+
+backends/mlx-audio:
+	BACKEND=mlx-audio $(MAKE) build-darwin-python-backend
+	./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-audio.tar)"
+
+backends/stablediffusion-ggml-darwin:
+	BACKEND=stablediffusion-ggml BUILD_TYPE=metal $(MAKE) build-darwin-go-backend
+	./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
+
 backend-images:
 	mkdir -p backend-images
 
 docker-build-llama-cpp:
-	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg IMAGE_BASE=$(IMAGE_BASE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
 
 docker-build-bark-cpp:
-	docker build -t local-ai-backend:bark-cpp -f backend/Dockerfile.go --build-arg BACKEND=bark-cpp .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark-cpp -f backend/Dockerfile.golang --build-arg BACKEND=bark-cpp .
 
 docker-build-piper:
-	docker build -t local-ai-backend:piper -f backend/Dockerfile.go --build-arg BACKEND=piper .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:piper -f backend/Dockerfile.golang --build-arg BACKEND=piper .
 
 docker-build-local-store:
-	docker build -t local-ai-backend:local-store -f backend/Dockerfile.go --build-arg BACKEND=local-store .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:local-store -f backend/Dockerfile.golang --build-arg BACKEND=local-store .
 
 docker-build-huggingface:
-	docker build -t local-ai-backend:huggingface -f backend/Dockerfile.go --build-arg BACKEND=huggingface .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:huggingface -f backend/Dockerfile.golang --build-arg BACKEND=huggingface .
+
+docker-build-rfdetr:
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rfdetr -f backend/Dockerfile.python --build-arg BACKEND=rfdetr ./backend
+
+docker-build-kitten-tts:
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kitten-tts -f backend/Dockerfile.python --build-arg BACKEND=kitten-tts ./backend
+
+docker-save-kitten-tts: backend-images
+	docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar
+
+docker-save-chatterbox: backend-images
+	docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar
+
+docker-build-kokoro:
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
+
+docker-save-kokoro: backend-images
+	docker save local-ai-backend:kokoro -o backend-images/kokoro.tar
+
+docker-save-rfdetr: backend-images
+	docker save local-ai-backend:rfdetr -o backend-images/rfdetr.tar
+
 docker-save-huggingface: backend-images
 	docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
@@ -470,7 +448,7 @@ docker-save-local-store: backend-images
 	docker save local-ai-backend:local-store -o backend-images/local-store.tar
 
 docker-build-silero-vad:
-	docker build -t local-ai-backend:silero-vad -f backend/Dockerfile.go --build-arg BACKEND=silero-vad .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:silero-vad -f backend/Dockerfile.golang --build-arg BACKEND=silero-vad .
 
 docker-save-silero-vad: backend-images
 	docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar
@@ -485,46 +463,46 @@ docker-save-bark-cpp: backend-images
 	docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar
 
 docker-build-stablediffusion-ggml:
-	docker build -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.go --build-arg BACKEND=stablediffusion-ggml .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.golang --build-arg BACKEND=stablediffusion-ggml .
 
 docker-save-stablediffusion-ggml: backend-images
 	docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar
 
 docker-build-rerankers:
-	docker build -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
 
 docker-build-vllm:
-	docker build -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm .
 
 docker-build-transformers:
-	docker build -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
 
 docker-build-diffusers:
-	docker build -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers .
+	docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers ./backend
 
-docker-build-kokoro:
-	docker build -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro .
+docker-save-diffusers: backend-images
+	docker save local-ai-backend:diffusers -o backend-images/diffusers.tar
 
 docker-build-whisper:
-	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.go --build-arg BACKEND=whisper .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper .
 
 docker-save-whisper: backend-images
 	docker save local-ai-backend:whisper -o backend-images/whisper.tar
 
 docker-build-faster-whisper:
-	docker build -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
 
 docker-build-coqui:
-	docker build -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
 
 docker-build-bark:
-	docker build -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
 
 docker-build-chatterbox:
-	docker build -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox ./backend
 
 docker-build-exllama2:
-	docker build -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
+	docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
 
 docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2
 
@@ -557,3 +535,19 @@ docs-clean:
 .PHONY: docs
 docs: docs/static/gallery.html
 	cd docs && hugo serve
+
+########################################################
+## Platform-specific builds
+########################################################
+
+## fyne cross-platform build
+build-launcher-darwin: build-launcher
+	go run github.com/tiagomelo/macos-dmg-creator/cmd/createdmg@latest \
+		--appName "LocalAI" \
+		--appBinaryPath "$(LAUNCHER_BINARY_NAME)" \
+		--bundleIdentifier "com.localai.launcher" \
+		--iconPath "core/http/static/logo.png" \
+		--outputDir "dist/"
+
+build-launcher-linux:
+	cd cmd/launcher && go run fyne.io/tools/cmd/fyne@latest package -os linux -icon ../../core/http/static/logo.png --executable $(LAUNCHER_BINARY_NAME)-linux && mv launcher.tar.xz ../../$(LAUNCHER_BINARY_NAME)-linux.tar.xz
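Every `backends/<name>` target above follows the same three-step recipe: build the backend image, save it as a tarball under backend-images/, and install the tarball into the freshly built binary as an OCI file. Spelled out for whisper, this is what `make backends/whisper` runs given the targets in this diff:

```bash
# Build, export, and install the whisper backend locally.
docker build --build-arg BUILD_TYPE="$BUILD_TYPE" --build-arg BASE_IMAGE="$BASE_IMAGE" \
  -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper .
mkdir -p backend-images
docker save local-ai-backend:whisper -o backend-images/whisper.tar
./local-ai backends install "ocifile://$PWD/backend-images/whisper.tar"
```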
79
README.md
79
README.md
@@ -43,7 +43,7 @@
|
|||||||
|
|
||||||
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
|
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
|
||||||
>
|
>
|
||||||
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
|
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
|
||||||
[](https://t.me/localaiofficial_bot)
|
[](https://t.me/localaiofficial_bot)
|
||||||
|
|
||||||
[](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[](https://artifacthub.io/packages/search?repo=localai)
|
[](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[](https://artifacthub.io/packages/search?repo=localai)
|
||||||
@@ -110,6 +110,12 @@ curl https://localai.io/install.sh | sh

 For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/).

+### macOS Download:
+
+<a href="https://github.com/mudler/LocalAI/releases/latest/download/LocalAI.dmg">
+<img src="https://img.shields.io/badge/Download-macOS-blue?style=for-the-badge&logo=apple&logoColor=white" alt="Download LocalAI for macOS"/>
+</a>
+
 Or run with docker:

 ### CPU only image:
@@ -140,11 +146,7 @@ docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri
 ### Intel GPU Images (oneAPI):

 ```bash
-# Intel GPU with FP16 support
-docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f16
-
-# Intel GPU with FP32 support
-docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f32
+docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel
 ```

 ### Vulkan GPU Images:
@@ -166,7 +168,7 @@ docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-ai
 docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11

 # Intel GPU version
-docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel-f16
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel

 # AMD GPU version
 docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas
@@ -189,10 +191,15 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
 local-ai run oci://localai/phi-2:latest
 ```

+> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
+
 For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)

 ## 📰 Latest project news

+- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
+- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
+- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
 - June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
 - May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery).
 - May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0)
@@ -225,12 +232,67 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
 - ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
 - 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
 - 🥽 [Vision API](https://localai.io/features/gpt-vision/)
+- 🔍 [Object Detection](https://localai.io/features/object-detection/)
 - 📈 [Reranker API](https://localai.io/features/reranker/)
 - 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
 - [Agentic capabilities](https://github.com/mudler/LocalAGI)
 - 🔊 Voice activity detection (Silero-VAD support)
 - 🌍 Integrated WebUI!

+## 🧩 Supported Backends & Acceleration
+
+LocalAI supports a comprehensive range of AI backends with multiple acceleration options:
+
+### Text Generation & Language Models
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12, ROCm, Intel SYCL, Vulkan, Metal, CPU |
+| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12, ROCm, Intel |
+| **transformers** | HuggingFace transformers framework | CUDA 11/12, ROCm, Intel, CPU |
+| **exllama2** | GPTQ inference library | CUDA 12 |
+| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
+| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
+
+### Audio & Speech Processing
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12, ROCm, Intel SYCL, Vulkan, CPU |
+| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12, ROCm, Intel, CPU |
+| **bark** | Text-to-audio generation | CUDA 12, ROCm, Intel |
+| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
+| **coqui** | Advanced TTS with 1100+ languages | CUDA 12, ROCm, Intel, CPU |
+| **kokoro** | Lightweight TTS model | CUDA 12, ROCm, Intel, CPU |
+| **chatterbox** | Production-grade TTS | CUDA 11/12, CPU |
+| **piper** | Fast neural TTS system | CPU |
+| **kitten-tts** | Kitten TTS models | CPU |
+| **silero-vad** | Voice Activity Detection | CPU |
+
+### Image & Video Generation
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12, Intel SYCL, Vulkan, CPU |
+| **diffusers** | HuggingFace diffusion models | CUDA 11/12, ROCm, Intel, Metal, CPU |
+
+### Specialized AI Tasks
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **rfdetr** | Real-time object detection | CUDA 12, Intel, CPU |
+| **rerankers** | Document reranking API | CUDA 11/12, ROCm, Intel, CPU |
+| **local-store** | Vector database | CPU |
+| **huggingface** | HuggingFace API integration | API-based |
+
+### Hardware Acceleration Matrix
+
+| Acceleration Type | Supported Backends | Hardware Support |
+|-------------------|-------------------|------------------|
+| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
+| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
+| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark | AMD Graphics |
+| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
+| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
+| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
+| **NVIDIA Jetson** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI |
+| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
+
 ### 🔗 Community and integrations

@@ -245,6 +307,9 @@ WebUIs:
 Model galleries
 - https://github.com/go-skynet/model-gallery

+Voice:
+- https://github.com/richiejp/VoxInput
+
 Other:
 - Helm chart https://github.com/go-skynet/helm-charts
 - VSCode extension https://github.com/badgooooor/localai-vscode-plugin
@@ -2,10 +2,10 @@ context_size: 4096
 f16: true
 backend: llama-cpp
 mmap: true
-mmproj: minicpm-v-2_6-mmproj-f16.gguf
+mmproj: minicpm-v-4_5-mmproj-f16.gguf
 name: gpt-4o
 parameters:
-  model: minicpm-v-2_6-Q4_K_M.gguf
+  model: minicpm-v-4_5-Q4_K_M.gguf
 stopwords:
 - <|im_end|>
 - <dummy32000>
@@ -42,9 +42,9 @@ template:
   <|im_start|>assistant

 download_files:
-- filename: minicpm-v-2_6-Q4_K_M.gguf
-  sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
-  uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
-- filename: minicpm-v-2_6-mmproj-f16.gguf
-  uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
-  sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
+- filename: minicpm-v-4_5-Q4_K_M.gguf
+  sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
+  uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
+- filename: minicpm-v-4_5-mmproj-f16.gguf
+  uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
+  sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8
@@ -2,10 +2,10 @@ context_size: 4096
 backend: llama-cpp
 f16: true
 mmap: true
-mmproj: minicpm-v-2_6-mmproj-f16.gguf
+mmproj: minicpm-v-4_5-mmproj-f16.gguf
 name: gpt-4o
 parameters:
-  model: minicpm-v-2_6-Q4_K_M.gguf
+  model: minicpm-v-4_5-Q4_K_M.gguf
 stopwords:
 - <|im_end|>
 - <dummy32000>
@@ -42,9 +42,9 @@ template:
   <|im_start|>assistant

 download_files:
-- filename: minicpm-v-2_6-Q4_K_M.gguf
-  sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
-  uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
-- filename: minicpm-v-2_6-mmproj-f16.gguf
-  uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
-  sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
+- filename: minicpm-v-4_5-Q4_K_M.gguf
+  sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
+  uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
+- filename: minicpm-v-4_5-mmproj-f16.gguf
+  uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
+  sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8
@@ -2,10 +2,10 @@ context_size: 4096
 backend: llama-cpp
 f16: true
 mmap: true
-mmproj: minicpm-v-2_6-mmproj-f16.gguf
+mmproj: minicpm-v-4_5-mmproj-f16.gguf
 name: gpt-4o
 parameters:
-  model: minicpm-v-2_6-Q4_K_M.gguf
+  model: minicpm-v-4_5-Q4_K_M.gguf
 stopwords:
 - <|im_end|>
 - <dummy32000>
@@ -43,9 +43,9 @@ template:


 download_files:
-- filename: minicpm-v-2_6-Q4_K_M.gguf
-  sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
-  uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
-- filename: minicpm-v-2_6-mmproj-f16.gguf
-  uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
-  sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
+- filename: minicpm-v-4_5-Q4_K_M.gguf
+  sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
+  uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
+- filename: minicpm-v-4_5-mmproj-f16.gguf
+  uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
+  sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8
||||||
@@ -96,17 +96,6 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
|
|||||||
ldconfig \
|
ldconfig \
|
||||||
; fi
|
; fi
|
||||||
|
|
||||||
# Intel oneAPI requirements
|
|
||||||
RUN <<EOT bash
|
|
||||||
if [[ "${BUILD_TYPE}" == sycl* ]] && [ "${SKIP_DRIVERS}" = "false" ]; then
|
|
||||||
apt-get update && \
|
|
||||||
apt-get install -y --no-install-recommends \
|
|
||||||
intel-oneapi-runtime-libs && \
|
|
||||||
apt-get clean && \
|
|
||||||
rm -rf /var/lib/apt/lists/*
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
|
|
||||||
# Install Go
|
# Install Go
|
||||||
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
|
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
|
||||||
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin
|
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin
|
||||||
@@ -23,7 +23,7 @@ RUN apt-get update && \
     libssl-dev \
    git \
     git-lfs \
-    unzip \
+    unzip clang \
     upx-ucl \
     curl python3-pip \
     python-is-python3 \
@@ -116,7 +116,7 @@ COPY python/${BACKEND} /${BACKEND}
 COPY backend.proto /${BACKEND}/backend.proto
 COPY python/common/ /${BACKEND}/common

-RUN cd /${BACKEND} && make
+RUN cd /${BACKEND} && PORTABLE_PYTHON=true make

 FROM scratch
 ARG BACKEND=rerankers
backend/README.md (new file, 213 lines)
@@ -0,0 +1,213 @@
# LocalAI Backend Architecture

This directory contains the core backend infrastructure for LocalAI, including the gRPC protocol definition, multi-language Dockerfiles, and language-specific backend implementations.

## Overview

LocalAI uses a unified gRPC-based architecture that allows different programming languages to implement AI backends while maintaining consistent interfaces and capabilities. The backend system supports multiple hardware acceleration targets and provides a standardized way to integrate various AI models and frameworks.

## Architecture Components

### 1. Protocol Definition (`backend.proto`)

The `backend.proto` file defines the gRPC service interface that all backends must implement. This ensures consistency across different language implementations and provides a contract for communication between the LocalAI core and backend services.

#### Core Services

- **Text Generation**: `Predict`, `PredictStream` for LLM inference
- **Embeddings**: `Embedding` for text vectorization
- **Image Generation**: `GenerateImage` for stable diffusion and image models
- **Audio Processing**: `AudioTranscription`, `TTS`, `SoundGeneration`
- **Video Generation**: `GenerateVideo` for video synthesis
- **Object Detection**: `Detect` for computer vision tasks
- **Vector Storage**: `StoresSet`, `StoresGet`, `StoresFind` for RAG operations
- **Reranking**: `Rerank` for document relevance scoring
- **Voice Activity Detection**: `VAD` for audio segmentation

#### Key Message Types

- **`PredictOptions`**: Comprehensive configuration for text generation
- **`ModelOptions`**: Model loading and configuration parameters
- **`Result`**: Standardized response format
- **`StatusResponse`**: Backend health and memory usage information

### 2. Multi-Language Dockerfiles

The backend system provides language-specific Dockerfiles that handle the build environment and dependencies for different programming languages:

- `Dockerfile.python`
- `Dockerfile.golang`
- `Dockerfile.llama-cpp`

### 3. Language-Specific Implementations

#### Python Backends (`python/`)
- **transformers**: Hugging Face Transformers framework
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro

#### Go Backends (`go/`)
- **whisper**: OpenAI Whisper speech recognition in Go, backed by the GGML C++ implementation (whisper.cpp)
- **stablediffusion-ggml**: Stable Diffusion in Go, backed by the GGML C++ implementation
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Go with C bindings to rhasspy/piper
- **bark-cpp**: Bark TTS models in Go with C++ bindings
- **local-store**: Vector storage backend

#### C++ Backends (`cpp/`)
- **llama-cpp**: llama.cpp integration
- **grpc**: gRPC utilities and helpers

## Hardware Acceleration Support

### CUDA (NVIDIA)
- **Versions**: CUDA 11.x, 12.x
- **Features**: cuBLAS, cuDNN, TensorRT optimization
- **Targets**: x86_64, ARM64 (Jetson)

### ROCm (AMD)
- **Features**: HIP, rocBLAS, MIOpen
- **Targets**: AMD GPUs with ROCm support

### Intel
- **Features**: oneAPI, Intel Extension for PyTorch
- **Targets**: Intel GPUs, XPUs, CPUs

### Vulkan
- **Features**: Cross-platform GPU acceleration
- **Targets**: Windows, Linux, Android, macOS

### Apple Silicon
- **Features**: MLX framework, Metal Performance Shaders
- **Targets**: M1/M2/M3 Macs

## Backend Registry (`index.yaml`)

The `index.yaml` file serves as a central registry for all available backends, providing the following (a hypothetical entry is sketched below):

- **Metadata**: Name, description, license, icons
- **Capabilities**: Hardware targets and optimization profiles
- **Tags**: Categorization for discovery
- **URLs**: Source code and documentation links
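To make the registry shape concrete, here is a minimal sketch of an entry. The field names are illustrative only, derived from the metadata bullets above; the actual schema in `index.yaml` is authoritative and may differ.

```yaml
# Hypothetical index.yaml entry -- field names are illustrative, not the verified schema.
- name: "rerankers"
  description: "Document reranking backend"
  license: "apache-2.0"
  icon: "https://example.com/rerankers.png"   # placeholder URL
  urls:
    - "https://github.com/mudler/LocalAI"
  tags:
    - "reranking"
  capabilities:          # hardware targets / optimization profiles
    - "cublas12"
    - "cpu"
```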
## Building Backends

### Prerequisites
- Docker with multi-architecture support
- Appropriate hardware drivers (CUDA, ROCm, etc.)
- Build tools (make, cmake, compilers)

### Build Commands

Example build commands with Docker:

```bash
# Build Python backend
docker build -f backend/Dockerfile.python \
  --build-arg BACKEND=transformers \
  --build-arg BUILD_TYPE=cublas12 \
  --build-arg CUDA_MAJOR_VERSION=12 \
  --build-arg CUDA_MINOR_VERSION=0 \
  -t localai-backend-transformers .

# Build Go backend
docker build -f backend/Dockerfile.golang \
  --build-arg BACKEND=whisper \
  --build-arg BUILD_TYPE=cpu \
  -t localai-backend-whisper .

# Build C++ backend
docker build -f backend/Dockerfile.llama-cpp \
  --build-arg BACKEND=llama-cpp \
  --build-arg BUILD_TYPE=cublas12 \
  -t localai-backend-llama-cpp .
```

For ARM64/Mac builds, Docker cannot be used; build with the Makefile in the respective backend instead.

### Build Types

- **`cpu`**: CPU-only optimization
- **`cublas11`**: CUDA 11.x with cuBLAS
- **`cublas12`**: CUDA 12.x with cuBLAS
- **`hipblas`**: ROCm with rocBLAS
- **`intel`**: Intel oneAPI optimization
- **`vulkan`**: Vulkan-based acceleration
- **`metal`**: Apple Metal optimization

## Backend Development

### Creating a New Backend

1. **Choose Language**: Select Python, Go, or C++ based on requirements
2. **Implement Interface**: Implement the gRPC service defined in `backend.proto`
3. **Add Dependencies**: Create appropriate requirements files
4. **Configure Build**: Set up the Dockerfile and build scripts
5. **Register Backend**: Add an entry to `index.yaml`
6. **Test Integration**: Verify gRPC communication and functionality

### Backend Structure

```
backend-name/
├── backend.py/go/cpp     # Main implementation
├── requirements.txt      # Dependencies
├── Dockerfile            # Build configuration
├── install.sh            # Installation script
├── run.sh                # Execution script
├── test.sh               # Test script
└── README.md             # Backend documentation
```

### Required gRPC Methods

At minimum, backends must implement the following (a minimal sketch follows the list):
- `Health()` - Service health check
- `LoadModel()` - Model loading and initialization
- `Predict()` - Main inference endpoint
- `Status()` - Backend status and metrics
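For illustration, here is a minimal sketch of that surface in Go. It assumes gRPC stubs generated from `backend.proto` under a hypothetical `pb` import path; the message field names (`Message`, `Success`, `Prompt`, `ModelPath`) are inferred from the proto excerpts later in this diff and should be checked against the generated code.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	pb "example.com/localai/pkg/grpc/proto" // hypothetical path to the generated backend.proto stubs
)

// minimalBackend implements just the four required methods.
type minimalBackend struct {
	pb.UnimplementedBackendServer
}

// Health answers the service health check.
func (b *minimalBackend) Health(ctx context.Context, in *pb.HealthMessage) (*pb.Reply, error) {
	return &pb.Reply{Message: []byte("OK")}, nil
}

// LoadModel would load weights from the requested path; this sketch only acknowledges.
func (b *minimalBackend) LoadModel(ctx context.Context, in *pb.ModelOptions) (*pb.Result, error) {
	log.Printf("loading model from %s", in.GetModelPath())
	return &pb.Result{Success: true}, nil
}

// Predict is the main inference endpoint; this sketch just echoes the prompt.
func (b *minimalBackend) Predict(ctx context.Context, in *pb.PredictOptions) (*pb.Reply, error) {
	return &pb.Reply{Message: []byte(in.GetPrompt())}, nil
}

// Status reports backend state; a real backend would fill in memory usage.
func (b *minimalBackend) Status(ctx context.Context, in *pb.HealthMessage) (*pb.StatusResponse, error) {
	return &pb.StatusResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterBackendServer(s, &minimalBackend{})
	log.Fatal(s.Serve(lis))
}
```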
## Integration with LocalAI Core

Backends communicate with the LocalAI core through gRPC (a client-side sketch of steps 2 and 3 follows the list):

1. **Service Discovery**: Core discovers available backends
2. **Model Loading**: Core requests model loading via `LoadModel`
3. **Inference**: Core sends requests via `Predict` or specialized endpoints
4. **Streaming**: Core handles streaming responses for real-time generation
5. **Monitoring**: Core tracks backend health and performance
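The sketch below exercises steps 2 and 3 from the caller's side, under the same assumptions as the server sketch above (hypothetical `pb` stub path, placeholder address and model path).

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "example.com/localai/pkg/grpc/proto" // same hypothetical stub path as in the server sketch
)

func main() {
	conn, err := grpc.NewClient("127.0.0.1:50051", // placeholder backend address
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := pb.NewBackendClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Step 2: ask the backend to load a model (path is a placeholder).
	if _, err := client.LoadModel(ctx, &pb.ModelOptions{ModelPath: "/models/example.gguf"}); err != nil {
		log.Fatal(err)
	}

	// Step 3: run a single unary prediction.
	reply, err := client.Predict(ctx, &pb.PredictOptions{Prompt: "Hello"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("reply: %s", reply.GetMessage())
}
```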
## Performance Optimization

### Memory Management
- **Model Caching**: Efficient model loading and caching
- **Batch Processing**: Optimize for multiple concurrent requests
- **Memory Pinning**: GPU memory optimization for CUDA/ROCm

### Hardware Utilization
- **Multi-GPU**: Support for tensor parallelism
- **Mixed Precision**: FP16/BF16 for memory efficiency
- **Kernel Fusion**: Optimized CUDA/ROCm kernels

## Troubleshooting

### Common Issues

1. **gRPC Connection**: Verify the backend service is running and accessible
2. **Model Loading**: Check model paths and dependencies
3. **Hardware Detection**: Ensure appropriate drivers and libraries are installed
4. **Memory Issues**: Monitor GPU memory usage and model sizes

## Contributing

When contributing to the backend system:

1. **Follow Protocol**: Implement the exact gRPC interface
2. **Add Tests**: Include comprehensive test coverage
3. **Document**: Provide clear usage examples
4. **Optimize**: Consider performance and resource usage
5. **Validate**: Test across different hardware targets
backend.proto
@@ -20,6 +20,7 @@ service Backend {
   rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
   rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
   rpc Status(HealthMessage) returns (StatusResponse) {}
+  rpc Detect(DetectOptions) returns (DetectResponse) {}

   rpc StoresSet(StoresSetOptions) returns (Result) {}
   rpc StoresDelete(StoresDeleteOptions) returns (Result) {}
@@ -241,7 +242,7 @@ message ModelOptions {

   string Type = 49;

-  bool FlashAttention = 56;
+  string FlashAttention = 56;
   bool NoKVOffload = 57;

   string ModelPath = 59;
@@ -275,6 +276,7 @@ message TranscriptRequest {
   string language = 3;
   uint32 threads = 4;
   bool translate = 5;
+  bool diarize = 6;
 }

 message TranscriptResult {
@@ -304,19 +306,24 @@ message GenerateImageRequest {
   // Diffusers
   string EnableParameters = 10;
   int32 CLIPSkip = 11;

+  // Reference images for models that support them (e.g., Flux Kontext)
+  repeated string ref_images = 12;
 }

 message GenerateVideoRequest {
   string prompt = 1;
-  string start_image = 2; // Path or base64 encoded image for the start frame
-  string end_image = 3; // Path or base64 encoded image for the end frame
-  int32 width = 4;
-  int32 height = 5;
-  int32 num_frames = 6; // Number of frames to generate
-  int32 fps = 7; // Frames per second
-  int32 seed = 8;
-  float cfg_scale = 9; // Classifier-free guidance scale
-  string dst = 10; // Output path for the generated video
+  string negative_prompt = 2; // Negative prompt for video generation
+  string start_image = 3; // Path or base64 encoded image for the start frame
+  string end_image = 4; // Path or base64 encoded image for the end frame
+  int32 width = 5;
+  int32 height = 6;
+  int32 num_frames = 7; // Number of frames to generate
+  int32 fps = 8; // Frames per second
+  int32 seed = 9;
+  float cfg_scale = 10; // Classifier-free guidance scale
+  int32 step = 11; // Number of inference steps
+  string dst = 12; // Output path for the generated video
 }

 message TTSRequest {
@@ -376,3 +383,20 @@ message Message {
   string role = 1;
   string content = 2;
 }
+
+message DetectOptions {
+  string src = 1;
+}
+
+message Detection {
+  float x = 1;
+  float y = 2;
+  float width = 3;
+  float height = 4;
+  float confidence = 5;
+  string class_name = 6;
+}
+
+message DetectResponse {
+  repeated Detection Detections = 1;
+}
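As a usage note for the `Detect` RPC and messages added above: a caller could consume the response roughly as in the hedged sketch below, which reuses the hypothetical `pb` stubs and `BackendClient` from the README sketches earlier in this diff; the image path is a placeholder.

```go
// detectExample calls the new Detect RPC and prints each detection.
// The pb stubs and BackendClient are the hypothetical ones from the earlier sketches.
func detectExample(ctx context.Context, client pb.BackendClient) error {
	resp, err := client.Detect(ctx, &pb.DetectOptions{Src: "/tmp/input.jpg"}) // placeholder path
	if err != nil {
		return err
	}
	for _, d := range resp.GetDetections() {
		log.Printf("%s (%.2f) at x=%.0f y=%.0f w=%.0f h=%.0f",
			d.GetClassName(), d.GetConfidence(),
			d.GetX(), d.GetY(), d.GetWidth(), d.GetHeight())
	}
	return nil
}
```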
@@ -1,5 +1,5 @@

-LLAMA_VERSION?=acd6cb1c41676f6bbb25c2a76fa5abeb1719301e
+LLAMA_VERSION?=d64c8104f090b27b1f99e8da5995ffcfa6b726e2
 LLAMA_REPO?=https://github.com/ggerganov/llama.cpp

 CMAKE_ARGS?=
@@ -7,13 +7,14 @@ BUILD_TYPE?=
 NATIVE?=false
 ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
 TARGET?=--target grpc-server
+JOBS?=$(shell nproc)

 # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
 CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF

 CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
 ifeq ($(NATIVE),false)
-	CMAKE_ARGS+=-DGGML_NATIVE=OFF
+	CMAKE_ARGS+=-DGGML_NATIVE=OFF -DLLAMA_OPENSSL=OFF
 endif
 # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
 ifeq ($(BUILD_TYPE),cublas)
@@ -25,16 +26,14 @@ else ifeq ($(BUILD_TYPE),openblas)
 # If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
 else ifeq ($(BUILD_TYPE),clblas)
 	CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
 # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
 else ifeq ($(BUILD_TYPE),hipblas)
 	ROCM_HOME ?= /opt/rocm
 	ROCM_PATH ?= /opt/rocm
 	export CXX=$(ROCM_HOME)/llvm/bin/clang++
 	export CC=$(ROCM_HOME)/llvm/bin/clang
-	# GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
-	# AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
-	CMAKE_ARGS+=-DGGML_HIP=ON
-	# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
+	AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
+	CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
 else ifeq ($(BUILD_TYPE),vulkan)
 	CMAKE_ARGS+=-DGGML_VULKAN=1
 else ifeq ($(OS),Darwin)
@@ -160,8 +159,8 @@ grpc-server: llama.cpp llama.cpp/tools/grpc-server
 	@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
 ifneq (,$(findstring sycl,$(BUILD_TYPE)))
 	+bash -c "source $(ONEAPI_VARS); \
-		cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)"
+		cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)"
 else
-	+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
+	+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)
 endif
 	cp llama.cpp/build/bin/grpc-server .
@@ -53,9 +53,9 @@ static void start_llama_server(server_context& ctx_server) {
     LOG_INF("%s: model loaded\n", __func__);

     // print sample chat example to make it clear which template is used
-    LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
-        common_chat_templates_source(ctx_server.chat_templates.get()),
-        common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());
+    // LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
+    //     common_chat_templates_source(ctx_server.chat_templates.get()),
+    //     common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str(), ctx_server.params_base.default_template_kwargs);

     // Reset the chat templates
     // TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM
@@ -231,6 +231,7 @@ static void params_parse(const backend::ModelOptions* request,
     params.cpuparams.n_threads = request->threads();
     params.n_gpu_layers = request->ngpulayers();
     params.n_batch = request->nbatch();
+    params.n_ubatch = request->nbatch(); // fixes issue with reranking models being limited to 512 tokens (the default n_ubatch size); allows for setting the maximum input amount of tokens thereby avoiding this error "input is too large to process. increase the physical batch size"
     // Set params.n_parallel by environment variable (LLAMA_PARALLEL), defaults to 1
     //params.n_parallel = 1;
     const char *env_parallel = std::getenv("LLAMACPP_PARALLEL");
@@ -304,7 +305,15 @@ static void params_parse(const backend::ModelOptions* request,
     }
     params.use_mlock = request->mlock();
     params.use_mmap = request->mmap();
-    params.flash_attn = request->flashattention();
+
+    if (request->flashattention() == "on" || request->flashattention() == "enabled") {
+        params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
+    } else if (request->flashattention() == "off" || request->flashattention() == "disabled") {
+        params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
+    } else if (request->flashattention() == "auto") {
+        params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
+    }
+
     params.no_kv_offload = request->nokvoffload();
     params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops)

@@ -313,9 +322,11 @@ static void params_parse(const backend::ModelOptions* request,
         params.pooling_type = LLAMA_POOLING_TYPE_RANK;
     }

     if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
     else if (request->ropescaling() == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
-    else { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
+    else if (request->ropescaling() == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }

     if ( request->yarnextfactor() != 0.0f ) {
         params.yarn_ext_factor = request->yarnextfactor();
     }
@@ -435,24 +446,7 @@ public:
         }
     }

-    // process files
-    mtmd::bitmaps bitmaps;
     const bool has_mtmd = ctx_server.mctx != nullptr;
-    {
-        if (!has_mtmd && !files.empty()) {
-            throw std::runtime_error("This server does not support multimodal");
-        }
-        for (auto & file : files) {
-            mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
-            if (!bmp.ptr) {
-                throw std::runtime_error("Failed to load image/audio");
-            }
-            // calculate bitmap hash (for KV caching)
-            std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
-            bmp.set_id(hash.c_str());
-            bitmaps.entries.push_back(std::move(bmp));
-        }
-    }

     // process prompt
     std::vector<server_tokens> inputs;
@@ -462,32 +456,10 @@ public:

     if (has_mtmd) {
         // multimodal
-        std::string prompt_str = prompt.get<std::string>();
-        mtmd_input_text inp_txt = {
-            prompt_str.c_str(),
-            /* add_special */ true,
-            /* parse_special */ true,
-        };
-        mtmd::input_chunks chunks(mtmd_input_chunks_init());
-        auto bitmaps_c_ptr = bitmaps.c_ptr();
-        int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
-            chunks.ptr.get(),
-            &inp_txt,
-            bitmaps_c_ptr.data(),
-            bitmaps_c_ptr.size());
-        if (tokenized != 0) {
-            throw std::runtime_error("Failed to tokenize prompt");
-        }
-
-        server_tokens tmp(chunks, true);
-        inputs.push_back(std::move(tmp));
+        inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
     } else {
-        // non-multimodal version
-        auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
-        for (auto & p : tokenized_prompts) {
-            auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
-            inputs.push_back(std::move(tmp));
-        }
+        // Everything else, including multimodal completions.
+        inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
     }

     tasks.reserve(inputs.size());
@@ -628,23 +600,7 @@ public:
     }

     // process files
-    mtmd::bitmaps bitmaps;
     const bool has_mtmd = ctx_server.mctx != nullptr;
-    {
-        if (!has_mtmd && !files.empty()) {
-            throw std::runtime_error("This server does not support multimodal");
-        }
-        for (auto & file : files) {
-            mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
-            if (!bmp.ptr) {
-                throw std::runtime_error("Failed to load image/audio");
-            }
-            // calculate bitmap hash (for KV caching)
-            std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
-            bmp.set_id(hash.c_str());
-            bitmaps.entries.push_back(std::move(bmp));
-        }
-    }

     // process prompt
     std::vector<server_tokens> inputs;
@@ -655,33 +611,10 @@ public:

     if (has_mtmd) {
         // multimodal
-        std::string prompt_str = prompt.get<std::string>();
-        mtmd_input_text inp_txt = {
-            prompt_str.c_str(),
-            /* add_special */ true,
-            /* parse_special */ true,
-        };
-        mtmd::input_chunks chunks(mtmd_input_chunks_init());
-        auto bitmaps_c_ptr = bitmaps.c_ptr();
-        int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
-            chunks.ptr.get(),
-            &inp_txt,
-            bitmaps_c_ptr.data(),
-            bitmaps_c_ptr.size());
-        if (tokenized != 0) {
-            std::cout << "[PREDICT] Failed to tokenize prompt" << std::endl;
-            throw std::runtime_error("Failed to tokenize prompt");
-        }
-
-        server_tokens tmp(chunks, true);
-        inputs.push_back(std::move(tmp));
+        inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
     } else {
-        // non-multimodal version
-        auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
-        for (auto & p : tokenized_prompts) {
-            auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
-            inputs.push_back(std::move(tmp));
-        }
+        // Everything else, including multimodal completions.
+        inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
     }

     tasks.reserve(inputs.size());
@@ -769,10 +702,10 @@ public:
     */

     // for the shape of input/content, see tokenize_input_prompts()
-    json prompt = body.at("prompt");
+    json prompt = body.at("embeddings");


-    auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
+    auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
     for (const auto & tokens : tokenized_prompts) {
         // this check is necessary for models that do not add BOS token to the input
         if (tokens.empty()) {
@@ -780,6 +713,7 @@ public:
         }
     }

+    int embd_normalize = 2; // default to Euclidean/L2 norm
     // create and queue the task
     json responses = json::array();
     bool error = false;
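For reference, the `embd_normalize = 2` default added above selects Euclidean (L2) normalization, i.e. each returned embedding vector is scaled to unit length:

```latex
e'_j = \frac{e_j}{\lVert e \rVert_2},
\qquad
\lVert e \rVert_2 = \sqrt{\sum_{j=1}^{d} e_j^2}
```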
@@ -791,11 +725,10 @@ public:

     task.id = ctx_server.queue_tasks.get_new_id();
     task.index = i;
-    task.prompt_tokens = server_tokens(tokenized_prompts[i], ctx_server.mctx != nullptr);
+    task.prompt_tokens = std::move(tokenized_prompts[i]);

-    // OAI-compat
-    task.params.oaicompat = OAICOMPAT_TYPE_EMBEDDING;
-
+    task.params.oaicompat = OAICOMPAT_TYPE_NONE;
+    task.params.embd_normalize = embd_normalize;
     tasks.push_back(std::move(task));
 }

@@ -811,9 +744,8 @@ public:
         responses.push_back(res->to_json());
     }
 }, [&](const json & error_data) {
-    return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, error_data.value("content", ""));
+    error = true;
 }, [&]() {
-    // NOTE: we should try to check when the writer is closed here
     return false;
 });

@@ -823,12 +755,36 @@ public:
     return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results");
 }

-std::vector<float> embeddings = responses[0].value("embedding", std::vector<float>());
-// loop the vector and set the embeddings results
-for (int i = 0; i < embeddings.size(); i++) {
-    embeddingResult->add_embeddings(embeddings[i]);
+std::cout << "[DEBUG] Responses size: " << responses.size() << std::endl;
+
+// Process the responses and extract embeddings
+for (const auto & response_elem : responses) {
+    // Check if the response has an "embedding" field
+    if (response_elem.contains("embedding")) {
+        json embedding_data = json_value(response_elem, "embedding", json::array());
+
+        if (embedding_data.is_array() && !embedding_data.empty()) {
+            for (const auto & embedding_vector : embedding_data) {
+                if (embedding_vector.is_array()) {
+                    for (const auto & embedding_value : embedding_vector) {
+                        embeddingResult->add_embeddings(embedding_value.get<float>());
+                    }
+                }
+            }
+        }
+    } else {
+        // Check if the response itself contains the embedding data directly
+        if (response_elem.is_array()) {
+            for (const auto & embedding_value : response_elem) {
+                embeddingResult->add_embeddings(embedding_value.get<float>());
+            }
+        }
+    }
 }

 return grpc::Status::OK;
 }

@@ -846,9 +802,6 @@ public:
     return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"documents\" must be a non-empty string array");
 }

-// Tokenize the query
-llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.vocab, request->query(), /* add_special */ false, true)[0];
-
 // Create and queue the task
 json responses = json::array();
 bool error = false;

@@ -860,14 +813,13 @@ public:
     documents.push_back(request->documents(i));
 }

-auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, documents, /* add_special */ false, true);
-tasks.reserve(tokenized_docs.size());
-for (size_t i = 0; i < tokenized_docs.size(); i++) {
-    auto tmp = format_rerank(ctx_server.vocab, tokenized_query, tokenized_docs[i]);
+tasks.reserve(documents.size());
+for (size_t i = 0; i < documents.size(); i++) {
+    auto tmp = format_rerank(ctx_server.model, ctx_server.vocab, ctx_server.mctx, request->query(), documents[i]);
     server_task task = server_task(SERVER_TASK_TYPE_RERANK);
     task.id = ctx_server.queue_tasks.get_new_id();
     task.index = i;
-    task.prompt_tokens = server_tokens(tmp, ctx_server.mctx != nullptr);
+    task.prompt_tokens = std::move(tmp);
     tasks.push_back(std::move(task));
 }
@@ -42,7 +42,8 @@ fi

 # Extend ld library path with the dir where this script is located/lib
 if [ "$(uname)" == "Darwin" ]; then
-    DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
+    export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH
+    #export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
 else
     export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
 fi
@@ -57,5 +58,5 @@ fi
 echo "Using binary: $BINARY"
 exec $CURDIR/$BINARY "$@"

-# In case we fail execing, just run fallback
+# We should never reach this point, however just in case we do, run fallback
 exec $CURDIR/llama-cpp-fallback "$@"
backend/go/stablediffusion-ggml/.gitignore (new file, vendored, 6 lines)
@@ -0,0 +1,6 @@
package/
sources/
.cache/
build/
libgosd.so
stablediffusion-ggml
backend/go/stablediffusion-ggml/CMakeLists.txt (new file, 20 lines)
@@ -0,0 +1,20 @@
cmake_minimum_required(VERSION 3.12)
project(gosd LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)

add_subdirectory(./sources/stablediffusion-ggml.cpp)

add_library(gosd MODULE gosd.cpp)
target_link_libraries(gosd PRIVATE stable-diffusion ggml)

if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
  target_link_libraries(gosd PRIVATE stdc++fs)
endif()

target_include_directories(gosd PUBLIC
  stable-diffusion.cpp
  stable-diffusion.cpp/thirdparty
)

set_property(TARGET gosd PROPERTY CXX_STANDARD 17)
set_target_properties(gosd PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
@@ -1,28 +1,16 @@
-INCLUDE_PATH := $(abspath ./)
-LIBRARY_PATH := $(abspath ./)
-
-AR?=ar
 CMAKE_ARGS?=
 BUILD_TYPE?=
 NATIVE?=false
-CUDA_LIBPATH?=/usr/local/cuda/lib64/
-ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
-# keep standard at C11 and C++11
-CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC

 GOCMD?=go
-CGO_LDFLAGS?=
-# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
-CGO_LDFLAGS_SYCL=
 GO_TAGS?=
-LD_FLAGS?=
+JOBS?=$(shell nproc --ignore=1)

 # stablediffusion.cpp (ggml)
-STABLEDIFFUSION_GGML_REPO?=https://github.com/richiejp/stable-diffusion.cpp
-STABLEDIFFUSION_GGML_VERSION?=53e3b17eb3d0b5760ced06a1f98320b68b34aaae
+STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
+STABLEDIFFUSION_GGML_VERSION?=0ebe6fe118f125665939b27c89f34ed38716bff8

-# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
-CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
+CMAKE_ARGS+=-DGGML_MAX_NAME=128

 ifeq ($(NATIVE),false)
 	CMAKE_ARGS+=-DGGML_NATIVE=OFF
@@ -31,7 +19,6 @@ endif
 # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
 ifeq ($(BUILD_TYPE),cublas)
 	CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON
-	CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
 # If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
 # to CMAKE_ARGS automatically
 else ifeq ($(BUILD_TYPE),openblas)
@@ -42,18 +29,14 @@ else ifeq ($(BUILD_TYPE),clblas)
 # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
 else ifeq ($(BUILD_TYPE),hipblas)
 	CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON
-# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
-# But if it's OSX without metal, disable it here
 else ifeq ($(BUILD_TYPE),vulkan)
 	CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
-	CGO_LDFLAGS+=-lvulkan
 else ifeq ($(OS),Darwin)
 	ifneq ($(BUILD_TYPE),metal)
 		CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF
 	else
 		CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON
 		CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
-		TARGET+=--target ggml-metal
 	endif
 endif

@@ -63,12 +46,6 @@ ifeq ($(BUILD_TYPE),sycl_f16)
 	-DCMAKE_CXX_COMPILER=icpx \
 	-DSD_SYCL=ON \
 	-DGGML_SYCL_F16=ON
-	export CC=icx
-	export CXX=icpx
-	CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
-	CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
-	CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
-	CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
 endif

 ifeq ($(BUILD_TYPE),sycl_f32)
@@ -76,83 +53,29 @@ ifeq ($(BUILD_TYPE),sycl_f32)
 	-DCMAKE_C_COMPILER=icx \
 	-DCMAKE_CXX_COMPILER=icpx \
 	-DSD_SYCL=ON
-	export CC=icx
-	export CXX=icpx
-	CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
-	CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
-	CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
-	CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
 endif

-# warnings
-# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
-
-# Find all .a archives in ARCHIVE_DIR
-# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
-GGML_ARCHIVE_DIR := build/ggml/src/
-ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
-
-# Name of the single merged library
-COMBINED_LIB := libggmlall.a
-
-# Rule to merge all the .a files into one
-$(COMBINED_LIB): $(ALL_ARCHIVES)
-	@echo "Merging all .a into $(COMBINED_LIB)"
-	rm -f $@
-	mkdir -p merge-tmp
-	for a in $(ALL_ARCHIVES); do \
-		( cd merge-tmp && ar x ../$$a ); \
-	done
-	( cd merge-tmp && ar rcs ../$@ *.o )
-	# Ensure we have a proper index
-	ranlib $@
-	# Clean up
-	rm -rf merge-tmp
-
-build/libstable-diffusion.a:
-	@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
-ifneq (,$(findstring sycl,$(BUILD_TYPE)))
-	+bash -c "source $(ONEAPI_VARS); \
-		mkdir -p build && \
-		cd build && \
-		cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
-		cmake --build . --config Release"
-else
-	mkdir -p build && \
-	cd build && \
-	cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
-	cmake --build . --config Release
-endif
-	$(MAKE) $(COMBINED_LIB)
-
-gosd.o:
-ifneq (,$(findstring sycl,$(BUILD_TYPE)))
-	+bash -c "source $(ONEAPI_VARS); \
-		$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
-else
-	$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
-endif
-
-## stablediffusion (ggml)
 sources/stablediffusion-ggml.cpp:
 	git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
 	cd sources/stablediffusion-ggml.cpp && \
 	git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
 	git submodule update --init --recursive --depth 1 --single-branch
|
git submodule update --init --recursive --depth 1 --single-branch
|
||||||
|
|
||||||
libsd.a: sources/stablediffusion-ggml.cpp build/libstable-diffusion.a gosd.o
|
libgosd.so: sources/stablediffusion-ggml.cpp CMakeLists.txt gosd.cpp gosd.h
|
||||||
cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
|
mkdir -p build && \
|
||||||
$(AR) rcs libsd.a gosd.o
|
cd build && \
|
||||||
|
cmake .. $(CMAKE_ARGS) && \
|
||||||
|
cmake --build . --config Release -j$(JOBS) && \
|
||||||
|
cd .. && \
|
||||||
|
mv build/libgosd.so ./
|
||||||
|
|
||||||
stablediffusion-ggml: libsd.a
|
stablediffusion-ggml: main.go gosd.go libgosd.so
|
||||||
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
|
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
|
||||||
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
|
|
||||||
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
|
|
||||||
|
|
||||||
package:
|
package: stablediffusion-ggml
|
||||||
bash package.sh
|
bash package.sh
|
||||||
|
|
||||||
build: stablediffusion-ggml package
|
build: package
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf gosd.o libsd.a build $(COMBINED_LIB)
|
rm -rf libgosd.so build stablediffusion-ggml package sources
|
||||||
|
|||||||
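The net effect of the Makefile rewrite above: instead of compiling gosd.cpp through cgo and hand-merging every ggml static archive into libggmlall.a, CMake now produces a single libgosd.so, and the Go binary is built with CGO_ENABLED=0 and binds the exported C functions at runtime via ebitengine/purego (the main.go diff further below shows the actual registration table). A minimal sketch of that loading pattern, assuming only the purego API and the load_model prototype from gosd.h:

package main

import "github.com/ebitengine/purego"

// LoadModel mirrors the C prototype:
//   int load_model(const char *model, char *model_path, char *options[], int threads, int diff);
// purego converts plain Go string arguments to NUL-terminated C strings.
var LoadModel func(model, modelPath string, options []uintptr, threads int32, diff int) int

func main() {
	// Dlopen the backend library built by CMake; RTLD_GLOBAL makes its
	// dependencies' symbols visible to later lookups.
	lib, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	// Bind the Go function variable to the exported C symbol.
	purego.RegisterLibFunc(&LoadModel, lib, "load_model")
}

Because the binary no longer links the C library, the Go toolchain needs no C compiler at build time; all native code moves behind the dlopen boundary.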
gosd.cpp
@@ -1,16 +1,14 @@
+#include <cstdint>
+#define GGML_MAX_NAME 128
+
 #include <stdio.h>
 #include <string.h>
 #include <time.h>
-#include <iostream>
-#include <random>
 #include <string>
 #include <vector>
+#include <filesystem>
 #include "gosd.h"
 
-// #include "preprocessing.hpp"
-#include "flux.hpp"
-#include "stable-diffusion.h"
-
 #define STB_IMAGE_IMPLEMENTATION
 #define STB_IMAGE_STATIC
 #include "stb_image.h"
@@ -25,7 +23,7 @@
 
 // Names of the sampler method, same order as enum sample_method in stable-diffusion.h
 const char* sample_method_str[] = {
-    "euler_a",
+    "default",
     "euler",
     "heun",
     "dpm2",
@@ -37,43 +35,89 @@ const char* sample_method_str[] = {
     "lcm",
     "ddim_trailing",
     "tcd",
+    "euler_a",
 };
 
+static_assert(std::size(sample_method_str) == SAMPLE_METHOD_COUNT, "sample method mismatch");
+
 // Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h
-const char* schedule_str[] = {
+const char* schedulers[] = {
     "default",
     "discrete",
    "karras",
     "exponential",
     "ays",
     "gits",
+    "smoothstep",
 };
 
+static_assert(std::size(schedulers) == SCHEDULE_COUNT, "schedulers mismatch");
+
 sd_ctx_t* sd_c;
+// Moved from the context (load time) to generation time params
+scheduler_t scheduler = scheduler_t::DEFAULT;
+
 sample_method_t sample_method;
 
-int load_model(char *model, char* options[], int threads, int diff) {
-    fprintf (stderr, "Loading model!\n");
+// Copied from the upstream CLI
+static void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
+    //SDParams* params = (SDParams*)data;
+    const char* level_str;
 
-    char *stableDiffusionModel = "";
+    if (!log /*|| (!params->verbose && level <= SD_LOG_DEBUG)*/) {
+        return;
+    }
+
+    switch (level) {
+        case SD_LOG_DEBUG:
+            level_str = "DEBUG";
+            break;
+        case SD_LOG_INFO:
+            level_str = "INFO";
+            break;
+        case SD_LOG_WARN:
+            level_str = "WARN";
+            break;
+        case SD_LOG_ERROR:
+            level_str = "ERROR";
+            break;
+        default: /* Potential future-proofing */
+            level_str = "?????";
+            break;
+    }
+
+    fprintf(stderr, "[%-5s] ", level_str);
+    fputs(log, stderr);
+    fflush(stderr);
+}
+
+int load_model(const char *model, char *model_path, char* options[], int threads, int diff) {
+    fprintf (stderr, "Loading model: %p=%s\n", model, model);
+
+    sd_set_log_callback(sd_log_cb, NULL);
+
+    const char *stableDiffusionModel = "";
     if (diff == 1 ) {
         stableDiffusionModel = model;
         model = "";
     }
 
     // decode options. Options are in form optname:optvale, or if booleans only optname.
-    char *clip_l_path = "";
-    char *clip_g_path = "";
-    char *t5xxl_path = "";
-    char *vae_path = "";
-    char *scheduler = "";
-    char *sampler = "";
+    const char *clip_l_path = "";
+    const char *clip_g_path = "";
+    const char *t5xxl_path = "";
+    const char *vae_path = "";
+    const char *scheduler_str = "";
+    const char *sampler = "";
+    char *lora_dir = model_path;
+    bool lora_dir_allocated = false;
+
+    fprintf(stderr, "parsing options: %p\n", options);
 
     // If options is not NULL, parse options
     for (int i = 0; options[i] != NULL; i++) {
-        char *optname = strtok(options[i], ":");
-        char *optval = strtok(NULL, ":");
+        const char *optname = strtok(options[i], ":");
+        const char *optval = strtok(NULL, ":");
         if (optval == NULL) {
            optval = "true";
        }
@@ -91,77 +135,132 @@ int load_model(char *model, char* options[], int threads, int diff) {
            vae_path = optval;
        }
        if (!strcmp(optname, "scheduler")) {
-            scheduler = optval;
+            scheduler_str = optval;
        }
        if (!strcmp(optname, "sampler")) {
            sampler = optval;
        }
+        if (!strcmp(optname, "lora_dir")) {
+            // Path join with model dir
+            if (model_path && strlen(model_path) > 0) {
+                std::filesystem::path model_path_str(model_path);
+                std::filesystem::path lora_path(optval);
+                std::filesystem::path full_lora_path = model_path_str / lora_path;
+                lora_dir = strdup(full_lora_path.string().c_str());
+                lora_dir_allocated = true;
+                fprintf(stderr, "Lora dir resolved to: %s\n", lora_dir);
+            } else {
+                lora_dir = strdup(optval);
+                lora_dir_allocated = true;
+                fprintf(stderr, "No model path provided, using lora dir as-is: %s\n", lora_dir);
+            }
+        }
    }
 
+    fprintf(stderr, "parsed options\n");
+
    int sample_method_found = -1;
-    for (int m = 0; m < N_SAMPLE_METHODS; m++) {
+    for (int m = 0; m < SAMPLE_METHOD_COUNT; m++) {
        if (!strcmp(sampler, sample_method_str[m])) {
            sample_method_found = m;
+            fprintf(stderr, "Found sampler: %s\n", sampler);
        }
    }
    if (sample_method_found == -1) {
        fprintf(stderr, "Invalid sample method, default to EULER_A!\n");
-        sample_method_found = EULER_A;
+        sample_method_found = sample_method_t::SAMPLE_METHOD_DEFAULT;
    }
    sample_method = (sample_method_t)sample_method_found;
 
-    int schedule_found = -1;
-    for (int d = 0; d < N_SCHEDULES; d++) {
-        if (!strcmp(scheduler, schedule_str[d])) {
-            schedule_found = d;
-            fprintf (stderr, "Found scheduler: %s\n", scheduler);
+    for (int d = 0; d < SCHEDULE_COUNT; d++) {
+        if (!strcmp(scheduler_str, schedulers[d])) {
+            scheduler = (scheduler_t)d;
+            fprintf (stderr, "Found scheduler: %s\n", scheduler_str);
        }
    }
 
-    if (schedule_found == -1) {
-        fprintf (stderr, "Invalid scheduler! using DEFAULT\n");
-        schedule_found = DEFAULT;
-    }
-
-    schedule_t schedule = (schedule_t)schedule_found;
-
    fprintf (stderr, "Creating context\n");
-    sd_ctx_t* sd_ctx = new_sd_ctx(model,
-                                  clip_l_path,
-                                  clip_g_path,
-                                  t5xxl_path,
-                                  stableDiffusionModel,
-                                  vae_path,
-                                  "",
-                                  "",
-                                  "",
-                                  "",
-                                  "",
-                                  false,
-                                  false,
-                                  false,
-                                  threads,
-                                  SD_TYPE_COUNT,
-                                  STD_DEFAULT_RNG,
-                                  schedule,
-                                  false,
-                                  false,
-                                  false,
-                                  false);
+    sd_ctx_params_t ctx_params;
+    sd_ctx_params_init(&ctx_params);
+    ctx_params.model_path = model;
+    ctx_params.clip_l_path = clip_l_path;
+    ctx_params.clip_g_path = clip_g_path;
+    ctx_params.t5xxl_path = t5xxl_path;
+    ctx_params.diffusion_model_path = stableDiffusionModel;
+    ctx_params.vae_path = vae_path;
+    ctx_params.taesd_path = "";
+    ctx_params.control_net_path = "";
+    ctx_params.lora_model_dir = lora_dir;
+    ctx_params.embedding_dir = "";
+    ctx_params.vae_decode_only = false;
+    ctx_params.free_params_immediately = false;
+    ctx_params.n_threads = threads;
+    ctx_params.rng_type = STD_DEFAULT_RNG;
+    sd_ctx_t* sd_ctx = new_sd_ctx(&ctx_params);
 
    if (sd_ctx == NULL) {
        fprintf (stderr, "failed loading model (generic error)\n");
+        // Clean up allocated memory
+        if (lora_dir_allocated && lora_dir) {
+            free(lora_dir);
+        }
        return 1;
    }
    fprintf (stderr, "Created context: OK\n");
 
    sd_c = sd_ctx;
 
+    // Clean up allocated memory
+    if (lora_dir_allocated && lora_dir) {
+        free(lora_dir);
+    }
+
    return 0;
 }
 
-int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed , char *dst, float cfg_scale) {
+void sd_tiling_params_set_enabled(sd_tiling_params_t *params, bool enabled) {
+    params->enabled = enabled;
+}
+
+void sd_tiling_params_set_tile_sizes(sd_tiling_params_t *params, int tile_size_x, int tile_size_y) {
+    params->tile_size_x = tile_size_x;
+    params->tile_size_y = tile_size_y;
+}
+
+void sd_tiling_params_set_rel_sizes(sd_tiling_params_t *params, float rel_size_x, float rel_size_y) {
+    params->rel_size_x = rel_size_x;
+    params->rel_size_y = rel_size_y;
+}
+
+void sd_tiling_params_set_target_overlap(sd_tiling_params_t *params, float target_overlap) {
+    params->target_overlap = target_overlap;
+}
+
+sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t *params) {
+    return &params->vae_tiling_params;
+}
+
+sd_img_gen_params_t* sd_img_gen_params_new(void) {
+    sd_img_gen_params_t *params = (sd_img_gen_params_t *)std::malloc(sizeof(sd_img_gen_params_t));
+    sd_img_gen_params_init(params);
+    return params;
+}
+
+void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt) {
+    params->prompt = prompt;
+    params->negative_prompt = negative_prompt;
+}
+
+void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height) {
+    params->width = width;
+    params->height = height;
+}
+
+void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed) {
+    params->seed = seed;
+}
+
+int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count) {
 
    sd_image_t* results;
 
@@ -169,37 +268,199 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
 
    fprintf (stderr, "Generating image\n");
 
-    results = txt2img(sd_c,
-                      text,
-                      negativeText,
-                      -1, //clip_skip
-                      cfg_scale, // sfg_scale
-                      3.5f,
-                      0, // eta
-                      width,
-                      height,
-                      sample_method,
-                      steps,
-                      seed,
-                      1,
-                      NULL,
-                      0.9f,
-                      20.f,
-                      false,
-                      "",
-                      skip_layers.data(),
-                      skip_layers.size(),
-                      0,
-                      0.01,
-                      0.2);
+    p->sample_params.guidance.txt_cfg = cfg_scale;
+    p->sample_params.guidance.slg.layers = skip_layers.data();
+    p->sample_params.guidance.slg.layer_count = skip_layers.size();
+    p->sample_params.sample_method = sample_method;
+    p->sample_params.sample_steps = steps;
+    p->sample_params.scheduler = scheduler;
+
+    int width = p->width;
+    int height = p->height;
+
+    // Handle input image for img2img
+    bool has_input_image = (src_image != NULL && strlen(src_image) > 0);
+    bool has_mask_image = (mask_image != NULL && strlen(mask_image) > 0);
+
+    uint8_t* input_image_buffer = NULL;
+    uint8_t* mask_image_buffer = NULL;
+    std::vector<uint8_t> default_mask_image_vec;
+
+    if (has_input_image) {
+        fprintf(stderr, "Loading input image: %s\n", src_image);
+
+        int c = 0;
+        int img_width = 0;
+        int img_height = 0;
+        input_image_buffer = stbi_load(src_image, &img_width, &img_height, &c, 3);
+        if (input_image_buffer == NULL) {
+            fprintf(stderr, "Failed to load input image from '%s'\n", src_image);
+            return 1;
+        }
+        if (c < 3) {
+            fprintf(stderr, "Input image must have at least 3 channels, got %d\n", c);
+            free(input_image_buffer);
+            return 1;
+        }
+
+        // Resize input image if dimensions don't match
+        if (img_width != width || img_height != height) {
+            fprintf(stderr, "Resizing input image from %dx%d to %dx%d\n", img_width, img_height, width, height);
+
+            uint8_t* resized_image_buffer = (uint8_t*)malloc(height * width * 3);
+            if (resized_image_buffer == NULL) {
+                fprintf(stderr, "Failed to allocate memory for resized image\n");
+                free(input_image_buffer);
+                return 1;
+            }
+
+            stbir_resize(input_image_buffer, img_width, img_height, 0,
+                         resized_image_buffer, width, height, 0, STBIR_TYPE_UINT8,
+                         3, STBIR_ALPHA_CHANNEL_NONE, 0,
+                         STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
+                         STBIR_FILTER_BOX, STBIR_FILTER_BOX,
+                         STBIR_COLORSPACE_SRGB, nullptr);
+
+            free(input_image_buffer);
+            input_image_buffer = resized_image_buffer;
+        }
+
+        p->init_image = {(uint32_t)width, (uint32_t)height, 3, input_image_buffer};
+        p->strength = strength;
+        fprintf(stderr, "Using img2img with strength: %.2f\n", strength);
+    } else {
+        // No input image, use empty image for text-to-image
+        p->init_image = {(uint32_t)width, (uint32_t)height, 3, NULL};
+        p->strength = 0.0f;
+    }
+
+    // Handle mask image for inpainting
+    if (has_mask_image) {
+        fprintf(stderr, "Loading mask image: %s\n", mask_image);
+
+        int c = 0;
+        int mask_width = 0;
+        int mask_height = 0;
+        mask_image_buffer = stbi_load(mask_image, &mask_width, &mask_height, &c, 1);
+        if (mask_image_buffer == NULL) {
+            fprintf(stderr, "Failed to load mask image from '%s'\n", mask_image);
+            if (input_image_buffer) free(input_image_buffer);
+            return 1;
+        }
+
+        // Resize mask if dimensions don't match
+        if (mask_width != width || mask_height != height) {
+            fprintf(stderr, "Resizing mask image from %dx%d to %dx%d\n", mask_width, mask_height, width, height);
+
+            uint8_t* resized_mask_buffer = (uint8_t*)malloc(height * width);
+            if (resized_mask_buffer == NULL) {
+                fprintf(stderr, "Failed to allocate memory for resized mask\n");
+                free(mask_image_buffer);
+                if (input_image_buffer) free(input_image_buffer);
+                return 1;
+            }
+
+            stbir_resize(mask_image_buffer, mask_width, mask_height, 0,
+                         resized_mask_buffer, width, height, 0, STBIR_TYPE_UINT8,
+                         1, STBIR_ALPHA_CHANNEL_NONE, 0,
+                         STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
+                         STBIR_FILTER_BOX, STBIR_FILTER_BOX,
+                         STBIR_COLORSPACE_SRGB, nullptr);
+
+            free(mask_image_buffer);
+            mask_image_buffer = resized_mask_buffer;
+        }
+
+        p->mask_image = {(uint32_t)width, (uint32_t)height, 1, mask_image_buffer};
+        fprintf(stderr, "Using inpainting with mask\n");
+    } else {
+        // No mask image, create default full mask
+        default_mask_image_vec.resize(width * height, 255);
+        p->mask_image = {(uint32_t)width, (uint32_t)height, 1, default_mask_image_vec.data()};
+    }
+
+    // Handle reference images
+    std::vector<sd_image_t> ref_images_vec;
+    std::vector<uint8_t*> ref_image_buffers;
+
+    if (ref_images_count > 0 && ref_images != NULL) {
+        fprintf(stderr, "Loading %d reference images\n", ref_images_count);
+
+        for (int i = 0; i < ref_images_count; i++) {
+            if (ref_images[i] == NULL || strlen(ref_images[i]) == 0) {
+                continue;
+            }
+
+            fprintf(stderr, "Loading reference image %d: %s\n", i + 1, ref_images[i]);
+
+            int c = 0;
+            int ref_width = 0;
+            int ref_height = 0;
+            uint8_t* ref_image_buffer = stbi_load(ref_images[i], &ref_width, &ref_height, &c, 3);
+            if (ref_image_buffer == NULL) {
+                fprintf(stderr, "Failed to load reference image from '%s'\n", ref_images[i]);
+                continue;
+            }
+            if (c < 3) {
+                fprintf(stderr, "Reference image must have at least 3 channels, got %d\n", c);
+                free(ref_image_buffer);
+                continue;
+            }
+
+            // Resize reference image if dimensions don't match
+            if (ref_width != width || ref_height != height) {
+                fprintf(stderr, "Resizing reference image from %dx%d to %dx%d\n", ref_width, ref_height, width, height);
+
+                uint8_t* resized_ref_buffer = (uint8_t*)malloc(height * width * 3);
+                if (resized_ref_buffer == NULL) {
+                    fprintf(stderr, "Failed to allocate memory for resized reference image\n");
+                    free(ref_image_buffer);
+                    continue;
+                }
+
+                stbir_resize(ref_image_buffer, ref_width, ref_height, 0,
+                             resized_ref_buffer, width, height, 0, STBIR_TYPE_UINT8,
+                             3, STBIR_ALPHA_CHANNEL_NONE, 0,
+                             STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
+                             STBIR_FILTER_BOX, STBIR_FILTER_BOX,
+                             STBIR_COLORSPACE_SRGB, nullptr);
+
+                free(ref_image_buffer);
+                ref_image_buffer = resized_ref_buffer;
+            }
+
+            ref_image_buffers.push_back(ref_image_buffer);
+            ref_images_vec.push_back({(uint32_t)width, (uint32_t)height, 3, ref_image_buffer});
+        }
+
+        if (!ref_images_vec.empty()) {
+            p->ref_images = ref_images_vec.data();
+            p->ref_images_count = ref_images_vec.size();
+            fprintf(stderr, "Using %zu reference images\n", ref_images_vec.size());
+        }
+    }
+
+    results = generate_image(sd_c, p);
+
+    std::free(p);
+
    if (results == NULL) {
        fprintf (stderr, "NO results\n");
+        if (input_image_buffer) free(input_image_buffer);
+        if (mask_image_buffer) free(mask_image_buffer);
+        for (auto buffer : ref_image_buffers) {
+            if (buffer) free(buffer);
+        }
        return 1;
    }
 
    if (results[0].data == NULL) {
        fprintf (stderr, "Results with no data\n");
+        if (input_image_buffer) free(input_image_buffer);
+        if (mask_image_buffer) free(mask_image_buffer);
+        for (auto buffer : ref_image_buffers) {
+            if (buffer) free(buffer);
+        }
        return 1;
    }
 
@@ -215,17 +476,21 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
        results[0].data, 0, NULL);
    fprintf (stderr, "Saved resulting image to '%s'\n", dst);
 
-    // TODO: free results. Why does it crash?
+    // Clean up
    free(results[0].data);
    results[0].data = NULL;
    free(results);
-    fprintf (stderr, "gen_image is done", dst);
+    if (input_image_buffer) free(input_image_buffer);
+    if (mask_image_buffer) free(mask_image_buffer);
+    for (auto buffer : ref_image_buffers) {
+        if (buffer) free(buffer);
+    }
+    fprintf (stderr, "gen_image is done: %s", dst);
 
    return 0;
 }
 
 int unload() {
    free_sd_ctx(sd_c);
+    return 0;
 }
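A note on the block of tiny exported setters added to gosd.cpp above: purego can call C functions, but it cannot pass or return C structs by value, so instead of mirroring sd_img_gen_params_t in Go the backend allocates the struct on the C side and hands it around as an opaque uintptr, mutating it only through these accessors. A sketch of how the Go side drives the shims (the function names are the bindings from the gosd.go diff below; the prompt values are illustrative only):

package main

// Function variables assumed to be bound with purego.RegisterLibFunc in main().
var (
	ImgGenParamsNew                func() uintptr
	ImgGenParamsSetPrompts         func(params uintptr, prompt, negativePrompt string)
	ImgGenParamsSetDimensions      func(params uintptr, width, height int)
	ImgGenParamsSetSeed            func(params uintptr, seed int64)
	ImgGenParamsGetVaeTilingParams func(params uintptr) uintptr
	TilingParamsSetEnabled         func(params uintptr, enabled bool)
)

func newParams() uintptr {
	// The C side malloc()s and initializes the struct; Go only ever sees an
	// opaque handle. gen_image() calls std::free(p), so there is no Go-side free.
	p := ImgGenParamsNew()
	ImgGenParamsSetPrompts(p, "a photo of a cat", "")
	ImgGenParamsSetDimensions(p, 512, 512)
	ImgGenParamsSetSeed(p, 42)
	TilingParamsSetEnabled(ImgGenParamsGetVaeTilingParams(p), false)
	return p
}

Keeping the struct layout private to C also means the Go binary does not break when upstream stable-diffusion.cpp reorders or extends the params struct.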
gosd.go
@@ -1,15 +1,10 @@
 package main
 
-// #cgo CXXFLAGS: -I${SRCDIR}/sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/sources/stablediffusion-ggml.cpp -I${SRCDIR}/sources/stablediffusion-ggml.cpp/ggml/include
-// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
-// #include <gosd.h>
-// #include <stdlib.h>
-import "C"
-
 import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"unsafe"
 
@@ -25,20 +20,45 @@ type SDGGML struct {
 	cfgScale float32
 }
 
+var (
+	LoadModel func(model, model_path string, options []uintptr, threads int32, diff int) int
+	GenImage  func(params uintptr, steps int, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []string, refImagesCount int) int
+
+	TilingParamsSetEnabled       func(params uintptr, enabled bool)
+	TilingParamsSetTileSizes     func(params uintptr, tileSizeX int, tileSizeY int)
+	TilingParamsSetRelSizes      func(params uintptr, relSizeX float32, relSizeY float32)
+	TilingParamsSetTargetOverlap func(params uintptr, targetOverlap float32)
+
+	ImgGenParamsNew                func() uintptr
+	ImgGenParamsSetPrompts         func(params uintptr, prompt string, negativePrompt string)
+	ImgGenParamsSetDimensions      func(params uintptr, width int, height int)
+	ImgGenParamsSetSeed            func(params uintptr, seed int64)
+	ImgGenParamsGetVaeTilingParams func(params uintptr) uintptr
+)
+
+// Copied from Purego internal/strings
+// TODO: We should upstream sending []string
+func hasSuffix(s, suffix string) bool {
+	return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
+}
+
+func CString(name string) *byte {
+	if hasSuffix(name, "\x00") {
+		return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
+	}
+	b := make([]byte, len(name)+1)
+	copy(b, name)
+	return &b[0]
+}
+
 func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
 
 	sd.threads = int(opts.Threads)
 
-	modelFile := C.CString(opts.ModelFile)
-	defer C.free(unsafe.Pointer(modelFile))
+	modelPath := opts.ModelPath
 
-	var options **C.char
-	// prepare the options array to pass to C
-	size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
-	length := C.size_t(len(opts.Options))
-	options = (**C.char)(C.malloc(length * size))
-	view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0:len(opts.Options):len(opts.Options)]
+	modelFile := opts.ModelFile
+	modelPathC := modelPath
 
 	var diffusionModel int
 
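Two details in the hunks above are easy to miss. CString returns a pointer into a Go-allocated, NUL-terminated byte slice, so those buffers must stay reachable until the foreign call returns; and the pointers travel in a []uintptr because, as the in-code TODO notes, purego does not currently convert strings nested inside slices. A self-contained sketch of the same pattern, assuming the LoadModel binding shown earlier:

package main

import (
	"runtime"
	"unsafe"
)

// LoadModel is assumed to be bound to load_model via purego.RegisterLibFunc.
var LoadModel func(model, modelPath string, options []uintptr, threads int32, diff int) int

// buildOptionVector converts Go strings into a NUL-terminated, argv-style
// vector of C string pointers. The returned byte buffers must stay reachable
// until the foreign call returns, or the GC may reclaim them under C's feet.
func buildOptionVector(opts []string) (vec []uintptr, keepAlive []any) {
	vec = make([]uintptr, len(opts)+1) // trailing zero entry terminates the vector
	for i, s := range opts {
		b := append([]byte(s), 0) // NUL-terminate for C
		vec[i] = uintptr(unsafe.Pointer(&b[0]))
		keepAlive = append(keepAlive, b)
	}
	return vec, keepAlive
}

func loadWithOptions(oo []string) int {
	vec, alive := buildOptionVector(oo)
	ret := LoadModel("model.gguf", "/models", vec, 4, 0)
	runtime.KeepAlive(alive) // buffers must outlive the C call
	return ret
}

The original code gets its NULL terminator from the zeroed extra capacity slot of make([]uintptr, len(oo), len(oo)+1); the sketch makes the terminator explicit, which is the same idea stated more loudly.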
@@ -63,31 +83,63 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
 
 	fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
 
-	for i, x := range oo {
-		view[i] = C.CString(x)
+	// At the time of writing Purego doesn't recurse into slices and convert Go strings to pointers so we need to do that
+	var keepAlive []any
+	options := make([]uintptr, len(oo), len(oo)+1)
+	for i, op := range oo {
+		bytep := CString(op)
+		options[i] = uintptr(unsafe.Pointer(bytep))
+		keepAlive = append(keepAlive, bytep)
 	}
 
 	sd.cfgScale = opts.CFGScale
 
-	ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
+	ret := LoadModel(modelFile, modelPathC, options, opts.Threads, diffusionModel)
 	if ret != 0 {
 		return fmt.Errorf("could not load model")
 	}
 
+	runtime.KeepAlive(keepAlive)
+
 	return nil
 }
 
 func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
-	t := C.CString(opts.PositivePrompt)
-	defer C.free(unsafe.Pointer(t))
+	t := opts.PositivePrompt
+	dst := opts.Dst
+	negative := opts.NegativePrompt
+	srcImage := opts.Src
 
-	dst := C.CString(opts.Dst)
-	defer C.free(unsafe.Pointer(dst))
+	var maskImage string
+	if opts.EnableParameters != "" {
+		if strings.Contains(opts.EnableParameters, "mask:") {
+			parts := strings.Split(opts.EnableParameters, "mask:")
+			if len(parts) > 1 {
+				maskPath := strings.TrimSpace(parts[1])
+				if maskPath != "" {
+					maskImage = maskPath
+				}
+			}
+		}
+	}
 
-	negative := C.CString(opts.NegativePrompt)
-	defer C.free(unsafe.Pointer(negative))
+	refImagesCount := len(opts.RefImages)
+	refImages := make([]string, refImagesCount, refImagesCount+1)
+	copy(refImages, opts.RefImages)
+	*(*uintptr)(unsafe.Add(unsafe.Pointer(&refImages), refImagesCount)) = 0
 
-	ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
+	// Default strength for img2img (0.75 is a good default)
+	strength := float32(0.75)
+
+	// free'd by GenImage
+	p := ImgGenParamsNew()
+	ImgGenParamsSetPrompts(p, t, negative)
+	ImgGenParamsSetDimensions(p, int(opts.Width), int(opts.Height))
+	ImgGenParamsSetSeed(p, int64(opts.Seed))
+	vaep := ImgGenParamsGetVaeTilingParams(p)
+	TilingParamsSetEnabled(vaep, false)
+
+	ret := GenImage(p, int(opts.Step), dst, sd.cfgScale, srcImage, strength, maskImage, refImages, refImagesCount)
 	if ret != 0 {
 		return fmt.Errorf("inference failed")
 	}
gosd.h
@@ -1,8 +1,23 @@
+#include <cstdint>
+#include "stable-diffusion.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
-int load_model(char *model, char* options[], int threads, int diffusionModel);
-int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
+void sd_tiling_params_set_enabled(sd_tiling_params_t *params, bool enabled);
+void sd_tiling_params_set_tile_sizes(sd_tiling_params_t *params, int tile_size_x, int tile_size_y);
+void sd_tiling_params_set_rel_sizes(sd_tiling_params_t *params, float rel_size_x, float rel_size_y);
+void sd_tiling_params_set_target_overlap(sd_tiling_params_t *params, float target_overlap);
+sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t *params);
+
+sd_img_gen_params_t* sd_img_gen_params_new(void);
+void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt);
+void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height);
+void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed);
+
+int load_model(const char *model, char *model_path, char* options[], int threads, int diffusionModel);
+int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count);
 #ifdef __cplusplus
 }
 #endif
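The rewritten gosd.h is now effectively the foreign-function contract: each prototype must line up, argument for argument, with the Go function variable registered against it, because purego performs no signature checking of its own. A sketch of one such pairing (the uintptr and int64 mappings follow purego's documented scalar conversions; treat the specifics as assumptions rather than guarantees):

package main

import "github.com/ebitengine/purego"

// C prototype (gosd.h):
//   void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed);
// The struct pointer travels as an opaque uintptr and int64_t maps to Go's
// int64. Get the pairing wrong and the call silently reads garbage, so the
// header and the bindings have to be kept in lockstep by hand.
var ImgGenParamsSetSeed func(params uintptr, seed int64)

func register(lib uintptr) {
	purego.RegisterLibFunc(&ImgGenParamsSetSeed, lib, "sd_img_gen_params_set_seed")
}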
main.go
@@ -1,9 +1,9 @@
 package main
 
-// Note: this is started internally by LocalAI and a server is allocated for each model
 import (
 	"flag"
 
+	"github.com/ebitengine/purego"
 	grpc "github.com/mudler/LocalAI/pkg/grpc"
 )
 
@@ -11,7 +11,36 @@ var (
 	addr = flag.String("addr", "localhost:50051", "the address to connect to")
 )
 
+type LibFuncs struct {
+	FuncPtr any
+	Name    string
+}
+
 func main() {
+	gosd, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
+	if err != nil {
+		panic(err)
+	}
+
+	libFuncs := []LibFuncs{
+		{&LoadModel, "load_model"},
+		{&GenImage, "gen_image"},
+		{&TilingParamsSetEnabled, "sd_tiling_params_set_enabled"},
+		{&TilingParamsSetTileSizes, "sd_tiling_params_set_tile_sizes"},
+		{&TilingParamsSetRelSizes, "sd_tiling_params_set_rel_sizes"},
+		{&TilingParamsSetTargetOverlap, "sd_tiling_params_set_target_overlap"},
+
+		{&ImgGenParamsNew, "sd_img_gen_params_new"},
+		{&ImgGenParamsSetPrompts, "sd_img_gen_params_set_prompts"},
+		{&ImgGenParamsSetDimensions, "sd_img_gen_params_set_dimensions"},
+		{&ImgGenParamsSetSeed, "sd_img_gen_params_set_seed"},
+		{&ImgGenParamsGetVaeTilingParams, "sd_img_gen_params_get_vae_tiling_params"},
+	}
+
+	for _, lf := range libFuncs {
+		purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
+	}
+
 	flag.Parse()
 
 	if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {
package.sh
@@ -10,8 +10,9 @@ CURDIR=$(dirname "$(realpath $0)")
 # Create lib directory
 mkdir -p $CURDIR/package/lib
 
-cp -avrf $CURDIR/stablediffusion-ggml $CURDIR/package/
-cp -rfv $CURDIR/run.sh $CURDIR/package/
+cp -avf $CURDIR/libgosd.so $CURDIR/package/
+cp -avf $CURDIR/stablediffusion-ggml $CURDIR/package/
+cp -fv $CURDIR/run.sh $CURDIR/package/
 
 # Detect architecture and copy appropriate libraries
 if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -42,11 +43,13 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
     cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
     cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
     cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
+elif [ $(uname -s) = "Darwin" ]; then
+    echo "Detected Darwin"
 else
     echo "Error: Could not detect architecture"
     exit 1
 fi
 
 echo "Packaging completed successfully"
 ls -liah $CURDIR/package/
 ls -liah $CURDIR/package/lib/
backend/go/whisper/.gitignore (new file, 7 lines, vendored)
@@ -0,0 +1,7 @@
+.cache/
+sources/
+build/
+package/
+whisper
+libgowhisper.so
+
backend/go/whisper/CMakeLists.txt (new file, 16 lines)
@@ -0,0 +1,16 @@
+cmake_minimum_required(VERSION 3.12)
+project(gowhisper LANGUAGES C CXX)
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+
+add_subdirectory(./sources/whisper.cpp)
+
+add_library(gowhisper MODULE gowhisper.cpp)
+target_link_libraries(gowhisper PRIVATE whisper ggml)
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
+  target_link_libraries(gowhisper PRIVATE stdc++fs)
+endif()
+
+set_property(TARGET gowhisper PROPERTY CXX_STANDARD 17)
+set_target_properties(gowhisper PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
backend/go/whisper/Makefile
@@ -1,110 +1,53 @@
-GOCMD=go
+CMAKE_ARGS?=
+BUILD_TYPE?=
 NATIVE?=false
 
-BUILD_TYPE?=
-CMAKE_ARGS?=
+GOCMD?=go
+GO_TAGS?=
+JOBS?=$(shell nproc --ignore=1)
 
 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
-WHISPER_CPP_VERSION?=1f5cf0b2888402d57bb17b2029b2caa97e5f3baf
+WHISPER_CPP_VERSION?=7849aff7a2e1f4234aa31b01a1870906d5431959
 
-export WHISPER_CMAKE_ARGS?=-DBUILD_SHARED_LIBS=OFF
-export WHISPER_DIR=$(abspath ./sources/whisper.cpp)
-export WHISPER_INCLUDE_PATH=$(WHISPER_DIR)/include:$(WHISPER_DIR)/ggml/include
-export WHISPER_LIBRARY_PATH=$(WHISPER_DIR)/build/src/:$(WHISPER_DIR)/build/ggml/src
-
-CGO_LDFLAGS_WHISPER?=
-CGO_LDFLAGS_WHISPER+=-lggml
-CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
-CUDA_LIBPATH?=/usr/local/cuda/lib64/
-
-ONEAPI_VERSION?=2025.2
-
-# IF native is false, we add -DGGML_NATIVE=OFF to CMAKE_ARGS
-ifeq ($(NATIVE),false)
-CMAKE_ARGS+=-DGGML_NATIVE=OFF
-WHISPER_CMAKE_ARGS+=-DGGML_NATIVE=OFF
-endif
-CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
 ifeq ($(NATIVE),false)
 CMAKE_ARGS+=-DGGML_NATIVE=OFF
 endif
-# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
 ifeq ($(BUILD_TYPE),cublas)
-CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
 CMAKE_ARGS+=-DGGML_CUDA=ON
-CGO_LDFLAGS_WHISPER+=-lcufft -lggml-cuda
-export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-cuda/
-# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
-# to CMAKE_ARGS automatically
 else ifeq ($(BUILD_TYPE),openblas)
 CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
-# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
 else ifeq ($(BUILD_TYPE),clblas)
 CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
-# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
 else ifeq ($(BUILD_TYPE),hipblas)
-ROCM_HOME ?= /opt/rocm
-ROCM_PATH ?= /opt/rocm
-LD_LIBRARY_PATH ?= /opt/rocm/lib:/opt/rocm/llvm/lib
-export STABLE_BUILD_TYPE=
-export CXX=$(ROCM_HOME)/llvm/bin/clang++
-export CC=$(ROCM_HOME)/llvm/bin/clang
-# GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
-# AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
-CMAKE_ARGS+=-DGGML_HIP=ON
-CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib -L$(CURRENT_MAKEFILE_DIR)/sources/whisper.cpp/build/ggml/src/ggml-hip/ -lggml-hip
-# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
+CMAKE_ARGS+=-DGGML_HIPBLAS=ON
 else ifeq ($(BUILD_TYPE),vulkan)
-CMAKE_ARGS+=-DGGML_VULKAN=1
-CGO_LDFLAGS_WHISPER+=-lggml-vulkan -lvulkan
-export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-vulkan/
+CMAKE_ARGS+=-DGGML_VULKAN=ON
 else ifeq ($(OS),Darwin)
-ifeq ($(BUILD_TYPE),)
-BUILD_TYPE=metal
-endif
 ifneq ($(BUILD_TYPE),metal)
 CMAKE_ARGS+=-DGGML_METAL=OFF
-CGO_LDFLAGS_WHISPER+=-lggml-blas
-export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-blas
 else
 CMAKE_ARGS+=-DGGML_METAL=ON
 CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
-CMAKE_ARGS+=-DGGML_METAL_USE_BF16=ON
-CMAKE_ARGS+=-DGGML_OPENMP=OFF
-CMAKE_ARGS+=-DWHISPER_BUILD_EXAMPLES=OFF
-CMAKE_ARGS+=-DWHISPER_BUILD_TESTS=OFF
-CMAKE_ARGS+=-DWHISPER_BUILD_SERVER=OFF
-CGO_LDFLAGS += -framework Accelerate
-CGO_LDFLAGS_WHISPER+=-lggml-metal -lggml-blas
-export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-metal/:$(WHISPER_DIR)/build/ggml/src/ggml-blas
 endif
-TARGET+=--target ggml-metal
-endif
-
-ifneq (,$(findstring sycl,$(BUILD_TYPE)))
-export CC=icx
-export CXX=icpx
-CGO_LDFLAGS_WHISPER += -fsycl -L${DNNLROOT}/lib -rpath ${ONEAPI_ROOT}/${ONEAPI_VERSION}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL -lggml-sycl
-CGO_LDFLAGS_WHISPER += $(shell pkg-config --libs mkl-static-lp64-gomp)
-CGO_CXXFLAGS_WHISPER += -fiopenmp -fopenmp-targets=spir64
-CGO_CXXFLAGS_WHISPER += $(shell pkg-config --cflags mkl-static-lp64-gomp )
-export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-sycl/
-CMAKE_ARGS+=-DGGML_SYCL=ON \
-	-DCMAKE_C_COMPILER=icx \
-	-DCMAKE_CXX_COMPILER=icpx \
-	-DCMAKE_CXX_FLAGS="-fsycl"
 endif
 
 ifeq ($(BUILD_TYPE),sycl_f16)
-CMAKE_ARGS+=-DGGML_SYCL_F16=ON
+CMAKE_ARGS+=-DGGML_SYCL=ON \
+	-DCMAKE_C_COMPILER=icx \
+	-DCMAKE_CXX_COMPILER=icpx \
+	-DGGML_SYCL_F16=ON
 endif
 
-ifneq ($(OS),Darwin)
-CGO_LDFLAGS_WHISPER+=-lgomp
+ifeq ($(BUILD_TYPE),sycl_f32)
+CMAKE_ARGS+=-DGGML_SYCL=ON \
+	-DCMAKE_C_COMPILER=icx \
+	-DCMAKE_CXX_COMPILER=icpx
 endif
 
-## whisper
 sources/whisper.cpp:
 	mkdir -p sources/whisper.cpp
 	cd sources/whisper.cpp && \
@@ -114,18 +57,21 @@ sources/whisper.cpp:
 	git checkout $(WHISPER_CPP_VERSION) && \
 	git submodule update --init --recursive --depth 1 --single-branch
 
-sources/whisper.cpp/build/src/libwhisper.a: sources/whisper.cpp
-	cd sources/whisper.cpp && cmake $(CMAKE_ARGS) $(WHISPER_CMAKE_ARGS) . -B ./build
-	cd sources/whisper.cpp/build && cmake --build . --config Release
+libgowhisper.so: sources/whisper.cpp CMakeLists.txt gowhisper.cpp gowhisper.h
+	mkdir -p build && \
+	cd build && \
+	cmake .. $(CMAKE_ARGS) && \
+	cmake --build . --config Release -j$(JOBS) && \
+	cd .. && \
+	mv build/libgowhisper.so ./
 
-whisper: sources/whisper.cpp sources/whisper.cpp/build/src/libwhisper.a
-	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
-	$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
-	CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH="${WHISPER_INCLUDE_PATH}" LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" LD_LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" \
-	CGO_CXXFLAGS="$(CGO_CXXFLAGS_WHISPER)" \
-	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o whisper ./
+whisper: main.go gowhisper.go libgowhisper.so
	CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o whisper ./
 
-package:
+package: whisper
 	bash package.sh
 
-build: whisper package
+build: package
+
+clean:
+	rm -rf libgowhisper.so build whisper
backend/go/whisper/gowhisper.cpp (new file, 154 lines)
@@ -0,0 +1,154 @@
+#include "gowhisper.h"
+#include "ggml-backend.h"
+#include "whisper.h"
+#include <vector>
+
+static struct whisper_vad_context *vctx;
+static struct whisper_context *ctx;
+static std::vector<float> flat_segs;
+
+static void ggml_log_cb(enum ggml_log_level level, const char *log,
+                        void *data) {
+  const char *level_str;
+
+  if (!log) {
+    return;
+  }
+
+  switch (level) {
+  case GGML_LOG_LEVEL_DEBUG:
+    level_str = "DEBUG";
+    break;
+  case GGML_LOG_LEVEL_INFO:
+    level_str = "INFO";
+    break;
+  case GGML_LOG_LEVEL_WARN:
+    level_str = "WARN";
+    break;
+  case GGML_LOG_LEVEL_ERROR:
+    level_str = "ERROR";
+    break;
+  default: /* Potential future-proofing */
+    level_str = "?????";
+    break;
+  }
+
+  fprintf(stderr, "[%-5s] ", level_str);
+  fputs(log, stderr);
+  fflush(stderr);
+}
+
+int load_model(const char *const model_path) {
+  whisper_log_set(ggml_log_cb, nullptr);
+  ggml_backend_load_all();
+
+  struct whisper_context_params cparams = whisper_context_default_params();
+
+  ctx = whisper_init_from_file_with_params(model_path, cparams);
+  if (ctx == nullptr) {
+    fprintf(stderr, "error: Also failed to init model as transcriber\n");
+    return 1;
+  }
+
+  return 0;
+}
+
+int load_model_vad(const char *const model_path) {
+  whisper_log_set(ggml_log_cb, nullptr);
+  ggml_backend_load_all();
+
+  struct whisper_vad_context_params vcparams =
+      whisper_vad_default_context_params();
+
+  // XXX: Overridden to false in upstream due to performance?
+  // vcparams.use_gpu = true;
+
+  vctx = whisper_vad_init_from_file_with_params(model_path, vcparams);
+  if (vctx == nullptr) {
+    fprintf(stderr, "error: Failed to init model as VAD\n");
+    return 1;
+  }
+
+  return 0;
+}
+
+int vad(float pcmf32[], size_t pcmf32_len, float **segs_out,
+        size_t *segs_out_len) {
+  if (!whisper_vad_detect_speech(vctx, pcmf32, pcmf32_len)) {
+    fprintf(stderr, "error: failed to detect speech\n");
+    return 1;
+  }
+
+  struct whisper_vad_params params = whisper_vad_default_params();
+  struct whisper_vad_segments *segs =
+      whisper_vad_segments_from_probs(vctx, params);
+  size_t segn = whisper_vad_segments_n_segments(segs);
+
+  // fprintf(stderr, "Got segments %zd\n", segn);
+
+  flat_segs.clear();
+
+  for (int i = 0; i < segn; i++) {
+    flat_segs.push_back(whisper_vad_segments_get_segment_t0(segs, i));
+    flat_segs.push_back(whisper_vad_segments_get_segment_t1(segs, i));
+  }
+
+  // fprintf(stderr, "setting out variables: %p=%p -> %p, %p=%zx -> %zx\n",
+  //         segs_out, *segs_out, flat_segs.data(), segs_out_len, *segs_out_len,
+  //         flat_segs.size());
+  *segs_out = flat_segs.data();
+  *segs_out_len = flat_segs.size();
+
+  // fprintf(stderr, "freeing segs\n");
+  whisper_vad_free_segments(segs);
+
+  // fprintf(stderr, "returning\n");
+  return 0;
+}
+
+int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
+               float pcmf32[], size_t pcmf32_len, size_t *segs_out_len) {
+  whisper_full_params wparams =
+      whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
+
+  wparams.n_threads = threads;
+  if (*lang != '\0')
+    wparams.language = lang;
+  else {
+    wparams.language = nullptr;
+  }
+
+  wparams.translate = translate;
+  wparams.debug_mode = true;
+  wparams.print_progress = true;
+  wparams.tdrz_enable = tdrz;
+
+  fprintf(stderr, "info: Enable tdrz: %d\n", tdrz);
+
+  if (whisper_full(ctx, wparams, pcmf32, pcmf32_len)) {
+    fprintf(stderr, "error: transcription failed\n");
+    return 1;
+  }
+
+  *segs_out_len = whisper_full_n_segments(ctx);
+
+  return 0;
+}
+
+const char *get_segment_text(int i) {
+  return whisper_full_get_segment_text(ctx, i);
+}
+
+int64_t get_segment_t0(int i) { return whisper_full_get_segment_t0(ctx, i); }
+
+int64_t get_segment_t1(int i) { return whisper_full_get_segment_t1(ctx, i); }
+
+int n_tokens(int i) { return whisper_full_n_tokens(ctx, i); }
+
+int32_t get_token_id(int i, int j) {
+  return whisper_full_get_token_id(ctx, i, j);
+}
+
+bool get_segment_speaker_turn_next(int i) {
+  return whisper_full_get_segment_speaker_turn_next(ctx, i);
+}
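One subtlety in the segment accessors above: whisper_full_get_segment_text() returns a pointer into whisper.cpp's internal state, valid only until the next whisper_full() call. The string purego materializes for a const char* return value appears to alias that C buffer rather than copy it, which would explain why the Go side (next file) wraps the call in strings.Clone. A sketch of the consuming pattern under that assumption:

package main

import "strings"

// CppGetSegmentText is assumed to be bound to get_segment_text via purego.
var CppGetSegmentText func(i int) string

// collectSegments copies each segment's text out of C memory. strings.Clone
// moves the bytes into Go-managed memory so the text survives the next
// whisper_full() call invalidating the C-side buffer.
func collectSegments(n int) []string {
	out := make([]string, 0, n)
	for i := 0; i < n; i++ {
		out = append(out, strings.Clone(CppGetSegmentText(i)))
	}
	return out
}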
161
backend/go/whisper/gowhisper.go
Normal file
161
backend/go/whisper/gowhisper.go
Normal file
@@ -0,0 +1,161 @@
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"unsafe"

	"github.com/go-audio/wav"
	"github.com/mudler/LocalAI/pkg/grpc/base"
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
	"github.com/mudler/LocalAI/pkg/utils"
)

var (
	CppLoadModel                 func(modelPath string) int
	CppLoadModelVAD              func(modelPath string) int
	CppVAD                       func(pcmf32 []float32, pcmf32Size uintptr, segsOut unsafe.Pointer, segsOutLen unsafe.Pointer) int
	CppTranscribe                func(threads uint32, lang string, translate bool, diarize bool, pcmf32 []float32, pcmf32Len uintptr, segsOutLen unsafe.Pointer) int
	CppGetSegmentText            func(i int) string
	CppGetSegmentStart           func(i int) int64
	CppGetSegmentEnd             func(i int) int64
	CppNTokens                   func(i int) int
	CppGetTokenID                func(i int, j int) int
	CppGetSegmentSpeakerTurnNext func(i int) bool
)

type Whisper struct {
	base.SingleThread
}

func (w *Whisper) Load(opts *pb.ModelOptions) error {
	vadOnly := false

	for _, oo := range opts.Options {
		if oo == "vad_only" {
			vadOnly = true
		} else {
			fmt.Fprintf(os.Stderr, "Unrecognized option: %v\n", oo)
		}
	}

	if vadOnly {
		if ret := CppLoadModelVAD(opts.ModelFile); ret != 0 {
			return fmt.Errorf("failed to load Whisper VAD model")
		}

		return nil
	}

	if ret := CppLoadModel(opts.ModelFile); ret != 0 {
		return fmt.Errorf("failed to load Whisper transcription model")
	}

	return nil
}

func (w *Whisper) VAD(req *pb.VADRequest) (pb.VADResponse, error) {
	audio := req.Audio
	// We expect 0xdeadbeef to be overwritten; if we see it in a stack trace we know it wasn't
	segsPtr, segsLen := uintptr(0xdeadbeef), uintptr(0xdeadbeef)
	segsPtrPtr, segsLenPtr := unsafe.Pointer(&segsPtr), unsafe.Pointer(&segsLen)

	if ret := CppVAD(audio, uintptr(len(audio)), segsPtrPtr, segsLenPtr); ret != 0 {
		return pb.VADResponse{}, fmt.Errorf("failed VAD")
	}

	// Happens when the C++ vector has not had any elements pushed to it
	if segsPtr == 0 {
		return pb.VADResponse{
			Segments: []*pb.VADSegment{},
		}, nil
	}

	// The unsafeptr warning is caused by segsPtr being on the stack and therefore being subject to stack copying (AFAICT);
	// however, the stack shouldn't have grown between setting segsPtr and now, and the memory pointed to is allocated by C++
	segs := unsafe.Slice((*float32)(unsafe.Pointer(segsPtr)), segsLen)

	vadSegments := []*pb.VADSegment{}
	for i := range len(segs) >> 1 {
		s := segs[2*i] / 100
		t := segs[2*i+1] / 100
		vadSegments = append(vadSegments, &pb.VADSegment{
			Start: s,
			End:   t,
		})
	}

	return pb.VADResponse{
		Segments: vadSegments,
	}, nil
}

func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
	dir, err := os.MkdirTemp("", "whisper")
	if err != nil {
		return pb.TranscriptResult{}, err
	}
	defer os.RemoveAll(dir)

	convertedPath := filepath.Join(dir, "converted.wav")

	if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
		return pb.TranscriptResult{}, err
	}

	// Open samples
	fh, err := os.Open(convertedPath)
	if err != nil {
		return pb.TranscriptResult{}, err
	}
	defer fh.Close()

	// Read samples
	d := wav.NewDecoder(fh)
	buf, err := d.FullPCMBuffer()
	if err != nil {
		return pb.TranscriptResult{}, err
	}

	data := buf.AsFloat32Buffer().Data
	segsLen := uintptr(0xdeadbeef)
	segsLenPtr := unsafe.Pointer(&segsLen)

	if ret := CppTranscribe(opts.Threads, opts.Language, opts.Translate, opts.Diarize, data, uintptr(len(data)), segsLenPtr); ret != 0 {
		return pb.TranscriptResult{}, fmt.Errorf("failed Transcribe")
	}

	segments := []*pb.TranscriptSegment{}
	text := ""
	for i := range int(segsLen) {
		s := CppGetSegmentStart(i)
		t := CppGetSegmentEnd(i)
		txt := strings.Clone(CppGetSegmentText(i))
		tokens := make([]int32, CppNTokens(i))

		if opts.Diarize && CppGetSegmentSpeakerTurnNext(i) {
			txt += " [SPEAKER_TURN]"
		}

		for j := range tokens {
			tokens[j] = int32(CppGetTokenID(i, j))
		}
		segment := &pb.TranscriptSegment{
			Id:     int32(i),
			Text:   txt,
			Start:  s,
			End:    t,
			Tokens: tokens,
		}

		segments = append(segments, segment)

		text += " " + strings.TrimSpace(txt)
	}

	return pb.TranscriptResult{
		Segments: segments,
		Text:     strings.TrimSpace(text),
	}, nil
}
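For orientation, here is a minimal sketch of driving this backend in-process. It assumes `libgowhisper.so` has already been loaded and the `Cpp*` function pointers registered (as `main.go` does below); the model and audio paths are placeholders.

```go
// Sketch only: assumes main.go has already dlopen'ed libgowhisper.so and
// registered the Cpp* function pointers above via purego.
func exampleTranscribe() {
	w := &Whisper{}
	// "ggml-base.en.bin" is a placeholder model path
	if err := w.Load(&pb.ModelOptions{ModelFile: "ggml-base.en.bin"}); err != nil {
		panic(err)
	}
	res, err := w.AudioTranscription(&pb.TranscriptRequest{
		Dst:      "sample.ogg", // any input utils.AudioToWav can convert
		Threads:  4,
		Language: "en",
	})
	if err != nil {
		panic(err)
	}
	for _, s := range res.Segments {
		fmt.Printf("[%d -> %d] %s\n", s.Start, s.End, s.Text)
	}
}
```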
backend/go/whisper/gowhisper.h (new file, 17 lines)
@@ -0,0 +1,17 @@
#include <cstddef>
#include <cstdint>

extern "C" {
int load_model(const char *const model_path);
int load_model_vad(const char *const model_path);
int vad(float pcmf32[], size_t pcmf32_size, float **segs_out,
        size_t *segs_out_len);
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
               float pcmf32[], size_t pcmf32_len, size_t *segs_out_len);
const char *get_segment_text(int i);
int64_t get_segment_t0(int i);
int64_t get_segment_t1(int i);
int n_tokens(int i);
int32_t get_token_id(int i, int j);
bool get_segment_speaker_turn_next(int i);
}
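Although this header includes the C++ headers `<cstddef>` and `<cstdint>`, every declaration sits inside `extern "C"`, so the compiled library exports plain, unmangled C symbols — which is what allows the Go side to look them up by name at runtime.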
@@ -1,10 +1,10 @@
 package main
 
 // Note: this is started internally by LocalAI and a server is allocated for each model
 
 import (
 	"flag"
 
+	"github.com/ebitengine/purego"
 	grpc "github.com/mudler/LocalAI/pkg/grpc"
 )
@@ -12,7 +12,34 @@ var (
 	addr = flag.String("addr", "localhost:50051", "the address to connect to")
 )
 
+type LibFuncs struct {
+	FuncPtr any
+	Name    string
+}
+
 func main() {
+	gosd, err := purego.Dlopen("./libgowhisper.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
+	if err != nil {
+		panic(err)
+	}
+
+	libFuncs := []LibFuncs{
+		{&CppLoadModel, "load_model"},
+		{&CppLoadModelVAD, "load_model_vad"},
+		{&CppVAD, "vad"},
+		{&CppTranscribe, "transcribe"},
+		{&CppGetSegmentText, "get_segment_text"},
+		{&CppGetSegmentStart, "get_segment_t0"},
+		{&CppGetSegmentEnd, "get_segment_t1"},
+		{&CppNTokens, "n_tokens"},
+		{&CppGetTokenID, "get_token_id"},
+		{&CppGetSegmentSpeakerTurnNext, "get_segment_speaker_turn_next"},
+	}
+
+	for _, lf := range libFuncs {
+		purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
+	}
+
 	flag.Parse()
 
 	if err := grpc.StartServer(*addr, &Whisper{}); err != nil {
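The pattern above is plain dlopen-style late binding: purego fills each registered Go function pointer with a trampoline into the shared library, so no cgo is involved. A stripped-down sketch of the same pattern, for a hypothetical consumer binding only `n_tokens`:

```go
package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

// nTokens mirrors `int n_tokens(int i)` from gowhisper.h.
var nTokens func(i int) int

func main() {
	lib, err := purego.Dlopen("./libgowhisper.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}
	// RegisterLibFunc panics if the symbol is missing, so a bad build
	// fails at startup rather than on the first call.
	purego.RegisterLibFunc(&nTokens, lib, "n_tokens")
	fmt.Println(nTokens(0)) // only meaningful after a model has transcribed audio
}
```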
@@ -10,8 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
 # Create lib directory
 mkdir -p $CURDIR/package/lib
 
-cp -avrf $CURDIR/whisper $CURDIR/package/
-cp -rfv $CURDIR/run.sh $CURDIR/package/
+cp -avf $CURDIR/whisper $CURDIR/libgowhisper.so $CURDIR/package/
+cp -fv $CURDIR/run.sh $CURDIR/package/
 
 # Detect architecture and copy appropriate libraries
 if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -42,11 +42,13 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
     cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
     cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
     cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
+elif [ $(uname -s) = "Darwin" ]; then
+    echo "Detected Darwin"
 else
     echo "Error: Could not detect architecture"
     exit 1
 fi
 
 echo "Packaging completed successfully"
 ls -liah $CURDIR/package/
 ls -liah $CURDIR/package/lib/
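This packaging change follows from the purego switch above: `libgowhisper.so` is now loaded at runtime from the backend's directory, so it must ship next to the `whisper` binary, and the new Darwin branch simply skips the Linux loader-library copies on macOS.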
@@ -1,105 +0,0 @@
-package main
-
-// This is a wrapper to satisfy the GRPC service interface
-// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
-	"github.com/go-audio/wav"
-	"github.com/mudler/LocalAI/pkg/grpc/base"
-	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
-	"github.com/mudler/LocalAI/pkg/utils"
-)
-
-type Whisper struct {
-	base.SingleThread
-	whisper whisper.Model
-}
-
-func (sd *Whisper) Load(opts *pb.ModelOptions) error {
-	// Note: the Model here is a path to a directory containing the model files
-	w, err := whisper.New(opts.ModelFile)
-	sd.whisper = w
-	return err
-}
-
-func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
-	dir, err := os.MkdirTemp("", "whisper")
-	if err != nil {
-		return pb.TranscriptResult{}, err
-	}
-	defer os.RemoveAll(dir)
-
-	convertedPath := filepath.Join(dir, "converted.wav")
-
-	if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
-		return pb.TranscriptResult{}, err
-	}
-
-	// Open samples
-	fh, err := os.Open(convertedPath)
-	if err != nil {
-		return pb.TranscriptResult{}, err
-	}
-	defer fh.Close()
-
-	// Read samples
-	d := wav.NewDecoder(fh)
-	buf, err := d.FullPCMBuffer()
-	if err != nil {
-		return pb.TranscriptResult{}, err
-	}
-
-	data := buf.AsFloat32Buffer().Data
-
-	// Process samples
-	context, err := sd.whisper.NewContext()
-	if err != nil {
-		return pb.TranscriptResult{}, err
-	}
-
-	context.SetThreads(uint(opts.Threads))
-
-	if opts.Language != "" {
-		context.SetLanguage(opts.Language)
-	} else {
-		context.SetLanguage("auto")
-	}
-
-	if opts.Translate {
-		context.SetTranslate(true)
-	}
-
-	if err := context.Process(data, nil, nil, nil); err != nil {
-		return pb.TranscriptResult{}, err
-	}
-
-	segments := []*pb.TranscriptSegment{}
-	text := ""
-	for {
-		s, err := context.NextSegment()
-		if err != nil {
-			break
-		}
-
-		var tokens []int32
-		for _, t := range s.Tokens {
-			tokens = append(tokens, int32(t.Id))
-		}
-
-		segment := &pb.TranscriptSegment{Id: int32(s.Num), Text: s.Text, Start: int64(s.Start), End: int64(s.End), Tokens: tokens}
-		segments = append(segments, segment)
-
-		text += s.Text
-	}
-
-	return pb.TranscriptResult{
-		Segments: segments,
-		Text:     text,
-	}, nil
-}
(File diff suppressed because it is too large)
@@ -1,38 +1,190 @@
-# Common commands about conda environment
-
-## Create a new empty conda environment
-
-```
-conda create --name <env-name> python=<your version> -y
-
-conda create --name autogptq python=3.11 -y
-```
-
-## To activate the environment
-
-As of conda 4.4
-```
-conda activate autogptq
-```
-
-The conda version older than 4.4
-
-```
-source activate autogptq
-```
-
-## Install the packages to your environment
-
-Sometimes you need to install the packages from the conda-forge channel
-
-By using `conda`
-```
-conda install <your-package-name>
-
-conda install -c conda-forge <your package-name>
-```
-
-Or by using `pip`
-```
-pip install <your-package-name>
-```
+# Python Backends for LocalAI
+
+This directory contains Python-based AI backends for LocalAI, providing support for various AI models and hardware acceleration targets.
+
+## Overview
+
+The Python backends use a unified build system based on `libbackend.sh` that provides:
+- **Automatic virtual environment management** with support for both `uv` and `pip`
+- **Hardware-specific dependency installation** (CPU, CUDA, Intel, MLX, etc.)
+- **Portable Python support** for standalone deployments
+- **Consistent backend execution** across different environments
+
+## Available Backends
+
+### Core AI Models
+- **transformers** - Hugging Face Transformers framework (PyTorch-based)
+- **vllm** - High-performance LLM inference engine
+- **mlx** - Apple Silicon optimized ML framework
+- **exllama2** - ExLlama2 quantized models
+
+### Audio & Speech
+- **bark** - Text-to-speech synthesis
+- **coqui** - Coqui TTS models
+- **faster-whisper** - Fast Whisper speech recognition
+- **kitten-tts** - Lightweight TTS
+- **mlx-audio** - Apple Silicon audio processing
+- **chatterbox** - TTS model
+- **kokoro** - TTS models
+
+### Computer Vision
+- **diffusers** - Stable Diffusion and image generation
+- **mlx-vlm** - Vision-language models for Apple Silicon
+- **rfdetr** - Object detection models
+
+### Specialized
+- **rerankers** - Text reranking models
+
+## Quick Start
+
+### Prerequisites
+- Python 3.10+ (default: 3.10.18)
+- `uv` package manager (recommended) or `pip`
+- Appropriate hardware drivers for your target (CUDA, Intel, etc.)
+
+### Installation
+
+Each backend can be installed individually:
+
+```bash
+# Navigate to a specific backend
+cd backend/python/transformers
+
+# Install dependencies
+make transformers
+# or
+bash install.sh
+
+# Run the backend
+make run
+# or
+bash run.sh
+```
+
+### Using the Unified Build System
+
+The `libbackend.sh` script provides consistent commands across all backends:
+
+```bash
+# Source the library in your backend script
+source $(dirname $0)/../common/libbackend.sh
+
+# Install requirements (automatically handles hardware detection)
+installRequirements
+
+# Start the backend server
+startBackend $@
+
+# Run tests
+runUnittests
+```
+
+## Hardware Targets
+
+The build system automatically detects and configures for different hardware:
+
+- **CPU** - Standard CPU-only builds
+- **CUDA** - NVIDIA GPU acceleration (supports CUDA 11/12)
+- **Intel** - Intel XPU/GPU optimization
+- **MLX** - Apple Silicon (M1/M2/M3) optimization
+- **HIP** - AMD GPU acceleration
+
+### Target-Specific Requirements
+
+Backends can specify hardware-specific dependencies:
+- `requirements.txt` - Base requirements
+- `requirements-cpu.txt` - CPU-specific packages
+- `requirements-cublas11.txt` - CUDA 11 packages
+- `requirements-cublas12.txt` - CUDA 12 packages
+- `requirements-intel.txt` - Intel-optimized packages
+- `requirements-mps.txt` - Apple Silicon packages
+
+## Configuration Options
+
+### Environment Variables
+
+- `PYTHON_VERSION` - Python version (default: 3.10)
+- `PYTHON_PATCH` - Python patch version (default: 18)
+- `BUILD_TYPE` - Force specific build target
+- `USE_PIP` - Use pip instead of uv (default: false)
+- `PORTABLE_PYTHON` - Enable portable Python builds
+- `LIMIT_TARGETS` - Restrict backend to specific targets
+
+### Example: CUDA 12 Only Backend
+
+```bash
+# In your backend script
+LIMIT_TARGETS="cublas12"
+source $(dirname $0)/../common/libbackend.sh
+```
+
+### Example: Intel-Optimized Backend
+
+```bash
+# In your backend script
+LIMIT_TARGETS="intel"
+source $(dirname $0)/../common/libbackend.sh
+```
+
+## Development
+
+### Adding a New Backend
+
+1. Create a new directory in `backend/python/`
+2. Copy the template structure from `common/template/`
+3. Implement your `backend.py` with the required gRPC interface
+4. Add appropriate requirements files for your target hardware
+5. Use `libbackend.sh` for consistent build and execution
+
+### Testing
+
+```bash
+# Run backend tests
+make test
+# or
+bash test.sh
+```
+
+### Building
+
+```bash
+# Install dependencies
+make <backend-name>
+
+# Clean build artifacts
+make clean
+```
+
+## Architecture
+
+Each backend follows a consistent structure:
+```
+backend-name/
+├── backend.py           # Main backend implementation
+├── requirements.txt     # Base dependencies
+├── requirements-*.txt   # Hardware-specific dependencies
+├── install.sh           # Installation script
+├── run.sh               # Execution script
+├── test.sh              # Test script
+├── Makefile             # Build targets
+└── test.py              # Unit tests
+```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Missing dependencies**: Ensure all requirements files are properly configured
+2. **Hardware detection**: Check that `BUILD_TYPE` matches your system
+3. **Python version**: Verify Python 3.10+ is available
+4. **Virtual environment**: Use `ensureVenv` to create/activate environments
+
+## Contributing
+
+When adding new backends or modifying existing ones:
+1. Follow the established directory structure
+2. Use `libbackend.sh` for consistent behavior
+3. Include appropriate requirements files for all target hardware
+4. Add comprehensive tests
+5. Update this README if adding new backend types
@@ -1,29 +1,23 @@
 .PHONY: ttsbark
-ttsbark: protogen
+ttsbark:
 	bash install.sh
 
 .PHONY: run
-run: protogen
+run: ttsbark
 	@echo "Running bark..."
 	bash run.sh
 	@echo "bark run."
 
 .PHONY: test
-test: protogen
+test: ttsbark
 	@echo "Testing bark..."
 	bash test.sh
 	@echo "bark tested."
 
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
-
 .PHONY: clean
 clean: protogen-clean
 	rm -rf venv __pycache__
@@ -1,5 +1,5 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
+intel-extension-for-pytorch==2.8.10+xpu
 torch==2.3.1+cxx11.abi
 torchaudio==2.3.1+cxx11.abi
 oneccl_bind_pt==2.3.100+xpu
@@ -1,4 +1,4 @@
 bark==0.1.5
-grpcio==1.71.0
+grpcio==1.75.1
 protobuf
 certifi
@@ -1,29 +1,23 @@
-.PHONY: coqui
-coqui: protogen
+.PHONY: chatterbox
+chatterbox:
 	bash install.sh
 
 .PHONY: run
-run: protogen
+run: chatterbox
 	@echo "Running coqui..."
 	bash run.sh
 	@echo "coqui run."
 
 .PHONY: test
-test: protogen
+test: chatterbox
 	@echo "Testing coqui..."
 	bash test.sh
 	@echo "coqui tested."
 
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
-
 .PHONY: clean
 clean: protogen-clean
 	rm -rf venv __pycache__
@@ -14,9 +14,23 @@ import backend_pb2_grpc
 import torch
 import torchaudio as ta
 from chatterbox.tts import ChatterboxTTS
+from chatterbox.mtl_tts import ChatterboxMultilingualTTS
 import grpc
 
+def is_float(s):
+    """Check if a string can be converted to float."""
+    try:
+        float(s)
+        return True
+    except ValueError:
+        return False
+
+def is_int(s):
+    """Check if a string can be converted to int."""
+    try:
+        int(s)
+        return True
+    except ValueError:
+        return False
+
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
@@ -41,10 +55,34 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         else:
             print("CUDA is not available", file=sys.stderr)
             device = "cpu"
+        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
+        if mps_available:
+            device = "mps"
         if not torch.cuda.is_available() and request.CUDA:
             return backend_pb2.Result(success=False, message="CUDA is not available")
 
+        options = request.Options
+
+        # empty dict
+        self.options = {}
+
+        # The options are a list of strings in this form optname:optvalue
+        # We are storing all the options in a dict so we can use them later when
+        # generating audio
+        for opt in options:
+            if ":" not in opt:
+                continue
+            key, value = opt.split(":")
+            # if value is a number, convert it to the appropriate type
+            if is_float(value):
+                value = float(value)
+            elif is_int(value):
+                value = int(value)
+            elif value.lower() in ["true", "false"]:
+                value = value.lower() == "true"
+            self.options[key] = value
+
         self.AudioPath = None
 
         if os.path.isabs(request.AudioPath):
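For example, passing `options: ["multilingual:true", "language:fr"]` in the model config yields `self.options == {"multilingual": True, "language": "fr"}`, which the code below uses to pick the multilingual model class at load time and to set `language_id` at generation time.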
@@ -54,10 +92,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             modelFileBase = os.path.dirname(request.ModelFile)
             # modify LoraAdapter to be relative to modelFileBase
             self.AudioPath = os.path.join(modelFileBase, request.AudioPath)
 
         try:
             print("Preparing models, please wait", file=sys.stderr)
-            self.model = ChatterboxTTS.from_pretrained(device=device)
+            if "multilingual" in self.options:
+                # remove key from options
+                del self.options["multilingual"]
+                self.model = ChatterboxMultilingualTTS.from_pretrained(device=device)
+            else:
+                self.model = ChatterboxTTS.from_pretrained(device=device)
         except Exception as err:
             return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
         # Implement your logic here for the LoadModel service
@@ -66,12 +108,18 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
 
     def TTS(self, request, context):
         try:
-            # Generate audio using ChatterboxTTS
+            kwargs = {}
+
+            if "language" in self.options:
+                kwargs["language_id"] = self.options["language"]
             if self.AudioPath is not None:
-                wav = self.model.generate(request.text, audio_prompt_path=self.AudioPath)
-            else:
-                wav = self.model.generate(request.text)
+                kwargs["audio_prompt_path"] = self.AudioPath
+
+            # add options to kwargs
+            kwargs.update(self.options)
+
+            # Generate audio using ChatterboxTTS
+            wav = self.model.generate(request.text, **kwargs)
             # Save the generated audio
             ta.save(request.dst, wav, self.model.sr)
@@ -15,5 +15,6 @@ fi
 if [ "x${BUILD_PROFILE}" == "xintel" ]; then
     EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
 fi
+EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
 
 installRequirements
@@ -1,5 +1,8 @@
+--extra-index-url https://download.pytorch.org/whl/cpu
 accelerate
-torch==2.6.0
-torchaudio==2.6.0
-transformers==4.46.3
-chatterbox-tts
+torch
+torchaudio
+transformers
+# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
+chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
+#chatterbox-tts==0.1.4
@@ -2,5 +2,6 @@
 torch==2.6.0+cu118
 torchaudio==2.6.0+cu118
 transformers==4.46.3
-chatterbox-tts
+# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
+chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
 accelerate
@@ -1,5 +1,6 @@
-torch==2.6.0
-torchaudio==2.6.0
-transformers==4.46.3
-chatterbox-tts
+torch
+torchaudio
+transformers
+# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
+chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
 accelerate
@@ -1,6 +1,7 @@
 --extra-index-url https://download.pytorch.org/whl/rocm6.0
 torch==2.6.0+rocm6.1
 torchaudio==2.6.0+rocm6.1
-transformers==4.46.3
-chatterbox-tts
+transformers
+# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
+chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
 accelerate
@@ -2,10 +2,10 @@
 intel-extension-for-pytorch==2.3.110+xpu
 torch==2.3.1+cxx11.abi
 torchaudio==2.3.1+cxx11.abi
-transformers==4.46.3
-chatterbox-tts
+transformers
+# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
+chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
 accelerate
 oneccl_bind_pt==2.3.100+xpu
 optimum[openvino]
 setuptools
-accelerate
backend/python/chatterbox/requirements-l4t.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
torch
torchaudio
transformers
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate
@@ -1,6 +1,6 @@
-# init handles the setup of the library
+#!/usr/bin/env bash
+set -euo pipefail
+
 #
 # use the library by adding the following line to a script:
 # source $(dirname $0)/../common/libbackend.sh
@@ -17,29 +17,182 @@
 # LIMIT_TARGETS="cublas12"
 # source $(dirname $0)/../common/libbackend.sh
 #
+# You can switch between uv (conda-like) and pip installation methods by setting USE_PIP:
+# USE_PIP=true source $(dirname $0)/../common/libbackend.sh
+#
+# ===================== user-configurable defaults =====================
+PYTHON_VERSION="${PYTHON_VERSION:-3.10}"           # e.g. 3.10 / 3.11 / 3.12 / 3.13
+PYTHON_PATCH="${PYTHON_PATCH:-18}"                 # e.g. 18 -> 3.10.18 ; 13 -> 3.11.13
+PY_STANDALONE_TAG="${PY_STANDALONE_TAG:-20250818}" # release tag date
+# Enable/disable bundling of a portable Python build
+PORTABLE_PYTHON="${PORTABLE_PYTHON:-false}"
+
-PYTHON_VERSION="3.10"
+# If you want to fully pin the filename (including tuned CPU targets), set:
+# PORTABLE_PY_FILENAME="cpython-3.10.18+20250818-x86_64_v3-unknown-linux-gnu-install_only.tar.gz"
+: "${PORTABLE_PY_FILENAME:=}"
+: "${PORTABLE_PY_SHA256:=}" # optional; if set we verify the download
+# =====================================================================
+
+# Default to uv if USE_PIP is not set
+if [ "x${USE_PIP:-}" == "x" ]; then
+    USE_PIP=false
+fi
+
+# ----------------------- helpers -----------------------
+function _is_musl() {
+    # detect musl (Alpine, etc)
+    if command -v ldd >/dev/null 2>&1; then
+        ldd --version 2>&1 | grep -qi musl && return 0
+    fi
+    # busybox-ish fallback
+    if command -v getconf >/dev/null 2>&1; then
+        getconf GNU_LIBC_VERSION >/dev/null 2>&1 || return 0
+    fi
+    return 1
+}
+
+function _triple() {
+    local os="" arch="" libc="gnu"
+    case "$(uname -s)" in
+        Linux*) os="unknown-linux" ;;
+        Darwin*) os="apple-darwin" ;;
+        MINGW*|MSYS*|CYGWIN*) os="pc-windows-msvc" ;; # best-effort for Git Bash
+        *) echo "Unsupported OS $(uname -s)"; exit 1;;
+    esac
+
+    case "$(uname -m)" in
+        x86_64) arch="x86_64" ;;
+        aarch64|arm64) arch="aarch64" ;;
+        armv7l) arch="armv7" ;;
+        i686|i386) arch="i686" ;;
+        ppc64le) arch="ppc64le" ;;
+        s390x) arch="s390x" ;;
+        riscv64) arch="riscv64" ;;
+        *) echo "Unsupported arch $(uname -m)"; exit 1;;
+    esac
+
+    if [[ "$os" == "unknown-linux" ]]; then
+        if _is_musl; then
+            libc="musl"
+        else
+            libc="gnu"
+        fi
+        echo "${arch}-${os}-${libc}"
+    else
+        echo "${arch}-${os}"
+    fi
+}
+
+function _portable_dir() {
+    echo "${EDIR}/python"
+}
+
+function _portable_bin() {
+    # python-build-standalone puts python in ./bin
+    echo "$(_portable_dir)/bin"
+}
+
+function _portable_python() {
+    if [ -x "$(_portable_bin)/python3" ]; then
+        echo "$(_portable_bin)/python3"
+    else
+        echo "$(_portable_bin)/python"
+    fi
+}
+
+# macOS loader env for the portable CPython
+_macosPortableEnv() {
+    if [ "$(uname -s)" = "Darwin" ]; then
+        export DYLD_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}"
+        export DYLD_FALLBACK_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_FALLBACK_LIBRARY_PATH:+:${DYLD_FALLBACK_LIBRARY_PATH}}"
+    fi
+}
+
+# Good hygiene on macOS for downloaded/extracted trees
+_unquarantinePortablePython() {
+    if [ "$(uname -s)" = "Darwin" ]; then
+        command -v xattr >/dev/null 2>&1 && xattr -dr com.apple.quarantine "$(_portable_dir)" || true
+    fi
+}
+
+# ------------------ PORTABLE PYTHON ------------------
+function ensurePortablePython() {
+    local pdir="$(_portable_dir)"
+    local pbin="$(_portable_bin)"
+    local pyexe
+
+    if [ -x "${pbin}/python3" ] || [ -x "${pbin}/python" ]; then
+        _macosPortableEnv
+        return 0
+    fi
+
+    mkdir -p "${pdir}"
+    local triple="$(_triple)"
+
+    local full_ver="${PYTHON_VERSION}.${PYTHON_PATCH}"
+    local fn=""
+    if [ -n "${PORTABLE_PY_FILENAME}" ]; then
+        fn="${PORTABLE_PY_FILENAME}"
+    else
+        # generic asset name: cpython-<full_ver>+<tag>-<triple>-install_only.tar.gz
+        fn="cpython-${full_ver}+${PY_STANDALONE_TAG}-${triple}-install_only.tar.gz"
+    fi
+
+    local url="https://github.com/astral-sh/python-build-standalone/releases/download/${PY_STANDALONE_TAG}/${fn}"
+    local tmp="${pdir}/${fn}"
+    echo "Downloading portable Python: ${fn}"
+    # curl with retries; fall back to wget if needed
+    if command -v curl >/dev/null 2>&1; then
+        curl -L --fail --retry 3 --retry-delay 1 -o "${tmp}" "${url}"
+    else
+        wget -O "${tmp}" "${url}"
+    fi
+
+    if [ -n "${PORTABLE_PY_SHA256}" ]; then
+        echo "${PORTABLE_PY_SHA256}  ${tmp}" | sha256sum -c -
+    fi
+
+    echo "Extracting ${fn} -> ${pdir}"
+    # always a .tar.gz (we purposely choose install_only)
+    tar -xzf "${tmp}" -C "${pdir}"
+    rm -f "${tmp}"
+
+    # Some archives nest a directory; if so, flatten to ${pdir}
+    # Find the first dir with a 'bin/python*'
+    local inner
+    inner="$(find "${pdir}" -type f -path "*/bin/python*" -maxdepth 3 2>/dev/null | head -n1 || true)"
+    if [ -n "${inner}" ]; then
+        local inner_root
+        inner_root="$(dirname "$(dirname "${inner}")")" # .../bin -> root
+        if [ "${inner_root}" != "${pdir}" ]; then
+            # move contents up one level
+            shopt -s dotglob
+            mv "${inner_root}/"* "${pdir}/"
+            rm -rf "${inner_root}"
+            shopt -u dotglob
+        fi
+    fi
+
+    _unquarantinePortablePython
+    _macosPortableEnv
+    # Make sure it's runnable
+    pyexe="$(_portable_python)"
+    "${pyexe}" -V
+}
+
+# init handles the setup of the library
 function init() {
-    # Name of the backend (directory name)
     BACKEND_NAME=${PWD##*/}
-    # Path where all backends files are
-    MY_DIR=$(realpath `dirname $0`)
-
-    # Build type
+    MY_DIR=$(realpath "$(dirname "$0")")
     BUILD_PROFILE=$(getBuildProfile)
 
-    # Environment directory
     EDIR=${MY_DIR}
-    # Allow to specify a custom env dir for shared environments
-    if [ "x${ENV_DIR}" != "x" ]; then
+    if [ "x${ENV_DIR:-}" != "x" ]; then
         EDIR=${ENV_DIR}
     fi
 
-    # If a backend has defined a list of valid build profiles...
-    if [ ! -z "${LIMIT_TARGETS}" ]; then
+    if [ ! -z "${LIMIT_TARGETS:-}" ]; then
        isValidTarget=$(checkTargets ${LIMIT_TARGETS})
        if [ ${isValidTarget} != true ]; then
            echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}"
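Two design notes on the portable-Python machinery above: `ensurePortablePython` downloads astral-sh's python-build-standalone `install_only` archives, which are self-contained, relocatable CPython trees, and the `PORTABLE_PY_SHA256` check is only enforced when that variable is set. The `_makeVenvPortable` helper further down exists because a standard venv bakes absolute interpreter paths into its `bin/python` symlinks and entry-point shebangs, which would break the moment the packaged tree is unpacked at a different prefix.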
@@ -50,6 +203,7 @@ function init() {
     echo "Initializing libbackend for ${BACKEND_NAME}"
 }
 
+
 # getBuildProfile will inspect the system to determine which build profile is appropriate:
 # returns one of the following:
 # - cublas11
@@ -57,53 +211,140 @@ function init() {
 # - hipblas
 # - intel
 function getBuildProfile() {
-    # First check if we are a cublas build, and if so report the correct build profile
-    if [ x"${BUILD_TYPE}" == "xcublas" ]; then
-        if [ ! -z ${CUDA_MAJOR_VERSION} ]; then
-            # If we have been given a CUDA version, we trust it
+    if [ x"${BUILD_TYPE:-}" == "xcublas" ]; then
+        if [ ! -z "${CUDA_MAJOR_VERSION:-}" ]; then
             echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION}
         else
-            # We don't know what version of cuda we are, so we report ourselves as a generic cublas
             echo ${BUILD_TYPE}
         fi
         return 0
     fi
 
-    # If /opt/intel exists, then we are doing an intel/ARC build
     if [ -d "/opt/intel" ]; then
         echo "intel"
         return 0
     fi
 
-    # If for any other values of BUILD_TYPE, we don't need any special handling/discovery
-    if [ ! -z ${BUILD_TYPE} ]; then
+    if [ -n "${BUILD_TYPE:-}" ]; then
         echo ${BUILD_TYPE}
         return 0
     fi
 
-    # If there is no BUILD_TYPE set at all, set a build-profile value of CPU, we aren't building for any GPU targets
     echo "cpu"
 }
 
+
+# Make the venv relocatable:
+# - rewrite venv/bin/python{,3} to relative symlinks into $(_portable_dir)
+# - normalize entrypoint shebangs to /usr/bin/env python3
+_makeVenvPortable() {
+    local venv_dir="${EDIR}/venv"
+    local vbin="${venv_dir}/bin"
+
+    [ -d "${vbin}" ] || return 0
+
+    # 1) Replace python symlinks with relative ones to ../../python/bin/python3
+    #    (venv/bin -> venv -> EDIR -> python/bin)
+    local rel_py='../../python/bin/python3'
+
+    for name in python3 python; do
+        if [ -e "${vbin}/${name}" ] || [ -L "${vbin}/${name}" ]; then
+            rm -f "${vbin}/${name}"
+        fi
+    done
+    ln -s "${rel_py}" "${vbin}/python3"
+    ln -s "python3" "${vbin}/python"
+
+    # 2) Rewrite shebangs of entry points to use env, so the venv is relocatable
+    #    Only touch text files that start with #! and reference the current venv.
+    local ve_abs="${vbin}/python"
+    local sed_i=(sed -i)
+    # macOS/BSD sed needs a backup suffix; GNU sed doesn't. Make it portable:
+    if sed --version >/dev/null 2>&1; then
+        sed_i=(sed -i)
+    else
+        sed_i=(sed -i '')
+    fi
+
+    for f in "${vbin}"/*; do
+        [ -f "$f" ] || continue
+        # Fast path: check first two bytes (#!)
+        head -c2 "$f" 2>/dev/null | grep -q '^#!' || continue
+        # Only rewrite if the shebang mentions the (absolute) venv python
+        if head -n1 "$f" | grep -Fq "${ve_abs}"; then
+            "${sed_i[@]}" '1s|^#!.*$|#!/usr/bin/env python3|' "$f"
+            chmod +x "$f" 2>/dev/null || true
+        fi
+    done
+}
+
+
 # ensureVenv makes sure that the venv for the backend both exists, and is activated.
 #
 # This function is idempotent, so you can call it as many times as you want and it will
 # always result in an activated virtual environment
 function ensureVenv() {
+    local interpreter=""
+
+    if [ "x${PORTABLE_PYTHON}" == "xtrue" ] || [ -e "$(_portable_python)" ]; then
+        echo "Using portable Python"
+        ensurePortablePython
+        interpreter="$(_portable_python)"
+    else
+        # Prefer system python${PYTHON_VERSION}, else python3, else fall back to bundled
+        if command -v python${PYTHON_VERSION} >/dev/null 2>&1; then
+            interpreter="python${PYTHON_VERSION}"
+        elif command -v python3 >/dev/null 2>&1; then
+            interpreter="python3"
+        else
+            echo "No suitable system Python found, bootstrapping portable build..."
+            ensurePortablePython
+            interpreter="$(_portable_python)"
+        fi
+    fi
+
     if [ ! -d "${EDIR}/venv" ]; then
-        uv venv --python ${PYTHON_VERSION} ${EDIR}/venv
-        echo "virtualenv created"
+        if [ "x${USE_PIP}" == "xtrue" ]; then
+            "${interpreter}" -m venv --copies "${EDIR}/venv"
+            source "${EDIR}/venv/bin/activate"
+            "${interpreter}" -m pip install --upgrade pip
+        else
+            if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
+                uv venv --python "${interpreter}" "${EDIR}/venv"
+            else
+                uv venv --python "${PYTHON_VERSION}" "${EDIR}/venv"
+            fi
+        fi
+        if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
+            _makeVenvPortable
+        fi
     fi
 
-    # Source if we are not already in a Virtual env
-    if [ "x${VIRTUAL_ENV}" != "x${EDIR}/venv" ]; then
-        source ${EDIR}/venv/bin/activate
-        echo "virtualenv activated"
+    # We call it here to make sure that when we source a venv we can still use python as expected
+    if [ -x "$(_portable_python)" ]; then
+        _macosPortableEnv
     fi
 
-    echo "activated virtualenv has been ensured"
+    if [ "x${VIRTUAL_ENV:-}" != "x${EDIR}/venv" ]; then
+        source "${EDIR}/venv/bin/activate"
+    fi
 }
+
+
+function runProtogen() {
+    ensureVenv
+    if [ "x${USE_PIP}" == "xtrue" ]; then
+        pip install grpcio-tools
+    else
+        uv pip install grpcio-tools
+    fi
+    pushd "${EDIR}" >/dev/null
+    # use the venv python (ensures correct interpreter & sys.path)
+    python -m grpc_tools.protoc -I../../ -I./ --python_out=. --grpc_python_out=. backend.proto
+    popd >/dev/null
+}
+
+
 # installRequirements looks for several requirements files and if they exist runs the install for them in order
 #
 # - requirements-install.txt
@@ -111,7 +352,7 @@ function ensureVenv() {
 # - requirements-${BUILD_TYPE}.txt
 # - requirements-${BUILD_PROFILE}.txt
 #
-# BUILD_PROFILE is a pore specific version of BUILD_TYPE, ex: cuda11 or cuda12
+# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
 # it can also include some options that we do not have BUILD_TYPES for, ex: intel
 #
 # NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
@@ -127,36 +368,41 @@ function ensureVenv() {
 # installRequirements
 function installRequirements() {
     ensureVenv
 
-    # These are the requirements files we will attempt to install, in order
     declare -a requirementFiles=(
         "${EDIR}/requirements-install.txt"
         "${EDIR}/requirements.txt"
-        "${EDIR}/requirements-${BUILD_TYPE}.txt"
+        "${EDIR}/requirements-${BUILD_TYPE:-}.txt"
     )
 
-    if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
+    if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
         requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt")
     fi
 
-    # if BUILD_TYPE is empty, we are a CPU build, so we should try to install the CPU requirements
-    if [ "x${BUILD_TYPE}" == "x" ]; then
+    if [ "x${BUILD_TYPE:-}" == "x" ]; then
         requirementFiles+=("${EDIR}/requirements-cpu.txt")
     fi
 
     requirementFiles+=("${EDIR}/requirements-after.txt")
 
-    if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
+    if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
         requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt")
     fi
 
+    # This is needed to build wheels that e.g. depend on Python.h
+    if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
+        export C_INCLUDE_PATH="${C_INCLUDE_PATH:-}:$(_portable_dir)/include/python${PYTHON_VERSION}"
+    fi
+
     for reqFile in ${requirementFiles[@]}; do
-        if [ -f ${reqFile} ]; then
+        if [ -f "${reqFile}" ]; then
             echo "starting requirements install for ${reqFile}"
-            uv pip install ${EXTRA_PIP_INSTALL_FLAGS} --requirement ${reqFile}
+            if [ "x${USE_PIP}" == "xtrue" ]; then
+                pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
+            else
+                uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
+            fi
             echo "finished requirements install for ${reqFile}"
         fi
     done
+
+    runProtogen
 }
 
 # startBackend discovers and runs the backend GRPC server
@@ -174,18 +420,18 @@ function installRequirements() {
 # - ${BACKEND_NAME}.py
 function startBackend() {
     ensureVenv
 
-    if [ ! -z ${BACKEND_FILE} ]; then
-        exec ${EDIR}/venv/bin/python ${BACKEND_FILE} $@
+    if [ ! -z "${BACKEND_FILE:-}" ]; then
+        exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@"
     elif [ -e "${MY_DIR}/server.py" ]; then
-        exec ${EDIR}/venv/bin/python ${MY_DIR}/server.py $@
+        exec "${EDIR}/venv/bin/python" "${MY_DIR}/server.py" "$@"
     elif [ -e "${MY_DIR}/backend.py" ]; then
-        exec ${EDIR}/venv/bin/python ${MY_DIR}/backend.py $@
+        exec "${EDIR}/venv/bin/python" "${MY_DIR}/backend.py" "$@"
     elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then
-        exec ${EDIR}/venv/bin/python ${MY_DIR}/${BACKEND_NAME}.py $@
+        exec "${EDIR}/venv/bin/python" "${MY_DIR}/${BACKEND_NAME}.py" "$@"
     fi
 }
 
 
 # runUnittests discovers and runs python unittests
 #
 # You can specify a specific test file to use by setting TEST_FILE before calling runUnittests.
@@ -198,41 +444,36 @@ function startBackend() {
 # by default a file named test.py in the backends directory will be used
 function runUnittests() {
     ensureVenv
 
-    if [ ! -z ${TEST_FILE} ]; then
-        testDir=$(dirname `realpath ${TEST_FILE}`)
-        testFile=$(basename ${TEST_FILE})
-        pushd ${testDir}
-        python -m unittest ${testFile}
-        popd
+    if [ ! -z "${TEST_FILE:-}" ]; then
+        testDir=$(dirname "$(realpath "${TEST_FILE}")")
+        testFile=$(basename "${TEST_FILE}")
+        pushd "${testDir}" >/dev/null
+        python -m unittest "${testFile}"
+        popd >/dev/null
     elif [ -f "${MY_DIR}/test.py" ]; then
-        pushd ${MY_DIR}
+        pushd "${MY_DIR}" >/dev/null
         python -m unittest test.py
-        popd
+        popd >/dev/null
     else
         echo "no tests defined for ${BACKEND_NAME}"
     fi
 }
 
 ##################################################################################
 # Below here are helper functions not intended to be used outside of the library #
 ##################################################################################
 
 # checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets
 function checkTargets() {
-    # Collect all provided targets into a variable and...
     targets=$@
-    # ...convert it into an array
     declare -a targets=($targets)
 
     for target in ${targets[@]}; do
-        if [ "x${BUILD_TYPE}" == "x${target}" ]; then
-            echo true
-            return 0
+        if [ "x${BUILD_TYPE:-}" == "x${target}" ]; then
+            echo true; return 0
         fi
         if [ "x${BUILD_PROFILE}" == "x${target}" ]; then
-            echo true
-            return 0
+            echo true; return 0
         fi
     done
     echo false
@@ -3,18 +3,11 @@
 .PHONY: install
 install:
 	bash install.sh
-	$(MAKE) protogen
-
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
 
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
-backend_pb2_grpc.py backend_pb2.py:
-	bash protogen.sh
-
 .PHONY: clean
 clean: protogen-clean
 	rm -rf venv __pycache__
@@ -8,4 +8,4 @@ else
     source $backend_dir/../common/libbackend.sh
 fi
 
-python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
+runProtogen
@@ -1,5 +1,5 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
-torch==2.3.1+cxx11.abi
-oneccl_bind_pt==2.3.100+xpu
+intel-extension-for-pytorch==2.8.10+xpu
+torch==2.8.0
+oneccl_bind_pt==2.8.0+xpu
 optimum[openvino]
@@ -1,3 +1,3 @@
-grpcio==1.71.0
+grpcio==1.75.1
 protobuf
 grpcio-tools
@@ -1,29 +1,23 @@
 .PHONY: coqui
-coqui: protogen
+coqui:
 	bash install.sh
 
 .PHONY: run
-run: protogen
+run: coqui
 	@echo "Running coqui..."
 	bash run.sh
 	@echo "coqui run."
 
 .PHONY: test
-test: protogen
+test: coqui
 	@echo "Testing coqui..."
 	bash test.sh
 	@echo "coqui tested."
 
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
-
 .PHONY: clean
 clean: protogen-clean
 	rm -rf venv __pycache__
@@ -40,7 +40,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         else:
             print("CUDA is not available", file=sys.stderr)
             device = "cpu"
+        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
+        if mps_available:
+            device = "mps"
         if not torch.cuda.is_available() and request.CUDA:
             return backend_pb2.Result(success=False, message="CUDA is not available")
 
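This MPS fallback recurs in several backends in this release (diffusers and faster-whisper gain the same three lines below). As a reference, a minimal standalone sketch of the device order that results — the function name is ours; the hunks simply run the MPS check after the CUDA branch, which amounts to the same thing since no machine exposes both device types:

```python
import torch

def pick_device(want_cuda: bool) -> str:
    """Device order the updated backends end up with: CUDA when requested
    and available, MPS on Apple Silicon, otherwise CPU."""
    if want_cuda and torch.cuda.is_available():
        return "cuda"
    # torch.backends.mps is only present on MPS-enabled builds, hence hasattr()
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return "mps"
    return "cpu"

print(pick_device(want_cuda=True))
```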
@@ -1,4 +1,4 @@
-grpcio==1.71.0
+grpcio==1.75.1
 protobuf
 certifi
 packaging==24.1
@@ -12,28 +12,22 @@ export SKIP_CONDA=1
 endif
 
 .PHONY: diffusers
-diffusers: protogen
+diffusers:
 	bash install.sh
 
 .PHONY: run
-run: protogen
+run: diffusers
 	@echo "Running diffusers..."
 	bash run.sh
 	@echo "Diffusers run."
 
-test: protogen
+test: diffusers
 	bash test.sh
 
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
-
 .PHONY: clean
 clean: protogen-clean
 	rm -rf venv __pycache__
@@ -18,7 +18,7 @@ import backend_pb2_grpc
 import grpc
 
 from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
-    EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel
+    EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, QwenImageEditPipeline, AutoencoderKLWan, WanPipeline, WanImageToVideoPipeline
 from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
 from diffusers.pipelines.stable_diffusion import safety_checker
 from diffusers.utils import load_image, export_to_video
@@ -65,6 +65,21 @@ from diffusers.schedulers import (
     UniPCMultistepScheduler,
 )
 
+def is_float(s):
+    """Check if a string can be converted to float."""
+    try:
+        float(s)
+        return True
+    except ValueError:
+        return False
+def is_int(s):
+    """Check if a string can be converted to int."""
+    try:
+        int(s)
+        return True
+    except ValueError:
+        return False
+
 
 # The scheduler list mapping was taken from here: https://github.com/neggles/animatediff-cli/blob/6f336f5f4b5e38e85d7f06f1744ef42d0a45f2a7/src/animatediff/schedulers.py#L39
 # Credits to https://github.com/neggles
@@ -169,8 +184,26 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
                 if ":" not in opt:
                     continue
                 key, value = opt.split(":")
+                # if value is a number, convert it to the appropriate type
+                if is_float(value):
+                    value = float(value)
+                elif is_int(value):
+                    value = int(value)
+                elif value.lower() in ["true", "false"]:
+                    value = value.lower() == "true"
                 self.options[key] = value
 
+        # From options, extract if present "torch_dtype" and set it to the appropriate type
+        if "torch_dtype" in self.options:
+            if self.options["torch_dtype"] == "fp16":
+                torchType = torch.float16
+            elif self.options["torch_dtype"] == "bf16":
+                torchType = torch.bfloat16
+            elif self.options["torch_dtype"] == "fp32":
+                torchType = torch.float32
+            # remove it from options
+            del self.options["torch_dtype"]
+
         print(f"Options: {self.options}", file=sys.stderr)
 
         local = False
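Combined with the is_float/is_int helpers added earlier, the hunk above gives the backend typed options and a torch dtype pulled out of the generic option map. A self-contained sketch of the same rules (parse_options is our name, not the backend's); one quirk worth knowing is that the float check runs first, so plain integer strings coerce to floats:

```python
import torch

def is_float(s):
    try:
        float(s)
        return True
    except ValueError:
        return False

def is_int(s):
    try:
        int(s)
        return True
    except ValueError:
        return False

def parse_options(opts):
    """Coerce "key:value" strings the way the LoadModel hunk above does,
    and split torch_dtype out into a real torch dtype."""
    options = {}
    for opt in opts:
        if ":" not in opt:
            continue
        key, value = opt.split(":")
        if is_float(value):
            value = float(value)   # runs first, so "81" becomes 81.0
        elif is_int(value):
            value = int(value)
        elif value.lower() in ["true", "false"]:
            value = value.lower() == "true"
        options[key] = value
    dtypes = {"fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32}
    torch_type = dtypes.get(options.pop("torch_dtype", None))
    return options, torch_type

print(parse_options(["guidance_scale:7.5", "num_frames:81", "torch_dtype:bf16"]))
# -> ({'guidance_scale': 7.5, 'num_frames': 81.0}, torch.bfloat16)
```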
@@ -234,6 +267,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         elif request.PipelineType == "DiffusionPipeline":
             self.pipe = DiffusionPipeline.from_pretrained(request.Model,
                                                           torch_dtype=torchType)
+        elif request.PipelineType == "QwenImageEditPipeline":
+            self.pipe = QwenImageEditPipeline.from_pretrained(request.Model,
+                                                              torch_dtype=torchType)
         elif request.PipelineType == "VideoDiffusionPipeline":
             self.txt2vid = True
             self.pipe = DiffusionPipeline.from_pretrained(request.Model,
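A hedged usage sketch of the new Qwen image-edit pipeline type, assuming the standard diffusers convention for edit pipelines (an input image plus an instruction prompt, result in .images); the model id is illustrative and not taken from this diff:

```python
import torch
from diffusers import QwenImageEditPipeline
from diffusers.utils import load_image

pipe = QwenImageEditPipeline.from_pretrained("Qwen/Qwen-Image-Edit",
                                             torch_dtype=torch.bfloat16).to("cuda")
image = load_image("input.png")
# Image editing: the prompt describes the desired change to the input image.
edited = pipe(image=image, prompt="replace the sky with a sunset").images[0]
edited.save("edited.png")
```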
@@ -302,6 +338,32 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
                                                           torch_dtype=torch.bfloat16)
             self.pipe.vae.to(torch.bfloat16)
             self.pipe.text_encoder.to(torch.bfloat16)
+        elif request.PipelineType == "WanPipeline":
+            # WAN2.2 pipeline requires special VAE handling
+            vae = AutoencoderKLWan.from_pretrained(
+                request.Model,
+                subfolder="vae",
+                torch_dtype=torch.float32
+            )
+            self.pipe = WanPipeline.from_pretrained(
+                request.Model,
+                vae=vae,
+                torch_dtype=torchType
+            )
+            self.txt2vid = True  # WAN2.2 is a text-to-video pipeline
+        elif request.PipelineType == "WanImageToVideoPipeline":
+            # WAN2.2 image-to-video pipeline
+            vae = AutoencoderKLWan.from_pretrained(
+                request.Model,
+                subfolder="vae",
+                torch_dtype=torch.float32
+            )
+            self.pipe = WanImageToVideoPipeline.from_pretrained(
+                request.Model,
+                vae=vae,
+                torch_dtype=torchType
+            )
+            self.img2vid = True  # WAN2.2 image-to-video pipeline
+
         if CLIPSKIP and request.CLIPSkip != 0:
             self.clip_skip = request.CLIPSkip
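Outside the backend, the same two-step load looks like this. A minimal sketch assuming a Diffusers-format WAN checkpoint (the model id is illustrative): upstream WAN examples load the VAE separately in float32 for output quality while the rest of the pipeline runs in bfloat16, which is exactly what the hunk above wires up.

```python
import torch
from diffusers import AutoencoderKLWan, WanPipeline
from diffusers.utils import export_to_video

model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"  # illustrative; any Diffusers-format WAN checkpoint

# Load the VAE separately in float32 (matching the backend hunk above),
# then let the rest of the pipeline run in bfloat16.
vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16).to("cuda")

frames = pipe(prompt="a cat surfing a wave", num_frames=81, guidance_scale=4.0).frames[0]
export_to_video(frames, "out.mp4", fps=16)
```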
@@ -336,6 +398,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         device = "cpu" if not request.CUDA else "cuda"
         if XPU:
             device = "xpu"
+        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
+        if mps_available:
+            device = "mps"
         self.device = device
         if request.LoraAdapter:
             # Check if its a local file and not a directory ( we load lora differently for a safetensor file )
@@ -440,11 +505,24 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             "num_inference_steps": steps,
         }
 
-        if request.src != "" and not self.controlnet and not self.img2vid:
-            image = Image.open(request.src)
+        # Handle image source: prioritize RefImages over request.src
+        image_src = None
+        if hasattr(request, 'ref_images') and request.ref_images and len(request.ref_images) > 0:
+            # Use the first reference image if available
+            image_src = request.ref_images[0]
+            print(f"Using reference image: {image_src}", file=sys.stderr)
+        elif request.src != "":
+            # Fall back to request.src if no ref_images
+            image_src = request.src
+            print(f"Using source image: {image_src}", file=sys.stderr)
+        else:
+            print("No image source provided", file=sys.stderr)
+
+        if image_src and not self.controlnet and not self.img2vid:
+            image = Image.open(image_src)
             options["image"] = image
-        elif self.controlnet and request.src:
-            pose_image = load_image(request.src)
+        elif self.controlnet and image_src:
+            pose_image = load_image(image_src)
             options["image"] = pose_image
 
         if CLIPSKIP and self.clip_skip != 0:
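The new priority is easy to state and to unit-test in isolation: the first reference image wins, then the legacy src field, then nothing. A sketch with plain arguments in place of the gRPC request object (the function name is ours):

```python
def select_image_source(ref_images, src):
    """First reference image wins, then the legacy src field, else None."""
    if ref_images:
        return ref_images[0]
    return src or None

assert select_image_source(["ref.png"], "src.png") == "ref.png"
assert select_image_source([], "src.png") == "src.png"
assert select_image_source([], "") is None
```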
@@ -486,7 +564,11 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
 
         if self.img2vid:
             # Load the conditioning image
-            image = load_image(request.src)
+            if image_src:
+                image = load_image(image_src)
+            else:
+                # Fallback to request.src for img2vid if no ref_images
+                image = load_image(request.src)
             image = image.resize((1024, 576))
 
             generator = torch.manual_seed(request.seed)
@@ -523,6 +605,96 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
 
         return backend_pb2.Result(message="Media generated", success=True)
 
+    def GenerateVideo(self, request, context):
+        try:
+            prompt = request.prompt
+            if not prompt:
+                return backend_pb2.Result(success=False, message="No prompt provided for video generation")
+
+            # Set default values from request or use defaults
+            num_frames = request.num_frames if request.num_frames > 0 else 81
+            fps = request.fps if request.fps > 0 else 16
+            cfg_scale = request.cfg_scale if request.cfg_scale > 0 else 4.0
+            num_inference_steps = request.step if request.step > 0 else 40
+
+            # Prepare generation parameters
+            kwargs = {
+                "prompt": prompt,
+                "negative_prompt": request.negative_prompt if request.negative_prompt else "",
+                "height": request.height if request.height > 0 else 720,
+                "width": request.width if request.width > 0 else 1280,
+                "num_frames": num_frames,
+                "guidance_scale": cfg_scale,
+                "num_inference_steps": num_inference_steps,
+            }
+
+            # Add custom options from self.options (including guidance_scale_2 if specified)
+            kwargs.update(self.options)
+
+            # Set seed if provided
+            if request.seed > 0:
+                kwargs["generator"] = torch.Generator(device=self.device).manual_seed(request.seed)
+
+            # Handle start and end images for video generation
+            if request.start_image:
+                kwargs["start_image"] = load_image(request.start_image)
+            if request.end_image:
+                kwargs["end_image"] = load_image(request.end_image)
+
+            print(f"Generating video with {kwargs=}", file=sys.stderr)
+
+            # Generate video frames based on pipeline type
+            if self.PipelineType == "WanPipeline":
+                # WAN2.2 text-to-video generation
+                output = self.pipe(**kwargs)
+                frames = output.frames[0]  # WAN2.2 returns frames in this format
+            elif self.PipelineType == "WanImageToVideoPipeline":
+                # WAN2.2 image-to-video generation
+                if request.start_image:
+                    # Load and resize the input image according to WAN2.2 requirements
+                    image = load_image(request.start_image)
+                    # Use request dimensions or defaults, but respect WAN2.2 constraints
+                    request_height = request.height if request.height > 0 else 480
+                    request_width = request.width if request.width > 0 else 832
+                    max_area = request_height * request_width
+                    aspect_ratio = image.height / image.width
+                    mod_value = self.pipe.vae_scale_factor_spatial * self.pipe.transformer.config.patch_size[1]
+                    height = round((max_area * aspect_ratio) ** 0.5 / mod_value) * mod_value
+                    width = round((max_area / aspect_ratio) ** 0.5 / mod_value) * mod_value
+                    image = image.resize((width, height))
+                    kwargs["image"] = image
+                    kwargs["height"] = height
+                    kwargs["width"] = width
+
+                output = self.pipe(**kwargs)
+                frames = output.frames[0]
+            elif self.img2vid:
+                # Generic image-to-video generation
+                if request.start_image:
+                    image = load_image(request.start_image)
+                    image = image.resize((request.width if request.width > 0 else 1024,
+                                          request.height if request.height > 0 else 576))
+                    kwargs["image"] = image
+
+                output = self.pipe(**kwargs)
+                frames = output.frames[0]
+            elif self.txt2vid:
+                # Generic text-to-video generation
+                output = self.pipe(**kwargs)
+                frames = output.frames[0]
+            else:
+                return backend_pb2.Result(success=False, message=f"Pipeline {self.PipelineType} does not support video generation")
+
+            # Export video
+            export_to_video(frames, request.dst, fps=fps)
+
+            return backend_pb2.Result(message="Video generated successfully", success=True)
+
+        except Exception as err:
+            print(f"Error generating video: {err}", file=sys.stderr)
+            traceback.print_exc()
+            return backend_pb2.Result(success=False, message=f"Error generating video: {err}")
+
+
 def serve(address):
     server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
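The only non-obvious step in GenerateVideo is the image-to-video dimension snapping: WAN accepts only sides that are multiples of the VAE spatial scale factor times the transformer patch size, so the code keeps roughly the requested pixel area while letting the input image's aspect ratio drive the final shape. A worked sketch of that rounding (the helper name is ours; mod_value 16 corresponds to a spatial scale factor of 8 and a patch size of 2, the figures typical WAN checkpoints report):

```python
def wan_dims(req_h, req_w, img_h, img_w, mod_value=16):
    """Keep roughly req_h*req_w pixels, follow the input image's aspect ratio,
    and snap both sides to a multiple of mod_value."""
    max_area = req_h * req_w
    aspect = img_h / img_w
    height = round((max_area * aspect) ** 0.5 / mod_value) * mod_value
    width = round((max_area / aspect) ** 0.5 / mod_value) * mod_value
    return height, width

# A 480x832 request with a 720x1280 (16:9) input image:
print(wan_dims(480, 832, 720, 1280))  # -> (480, 848), area ~= 480*832
```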
@@ -1,9 +1,12 @@
-diffusers
+--extra-index-url https://download.pytorch.org/whl/cpu
+git+https://github.com/huggingface/diffusers
 opencv-python
 transformers
+torchvision==0.22.1
 accelerate
 compel
 peft
 sentencepiece
-torch==2.4.1
+torch==2.7.1
 optimum-quanto
+ftfy
@@ -1,10 +1,12 @@
 --extra-index-url https://download.pytorch.org/whl/cu118
-torch==2.4.1+cu118
-diffusers
+git+https://github.com/huggingface/diffusers
 opencv-python
 transformers
+torchvision==0.22.1
 accelerate
 compel
 peft
 sentencepiece
-optimum-quanto
+torch==2.7.1
+optimum-quanto
+ftfy
@@ -1,9 +1,12 @@
-torch==2.4.1
-diffusers
+--extra-index-url https://download.pytorch.org/whl/cu121
+git+https://github.com/huggingface/diffusers
 opencv-python
 transformers
+torchvision
 accelerate
 compel
 peft
 sentencepiece
-optimum-quanto
+torch
+ftfy
+optimum-quanto
@@ -1,11 +1,12 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.0
-torch==2.3.1+rocm6.0
-torchvision==0.18.1+rocm6.0
-diffusers
+--extra-index-url https://download.pytorch.org/whl/rocm6.3
+torch==2.7.1+rocm6.3
+torchvision==0.22.1+rocm6.3
+git+https://github.com/huggingface/diffusers
 opencv-python
 transformers
 accelerate
 compel
 peft
 sentencepiece
 optimum-quanto
+ftfy
@@ -1,15 +1,16 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 intel-extension-for-pytorch==2.3.110+xpu
-torch==2.3.1+cxx11.abi
-torchvision==0.18.1+cxx11.abi
-oneccl_bind_pt==2.3.100+xpu
+torch==2.5.1+cxx11.abi
+torchvision==0.20.1+cxx11.abi
+oneccl_bind_pt==2.8.0+xpu
 optimum[openvino]
 setuptools
-diffusers
+git+https://github.com/huggingface/diffusers
 opencv-python
 transformers
 accelerate
 compel
 peft
 sentencepiece
 optimum-quanto
+ftfy
backend/python/diffusers/requirements-l4t.txt (new file, 12 lines)
@@ -0,0 +1,12 @@
+--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
+torch
+diffusers
+transformers
+accelerate
+compel
+peft
+optimum-quanto
+numpy<2
+sentencepiece
+torchvision
+ftfy
backend/python/diffusers/requirements-mps.txt (new file, 11 lines)
@@ -0,0 +1,11 @@
+torch==2.7.1
+torchvision==0.22.1
+git+https://github.com/huggingface/diffusers
+opencv-python
+transformers
+accelerate
+compel
+peft
+sentencepiece
+optimum-quanto
+ftfy
@@ -1,5 +1,5 @@
 setuptools
-grpcio==1.71.0
+grpcio==1.75.1
 pillow
 protobuf
 certifi
@@ -12,4 +12,6 @@ if [ -d "/opt/intel" ]; then
     export XPU=1
 fi
 
+export PYTORCH_ENABLE_MPS_FALLBACK=1
+
 startBackend $@
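PYTORCH_ENABLE_MPS_FALLBACK makes PyTorch route operators that lack an MPS kernel to the CPU instead of raising NotImplementedError on Apple Silicon. One usage note (our example, not from the diff): the variable must be in the environment before torch is imported, which is why it is exported from the run script rather than set in Python code.

```python
import os
# Must be in the environment before `import torch`; the run.sh export guarantees that.
os.environ.setdefault("PYTORCH_ENABLE_MPS_FALLBACK", "1")

import torch

device = "mps" if torch.backends.mps.is_available() else "cpu"
x = torch.ones(3, device=device)
print(x * 2)  # ops lacking an MPS kernel now fall back to CPU instead of raising
```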
@@ -1,23 +1,17 @@
 .PHONY: exllama2
-exllama2: protogen
+exllama2:
 	bash install.sh
 
 .PHONY: run
-run: protogen
+run: exllama2
 	@echo "Running exllama2..."
 	bash run.sh
 	@echo "exllama2 run."
 
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
-
 .PHONY: clean
 clean: protogen-clean
 	$(RM) -r venv source __pycache__
@@ -1,4 +1,4 @@
-grpcio==1.71.0
+grpcio==1.75.1
 protobuf
 certifi
 wheel
@@ -3,18 +3,11 @@
 .PHONY: install
 install:
 	bash install.sh
-	$(MAKE) protogen
-
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
 
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
-backend_pb2_grpc.py backend_pb2.py:
-	bash protogen.sh
-
 .PHONY: clean
 clean: protogen-clean
 	rm -rf venv __pycache__
@@ -10,7 +10,7 @@ import sys
 import os
 import backend_pb2
 import backend_pb2_grpc
+import torch
 from faster_whisper import WhisperModel
 
 import grpc
@@ -35,7 +35,9 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         # device = "cuda" if request.CUDA else "cpu"
         if request.CUDA:
             device = "cuda"
+        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
+        if mps_available:
+            device = "mps"
         try:
             print("Preparing models, please wait", file=sys.stderr)
             self.model = WhisperModel(request.Model, device=device, compute_type="float16")
backend/python/kitten-tts/Makefile (new file, 23 lines)
@@ -0,0 +1,23 @@
+.PHONY: kitten-tts
+kitten-tts:
+	bash install.sh
+
+.PHONY: run
+run: kitten-tts
+	@echo "Running kitten-tts..."
+	bash run.sh
+	@echo "kitten-tts run."
+
+.PHONY: test
+test: kitten-tts
+	@echo "Testing kitten-tts..."
+	bash test.sh
+	@echo "kitten-tts tested."
+
+.PHONY: protogen-clean
+protogen-clean:
+	$(RM) backend_pb2_grpc.py backend_pb2.py
+
+.PHONY: clean
+clean: protogen-clean
+	rm -rf venv __pycache__
backend/python/kitten-tts/backend.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+"""
+This is an extra gRPC server of LocalAI for Kitten TTS
+"""
+from concurrent import futures
+import time
+import argparse
+import signal
+import sys
+import os
+import backend_pb2
+import backend_pb2_grpc
+
+import torch
+from kittentts import KittenTTS
+import soundfile as sf
+
+import grpc
+
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
+MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
+KITTEN_LANGUAGE = os.environ.get('KITTEN_LANGUAGE', None)
+
+# Implement the BackendServicer class with the service methods
+class BackendServicer(backend_pb2_grpc.BackendServicer):
+    """
+    BackendServicer is the class that implements the gRPC service
+    """
+    def Health(self, request, context):
+        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
+    def LoadModel(self, request, context):
+
+        self.AudioPath = None
+        # List available KittenTTS models
+        print("Available KittenTTS voices: expr-voice-2-m, expr-voice-2-f, expr-voice-3-m, expr-voice-3-f, expr-voice-4-m, expr-voice-4-f, expr-voice-5-m, expr-voice-5-f")
+        if os.path.isabs(request.AudioPath):
+            self.AudioPath = request.AudioPath
+        elif request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
+            # get base path of modelFile
+            modelFileBase = os.path.dirname(request.ModelFile)
+            # modify LoraAdapter to be relative to modelFileBase
+            self.AudioPath = os.path.join(modelFileBase, request.AudioPath)
+
+        try:
+            print("Preparing KittenTTS model, please wait", file=sys.stderr)
+            # Use the model name from request.Model, defaulting to "KittenML/kitten-tts-nano-0.1" if not specified
+            model_name = request.Model if request.Model else "KittenML/kitten-tts-nano-0.1"
+            self.tts = KittenTTS(model_name)
+        except Exception as err:
+            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
+        # Implement your logic here for the LoadModel service
+        # Replace this with your desired response
+        return backend_pb2.Result(message="Model loaded successfully", success=True)
+
+    def TTS(self, request, context):
+        try:
+            # KittenTTS doesn't use language parameter like TTS, so we ignore it
+            # For multi-speaker models, use voice parameter
+            voice = request.voice if request.voice else "expr-voice-2-f"
+
+            # Generate audio using KittenTTS
+            audio = self.tts.generate(request.text, voice=voice)
+
+            # Save the audio using soundfile
+            sf.write(request.dst, audio, 24000)
+
+        except Exception as err:
+            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
+        return backend_pb2.Result(success=True)
+
+def serve(address):
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
+                         options=[
+                             ('grpc.max_message_length', 50 * 1024 * 1024),  # 50MB
+                             ('grpc.max_send_message_length', 50 * 1024 * 1024),  # 50MB
+                             ('grpc.max_receive_message_length', 50 * 1024 * 1024),  # 50MB
+                         ])
+    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
+    server.add_insecure_port(address)
+    server.start()
+    print("Server started. Listening on: " + address, file=sys.stderr)
+
+    # Define the signal handler function
+    def signal_handler(sig, frame):
+        print("Received termination signal. Shutting down...")
+        server.stop(0)
+        sys.exit(0)
+
+    # Set the signal handlers for SIGINT and SIGTERM
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Run the gRPC server.")
+    parser.add_argument(
+        "--addr", default="localhost:50051", help="The address to bind the server to."
+    )
+    args = parser.parse_args()
+
+    serve(args.addr)
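The servicer touches only a tiny surface of the kittentts package. A standalone sketch of the same load/generate/save path, using the same defaults the backend falls back to (nano checkpoint, expr-voice-2-f, 24000 Hz output):

```python
from kittentts import KittenTTS
import soundfile as sf

# Same defaults the backend uses when the request leaves them unset.
tts = KittenTTS("KittenML/kitten-tts-nano-0.1")
audio = tts.generate("Hello from LocalAI", voice="expr-voice-2-f")
sf.write("hello.wav", audio, 24000)  # the backend writes at a 24000 Hz sample rate
```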
backend/python/kitten-tts/install.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -e
+
+backend_dir=$(dirname $0)
+if [ -d $backend_dir/common ]; then
+    source $backend_dir/common/libbackend.sh
+else
+    source $backend_dir/../common/libbackend.sh
+fi
+
+# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
+# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
+# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
+# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
+if [ "x${BUILD_PROFILE}" == "xintel" ]; then
+    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
+fi
+
+installRequirements
backend/python/kitten-tts/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
+grpcio==1.71.0
+protobuf
+certifi
+packaging==24.1
+https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl
backend/python/kitten-tts/run.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
+#!/bin/bash
+backend_dir=$(dirname $0)
+if [ -d $backend_dir/common ]; then
+    source $backend_dir/common/libbackend.sh
+else
+    source $backend_dir/../common/libbackend.sh
+fi
+
+startBackend $@
backend/python/kitten-tts/test.py (new file, 82 lines)
@@ -0,0 +1,82 @@
+"""
+A test script to test the gRPC service
+"""
+import unittest
+import subprocess
+import time
+import backend_pb2
+import backend_pb2_grpc
+
+import grpc
+
+
+class TestBackendServicer(unittest.TestCase):
+    """
+    TestBackendServicer is the class that tests the gRPC service
+    """
+    def setUp(self):
+        """
+        This method sets up the gRPC service by starting the server
+        """
+        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
+        time.sleep(30)
+
+    def tearDown(self) -> None:
+        """
+        This method tears down the gRPC service by terminating the server
+        """
+        self.service.terminate()
+        self.service.wait()
+
+    def test_server_startup(self):
+        """
+        This method tests if the server starts up successfully
+        """
+        try:
+            self.setUp()
+            with grpc.insecure_channel("localhost:50051") as channel:
+                stub = backend_pb2_grpc.BackendStub(channel)
+                response = stub.Health(backend_pb2.HealthMessage())
+                self.assertEqual(response.message, b'OK')
+        except Exception as err:
+            print(err)
+            self.fail("Server failed to start")
+        finally:
+            self.tearDown()
+
+    def test_load_model(self):
+        """
+        This method tests if the model is loaded successfully
+        """
+        try:
+            self.setUp()
+            with grpc.insecure_channel("localhost:50051") as channel:
+                stub = backend_pb2_grpc.BackendStub(channel)
+                response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
+                print(response)
+                self.assertTrue(response.success)
+                self.assertEqual(response.message, "Model loaded successfully")
+        except Exception as err:
+            print(err)
+            self.fail("LoadModel service failed")
+        finally:
+            self.tearDown()
+
+    def test_tts(self):
+        """
+        This method tests if the embeddings are generated successfully
+        """
+        try:
+            self.setUp()
+            with grpc.insecure_channel("localhost:50051") as channel:
+                stub = backend_pb2_grpc.BackendStub(channel)
+                response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
+                self.assertTrue(response.success)
+                tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
+                tts_response = stub.TTS(tts_request)
+                self.assertIsNotNone(tts_response)
+        except Exception as err:
+            print(err)
+            self.fail("TTS service failed")
+        finally:
+            self.tearDown()
Some files were not shown because too many files have changed in this diff.