Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-08 13:42:00 -05:00)

Compare commits: libmtmd...test-fix-u (629 commits)
Commits (SHA1):

30bf8d41d7
ebbcba342a
0de75519dc
37f5e4f5c1
ffa934b959
59311d8b1e
d9e25af7b5
e4f8b63b40
1364ae9be6
cfd6a9150d
cd352d0c5f
8d47309695
5f6fc02a55
0b528458d8
caab380c5d
8a3a362504
07238eb743
e905e90dd7
08432d49e5
e51e2aacb9
9c3d85fc28
007ca647a7
59af928379
dbc2bb561b
c72c85dcac
ef984901e6
9911ec84a3
1956681d4c
326f6e5ccb
302958efd6
3dc86b247d
5ec724af06
1f1e156bf0
df625e366a
9e6685ac9c
90c818aa71
034b9b691b
ba52822e5c
eb30f6c090
caba098959
3c75ea1e0e
c5f911812f
d82922786a
d9e9bb4c0e
657027bec6
2f5635308d
63b5338dbd
3150174962
4330fdce33
fef8583144
d4d6a56a4f
2900a601a0
43e0437db6
976c159fdb
969922ffec
739573e41b
dbdf2908ad
317f8641dc
54ff70e451
723f01c87e
79a41a5e07
d0b6aa3f7d
ad99399c6e
e6ebfd3ba1
ead00a28b9
9621edb4c5
7ce92f0646
6a4ab3c1e0
83b85494c1
df6a80b38d
21faa4114b
e35ad56602
3be8b2d8e1
900745bb4d
15a7fc7e9a
03dddec538
3d34386712
1b3f66018b
4381e892b8
3c3f477854
f8a8cf3e95
0fc88b3cdf
4993df81c3
599bc88c6c
1a0d06f3db
5e1a8b3621
960e51e527
195aa22e77
be132fe816
ff5d2dc8be
c1cfa08226
fec8a36b36
5d4f5d2355
057248008f
9f2c9cd691
6971f71a6c
1ba66d00f5
259383cf5e
209c0694f5
0fd395d6ec
d04bd47116
1d830ce7dd
6dccfb09f8
e4d9cf8349
c899e90277
8193d18c7c
2e4dc6456f
4594430a3e
9c7f92c81f
060037bcd4
d9da4676b4
5ef4c2e471
27ce570844
42c7859ab1
e7e83d0fa6
c6dc1d86f1
6fd2e1964d
49ae41b716
b3f0ed62fd
4b9afc418b
e44ff8514b
2b6be10b6b
1361d844a1
fcc521cae5
8cad7138be
ebd1db2f09
7920d75805
1d0e24a865
9eed5ef872
39ab80442a
1b101df2c0
784bd5db33
b8b1ca782c
1149fb66d3
243e86176e
8da38a0d10
60786fc876
9486b88a25
bef4c10629
80f15851c5
22067e3384
4fbd639463
70f7d0c25f
576e821298
7293f26fcf
79973a28ad
8ab51509cc
b3384e5428
7050c9f69d
089efe05fd
253b7537dc
19c92c70c5
b52bfaf1b3
bf60ca5bf0
2b44467bd1
8c1f4a131e
10a3f0bd92
72f4d541d0
9f812fdb84
b70ee45fff
9d9c853541
18fcd8557c
d8e27c38d7
3b0dc87932
2374485222
0ca1765c17
90b5ed9a1e
d438b769da
2e4bd1e33d
ff73800970
94cb20ae7f
47c20f9adb
a7fe153630
27519d2233
8cab0f880b
8c48b250c4
ba802c2ee4
429bb7a88c
b2e8b6d1aa
fba5b557a1
6db19c5cb9
5428678209
06129139eb
05757e2738
240b790f29
5f221f5946
def7cdc0bf
ea9bf3dba2
b8eca530b6
47034ddacd
9a41331855
facc0181df
4733adb983
326fda3223
abf61e5b42
2ae45e7635
7d41551e10
6fbd720515
4e40a8d1ed
003b9292fe
09457b9221
41aa7e107f
bda875f962
224063f0f7
89978c8b57
987b5dcac1
ec1276e5a9
61ba98d43d
b9a25b16e6
6a8149e1fd
9c2840ac38
20a70e1244
3295a298f4
da6f37f000
c092633cd7
7e2a522229
03e8592450
f207bd1427
a5c0fe31c3
c68907ac65
9087ddc4de
33bebd5114
2913676157
e83652489c
d6274eaf4a
4d90971424
90f5639639
a35a701052
3d8ec72dbf
2a9d675d62
c782e8abf1
a1e1942d83
787302b204
0b085089b9
624f3b1fc8
c07bc55fee
173e0774c0
8ece26ab7c
d704cc7970
ab17baaae1
ca358fcdca
9aadfd485f
da3b0850de
8b1e8b4cda
3d22bfc27c
4438b4361e
04bad9a2da
8235e53602
eb5c3670f1
89e61fca90
9d6efe8842
60726d16f2
9d7ec09ec0
36179ffbed
d25145e641
949e5b9be8
73ecb7f90b
053bed6e5f
932360bf7e
6d0b52843f
078c22f485
6ef3852de5
a8057b952c
fd5c1d916f
5ce982b9c9
47ccfccf7a
a760f7ff39
facf7625f3
b3600b3c50
f0b47cfe6a
ee625fc34e
693aa0b5de
3973e6e5da
fb6ec68090
0301fc7c46
813cb4296d
deda3a4972
a28f27604a
8fe9fa98f2
4db1b80278
b3c2a3c257
61c2304638
92c5ab97e2
76e471441c
9cecf5e7ac
b7b3164736
5f7ece3e94
c717b8d800
f1d35c4149
ee7e77b6c1
324fecbb75
a79bfcf0a7
82495e7fb6
6030b12283
b5be867e28
9b806250d4
5f066e702f
47bb3a3db2
51230a801e
754bedc3ea
98e5291afc
e29b2c3aff
8dc574f3c4
05bf2493a5
eae4ca08da
fa284f7445
8f69b80520
b1fc5acd4a
fab41c29dd
fb0ec96396
7659461036
580687da46
1929eb2894
b29544d747
7c30e82647
a1d061c835
851c67019c
53ed5ef189
294f7022f3
932f6b01a6
e96452c5d4
5fc8d5bb78
121937ed6f
2e38f2a054
2a6187bc01
584c48df5a
8dd67748a1
3fd0bf3c88
4062a6c404
354c0b763e
40f9065367
fc02bc0aba
45badb75e8
d7e1922582
642a39afa0
34d9deaf39
ef37a73e1b
37de945ae8
468f1f4539
0640451368
99058511cc
ec293b3b59
9b1b6df8e9
cd7fbafcd2
e5125216cf
2105f82433
49c0c7881a
f8829376d8
0475f63675
ec206cc67c
34171fcf94
238c334aa7
d2df0a1769
d58647ac31
c1d3ce9a93
c1dd4ff5d5
48118b9582
ceda2e69db
cea1703acc
33fc9b9922
b783997c52
f6ec06d21c
7e1f2657d5
9589097252
cb87d331a9
6dfc96249a
a2564ed654
6c747caa34
8ae5e0feb9
c35dd0a7b8
2f5af6b246
00cf2e0e0a
c7a1d9c089
ad7ba52166
c5b9f45166
61b64a65ab
8276952920
b7cd5bfaec
da4312e4d3
7d507c54ed
df7ed49889
bfdc29d316
7fdc006071
615830245b
61376c0fa7
d0fb23514f
780d034ac9
ec2a044c7e
ad6fdd21fd
cd94e6b352
b37cef3718
9f957d547d
f0d9f0c5d8
d33e1c72a3
33f9ee06c9
c54677402d
3fe3a7b23d
f8ff6fa1fd
dfadc3696e
dbcf5fb4fc
2633137a17
d9c17dd23b
d8b7bd4860
a611cbc0f4
850b525159
35b3426a2a
cd2b0c0e7c
73d80c43a8
665562b850
7a78e4f482
6f41a6f934
bb54f2da2b
e1cc7ee107
cfc9dfa3d5
6a650e68cb
5e1373877a
b5b0ab26e7
9725bb4bbd
33b4275bbc
6644af10c6
7c4a2e9b85
bcccee3909
c6f50ddd0c
6613373b1b
1659b3f795
30600dd5cb
179fcf5541
9cb75086bb
594bb462ab
aa730a7b96
0a454c527a
cf86bcb984
a6d9988e84
f3a114342e
0d275ccc03
58dba3f01c
b68d6e8088
2352cec7e6
de72ae79b5
884c07d5f9
cca7cbef1e
32cd0d03d4
ee4d9e83d0
5547e08a30
ca7385c303
28759e79d3
40249b6b84
e09e47bada
3796558aeb
cca4f010f8
be3ff482d0
af255cd0be
8000228d1b
79abe0ad77
8131d11d1f
beb01c91f3
1ccd64ff6a
fc7681c68c
49d026a229
f9b968e19d
022d4a5ecb
0e917eb01d
efde0eaf83
add8fc35a2
9bcf4c56f1
3fcfaec7c8
a463d40a3e
1e1f0ee321
80b3139fa0
5173d37acb
470e48a900
b706dddc93
867db3f888
b79aa31398
fb9a09d49c
0a78f0ad2d
d68660bd5a
30ceee2dec
18c38335fc
89040ff6f7
de343700fd
87d18ad951
912c8eff04
481f30bde8
236ac30252
6f761e62e4
1f29b5f38e
33d702c5e0
95ff236127
2d64269763
a7a6020328
40618164b2
eb8c29f90a
63116a2c6a
311c2cf539
a6fcbd991d
2e1dc8deef
282e017b22
f86cb8be2d
5c56ec4f87
dd2845a034
2e7db014b6
6faeee1d92
31d73eb934
60863b9e52
a9fc71e2f3
ce9a9a30e0
2693a21da5
d460eab18e
c61e5fe266
88e570b5de
6efa97ce0b
41cde5468a
d650647db9
5bc7ef37a2
e0a52807c8
1a95a19f87
bcfc08e5bf
4d282ca963
525f49b69d
786aa1de05
ea82deb16b
b0891309ba
b034cff149
432f34f001
cbd61dccd4
0de0817d71
bf57d6e5ac
0b9603e010
8d925217f6
669a1ccae6
7a7d36ad63
8b889955b4
a226555949
f38f17865a
03f380701b
65e2866c97
cd3cd899ad
c2ae3100e7
ec0868e691
489c289916
ac5fb50bcc
7c9f011d91
80f7f17843
f0c41d6405
8472321a81
3bac4724ac
59db154cbc
1cc4525f15
45c58752e5
d5c9c717b5
dd7fa6b9f7
039c318607
0870bf5af6
6073b9944e
ef0e0f3777
b7de9e0aa0
39292407a1
f257bf8d14
8ca2fb5ef1
3a790fed13
a334f28a07
dc6663d121
103caf9823
4226d2d837
7434256fc9
86a0563ae1
c68951cbfe
8408084120
0f2f4c7e23
5ffad3b004
e5ccd97b8c
a3b08d46ec
090f5065fc
88de2ea01a
9650d490d4
4de1c83764
e5978dc714
f784986e19
bf6426aef2
4a91950848
4614ea1685
f0bf59d1d9
83dd678959
9d6c9f874a
c62f2bb336
38aeca6f9c
3b0cf52f6a
bac3022044
cd41701524
6a382a1afe
8dcab2f9c7
1d1d5627f0
233b3369ad
c587ac0aef
38c5d16b57
ef6fc052eb
7ff35c08ac
43f75ee7f3
82811a9630
04a3d8e5ac
9af09b3f8c
0d590a4044
e0a54de4f5
6bc2ae5467
8caaf49f5d
1db51044bb
ec21b58008
996259b529
f2942cc0e1
f8fbfd4fa3
41e239c67e
587827e779
456b4982ef
159388cce8
cfc73c7773
6d5bde860b
6ef383033b
cd494089d9
3033845f94
0f365ac204
525cf198be
658c2a4f55
c987de090d
04365843e6
1dc5781679
30704292de
e52c66c76e
cb28aef93b
@@ -2,9 +2,6 @@
 cd /workspace
 
-# Grab the pre-stashed backend assets to avoid build issues
-cp -r /build/backend-assets /workspace/backend-assets
-
 # Ensures generated source files are present upon load
 make prepare
@@ -4,10 +4,6 @@ services:
       context: ..
       dockerfile: Dockerfile
       target: devcontainer
-      args:
-        - FFMPEG=true
-        - IMAGE_TYPE=extras
-        - GO_TAGS=p2p tts
     env_file:
       - ../.env
     ports:
@@ -3,7 +3,13 @@
 .vscode
 .devcontainer
 models
+backends
 examples/chatbot-ui/models
+backend/go/image/stablediffusion-ggml/build/
+backend/go/*/build
+backend/go/*/.cache
+backend/go/*/sources
+backend/go/*/package
 examples/rwkv/models
 examples/**/models
 Dockerfile*
.env (7 changed lines)
@@ -41,13 +41,6 @@
 ## Uncomment and set to true to enable rebuilding from source
 # REBUILD=true
 
-## Enable go tags, available: p2p, tts
-## p2p: enable distributed inferencing
-## tts: enables text-to-speech with go-piper
-## (requires REBUILD=true)
-#
-# GO_TAGS=p2p
-
 ## Path where to store generated images
 # LOCALAI_IMAGE_PATH=/tmp/generated/images
 
.github/bump_deps.sh (9 changed lines, vendored)
@@ -3,15 +3,20 @@ set -xe
 REPO=$1
 BRANCH=$2
 VAR=$3
+FILE=$4
+
+if [ -z "$FILE" ]; then
+  FILE="Makefile"
+fi
 
 LAST_COMMIT=$(curl -s -H "Accept: application/vnd.github.VERSION.sha" "https://api.github.com/repos/$REPO/commits/$BRANCH")
 
 # Read $VAR from Makefile (only first match)
 set +e
-CURRENT_COMMIT="$(grep -m1 "^$VAR?=" Makefile | cut -d'=' -f2)"
+CURRENT_COMMIT="$(grep -m1 "^$VAR?=" $FILE | cut -d'=' -f2)"
 set -e
 
-sed -i Makefile -e "s/$VAR?=.*/$VAR?=$LAST_COMMIT/"
+sed -i $FILE -e "s/$VAR?=.*/$VAR?=$LAST_COMMIT/"
 
 if [ -z "$CURRENT_COMMIT" ]; then
   echo "Could not find $VAR in Makefile."
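The new optional fourth argument lets callers point the bump at a per-backend Makefile; omitting it keeps the old behaviour of patching the top-level Makefile. A minimal sketch of both invocations, using repository/branch/variable values taken from the bump_deps workflow further below:

```bash
# New 4-arg form: bump LLAMA_VERSION inside the llama.cpp backend's own Makefile
bash .github/bump_deps.sh ggml-org/llama.cpp master LLAMA_VERSION backend/cpp/llama-cpp/Makefile

# Without the fourth argument, FILE defaults to the top-level Makefile
bash .github/bump_deps.sh PABannier/bark.cpp main BARKCPP_VERSION
```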
.github/dependabot.yml (4 changed lines, vendored)
@@ -61,10 +61,6 @@ updates:
     directory: "/backend/python/openvoice"
     schedule:
       interval: "weekly"
-  - package-ecosystem: "pip"
-    directory: "/backend/python/parler-tts"
-    schedule:
-      interval: "weekly"
   - package-ecosystem: "pip"
     directory: "/backend/python/rerankers"
     schedule:
.github/workflows/backend.yml (new file, 1186 lines, vendored)
File diff suppressed because it is too large.
.github/workflows/backend_build.yml (new file, 243 lines, vendored)
@@ -0,0 +1,243 @@
---
name: 'build python backend container images (reusable)'

on:
  workflow_call:
    inputs:
      base-image:
        description: 'Base image'
        required: true
        type: string
      build-type:
        description: 'Build type'
        default: ''
        type: string
      cuda-major-version:
        description: 'CUDA major version'
        default: "12"
        type: string
      cuda-minor-version:
        description: 'CUDA minor version'
        default: "1"
        type: string
      platforms:
        description: 'Platforms'
        default: ''
        type: string
      tag-latest:
        description: 'Tag latest'
        default: ''
        type: string
      tag-suffix:
        description: 'Tag suffix'
        default: ''
        type: string
      runs-on:
        description: 'Runs on'
        required: true
        default: ''
        type: string
      backend:
        description: 'Backend to build'
        required: true
        type: string
      context:
        description: 'Build context'
        required: true
        type: string
      dockerfile:
        description: 'Build Dockerfile'
        required: true
        type: string
      skip-drivers:
        description: 'Skip drivers'
        default: 'false'
        type: string
    secrets:
      dockerUsername:
        required: false
      dockerPassword:
        required: false
      quayUsername:
        required: true
      quayPassword:
        required: true

jobs:
  backend-build:
    runs-on: ${{ inputs.runs-on }}
    env:
      quay_username: ${{ secrets.quayUsername }}
    steps:
      - name: Free Disk Space (Ubuntu)
        if: inputs.runs-on == 'ubuntu-latest'
        uses: jlumbroso/free-disk-space@main
        with:
          # this might remove tools that are actually needed,
          # if set to "true" but frees about 6 GB
          tool-cache: true
          # all of these default to true, but feel free to set to
          # "false" if necessary for your workflow
          android: true
          dotnet: true
          haskell: true
          large-packages: true
          docker-images: true
          swap-storage: true

      - name: Force Install GIT latest
        run: |
          sudo apt-get update \
          && sudo apt-get install -y software-properties-common \
          && sudo apt-get update \
          && sudo add-apt-repository -y ppa:git-core/ppa \
          && sudo apt-get update \
          && sudo apt-get install -y git

      - name: Checkout
        uses: actions/checkout@v5

      - name: Release space from worker
        if: inputs.runs-on == 'ubuntu-latest'
        run: |
          echo "Listing top largest packages"
          pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
          head -n 30 <<< "${pkgs}"
          echo
          df -h
          echo
          sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
          sudo apt-get remove --auto-remove android-sdk-platform-tools snapd || true
          sudo apt-get purge --auto-remove android-sdk-platform-tools snapd || true
          sudo rm -rf /usr/local/lib/android
          sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
          sudo rm -rf /usr/share/dotnet
          sudo apt-get remove -y '^mono-.*' || true
          sudo apt-get remove -y '^ghc-.*' || true
          sudo apt-get remove -y '.*jdk.*|.*jre.*' || true
          sudo apt-get remove -y 'php.*' || true
          sudo apt-get remove -y hhvm powershell firefox monodoc-manual msbuild || true
          sudo apt-get remove -y '^google-.*' || true
          sudo apt-get remove -y azure-cli || true
          sudo apt-get remove -y '^mongo.*-.*|^postgresql-.*|^mysql-.*|^mssql-.*' || true
          sudo apt-get remove -y '^gfortran-.*' || true
          sudo apt-get remove -y microsoft-edge-stable || true
          sudo apt-get remove -y firefox || true
          sudo apt-get remove -y powershell || true
          sudo apt-get remove -y r-base-core || true
          sudo apt-get autoremove -y
          sudo apt-get clean
          echo
          echo "Listing top largest packages"
          pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
          head -n 30 <<< "${pkgs}"
          echo
          sudo rm -rfv build || true
          sudo rm -rf /usr/share/dotnet || true
          sudo rm -rf /opt/ghc || true
          sudo rm -rf "/usr/local/share/boost" || true
          sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
          df -h

      - name: Docker meta
        id: meta
        if: github.event_name != 'pull_request'
        uses: docker/metadata-action@v5
        with:
          images: |
            quay.io/go-skynet/local-ai-backends
            localai/localai-backends
          tags: |
            type=ref,event=branch
            type=semver,pattern={{raw}}
            type=sha
          flavor: |
            latest=${{ inputs.tag-latest }}
            suffix=${{ inputs.tag-suffix }},onlatest=true

      - name: Docker meta for PR
        id: meta_pull_request
        if: github.event_name == 'pull_request'
        uses: docker/metadata-action@v5
        with:
          images: |
            quay.io/go-skynet/ci-tests
          tags: |
            type=ref,event=branch,suffix=${{ github.event.number }}-${{ inputs.backend }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
            type=semver,pattern={{raw}},suffix=${{ github.event.number }}-${{ inputs.backend }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
            type=sha,suffix=${{ github.event.number }}-${{ inputs.backend }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
          flavor: |
            latest=${{ inputs.tag-latest }}
            suffix=${{ inputs.tag-suffix }},onlatest=true
      ## End testing image
      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
        with:
          platforms: all

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@master

      - name: Login to DockerHub
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.dockerUsername }}
          password: ${{ secrets.dockerPassword }}

      - name: Login to Quay.io
        if: ${{ env.quay_username != '' }}
        uses: docker/login-action@v3
        with:
          registry: quay.io
          username: ${{ secrets.quayUsername }}
          password: ${{ secrets.quayPassword }}

      - name: Build and push
        uses: docker/build-push-action@v6
        if: github.event_name != 'pull_request'
        with:
          builder: ${{ steps.buildx.outputs.name }}
          build-args: |
            BUILD_TYPE=${{ inputs.build-type }}
            SKIP_DRIVERS=${{ inputs.skip-drivers }}
            CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
            CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
            BASE_IMAGE=${{ inputs.base-image }}
            BACKEND=${{ inputs.backend }}
          context: ${{ inputs.context }}
          file: ${{ inputs.dockerfile }}
          cache-from: type=gha
          platforms: ${{ inputs.platforms }}
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}

      - name: Build and push (PR)
        uses: docker/build-push-action@v6
        if: github.event_name == 'pull_request'
        with:
          builder: ${{ steps.buildx.outputs.name }}
          build-args: |
            BUILD_TYPE=${{ inputs.build-type }}
            SKIP_DRIVERS=${{ inputs.skip-drivers }}
            CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
            CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
            BASE_IMAGE=${{ inputs.base-image }}
            BACKEND=${{ inputs.backend }}
          context: ${{ inputs.context }}
          file: ${{ inputs.dockerfile }}
          cache-from: type=gha
          platforms: ${{ inputs.platforms }}
          push: ${{ env.quay_username != '' }}
          tags: ${{ steps.meta_pull_request.outputs.tags }}
          labels: ${{ steps.meta_pull_request.outputs.labels }}

      - name: job summary
        run: |
          echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY
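For orientation, the two build-push steps amount to a buildx invocation parameterized by the workflow inputs. A rough local sketch follows; the backend name, Dockerfile path, context, and tag are illustrative placeholders, not values fixed by the workflow:

```bash
# Rough local equivalent of the "Build and push" step above.
# BACKEND, Dockerfile path, context, and tag are placeholders for illustration.
docker buildx build \
  --build-arg BUILD_TYPE=cublas \
  --build-arg SKIP_DRIVERS=false \
  --build-arg CUDA_MAJOR_VERSION=12 \
  --build-arg CUDA_MINOR_VERSION=1 \
  --build-arg BASE_IMAGE=ubuntu:22.04 \
  --build-arg BACKEND=my-backend \
  --platform linux/amd64 \
  -f path/to/Dockerfile \
  -t quay.io/go-skynet/local-ai-backends:dev-my-backend \
  --push \
  path/to/context
```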
.github/workflows/backend_build_darwin.yml (new file, 144 lines, vendored)
@@ -0,0 +1,144 @@
---
name: 'build darwin python backend container images (reusable)'

on:
  workflow_call:
    inputs:
      backend:
        description: 'Backend to build'
        required: true
        type: string
      build-type:
        description: 'Build type (e.g., mps)'
        default: ''
        type: string
      use-pip:
        description: 'Use pip to install dependencies'
        default: false
        type: boolean
      lang:
        description: 'Programming language (e.g. go)'
        default: 'python'
        type: string
      go-version:
        description: 'Go version to use'
        default: '1.24.x'
        type: string
      tag-suffix:
        description: 'Tag suffix for the built image'
        required: true
        type: string
      runs-on:
        description: 'Runner to use'
        default: 'macOS-14'
        type: string
    secrets:
      dockerUsername:
        required: false
      dockerPassword:
        required: false
      quayUsername:
        required: true
      quayPassword:
        required: true

jobs:
  darwin-backend-build:
    runs-on: ${{ inputs.runs-on }}
    strategy:
      matrix:
        go-version: ['${{ inputs.go-version }}']
    steps:
      - name: Clone
        uses: actions/checkout@v5
        with:
          submodules: true

      - name: Setup Go ${{ matrix.go-version }}
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go-version }}
          cache: false

      # You can test your matrix by printing the current Go version
      - name: Display Go version
        run: go version

      - name: Dependencies
        run: |
          brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm

      - name: Build ${{ inputs.backend }}-darwin
        run: |
          make protogen-go
          BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend

      - name: Upload ${{ inputs.backend }}.tar
        uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.backend }}-tar
          path: backend-images/${{ inputs.backend }}.tar

  darwin-backend-publish:
    needs: darwin-backend-build
    if: github.event_name != 'pull_request'
    runs-on: ubuntu-latest
    steps:
      - name: Download ${{ inputs.backend }}.tar
        uses: actions/download-artifact@v5
        with:
          name: ${{ inputs.backend }}-tar
          path: .

      - name: Install crane
        run: |
          curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
          sudo mv crane /usr/local/bin/

      - name: Log in to DockerHub
        run: |
          echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin

      - name: Log in to quay.io
        run: |
          echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            localai/localai-backends
          tags: |
            type=ref,event=branch
            type=semver,pattern={{raw}}
            type=sha
          flavor: |
            latest=auto
            suffix=${{ inputs.tag-suffix }},onlatest=true

      - name: Docker meta
        id: quaymeta
        uses: docker/metadata-action@v5
        with:
          images: |
            quay.io/go-skynet/local-ai-backends
          tags: |
            type=ref,event=branch
            type=semver,pattern={{raw}}
            type=sha
          flavor: |
            latest=auto
            suffix=${{ inputs.tag-suffix }},onlatest=true

      - name: Push Docker image (DockerHub)
        run: |
          for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
            crane push ${{ inputs.backend }}.tar $tag
          done

      - name: Push Docker image (Quay)
        run: |
          for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
            crane push ${{ inputs.backend }}.tar $tag
          done
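Note that the publish job never runs Docker itself: the macOS build uploads an image tarball as an artifact, and crane pushes that tarball to each tag resolved by the metadata action. A condensed sketch of that flow, where the credentials, backend name, and tag are placeholders:

```bash
# Sketch of the crane-based publish flow above; concrete values are placeholders.
echo "$QUAY_PASSWORD" | crane auth login quay.io -u "$QUAY_USERNAME" --password-stdin
# Push the image tarball produced by the darwin build job to a resolved tag
crane push my-backend.tar quay.io/go-skynet/local-ai-backends:master-my-suffix
```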
.github/workflows/backend_pr.yml (new file, 78 lines, vendored)
@@ -0,0 +1,78 @@
name: 'build backend container images (PR-filtered)'

on:
  pull_request:

concurrency:
  group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
  cancel-in-progress: true

jobs:
  generate-matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      matrix-darwin: ${{ steps.set-matrix.outputs.matrix-darwin }}
      has-backends: ${{ steps.set-matrix.outputs.has-backends }}
      has-backends-darwin: ${{ steps.set-matrix.outputs.has-backends-darwin }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5

      - name: Setup Bun
        uses: oven-sh/setup-bun@v2

      - name: Install dependencies
        run: |
          bun add js-yaml
          bun add @octokit/core

      # filters the matrix in backend.yml
      - name: Filter matrix for changed backends
        id: set-matrix
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_EVENT_PATH: ${{ github.event_path }}
        run: bun run scripts/changed-backends.js

  backend-jobs:
    needs: generate-matrix
    uses: ./.github/workflows/backend_build.yml
    if: needs.generate-matrix.outputs.has-backends == 'true'
    with:
      tag-latest: ${{ matrix.tag-latest }}
      tag-suffix: ${{ matrix.tag-suffix }}
      build-type: ${{ matrix.build-type }}
      cuda-major-version: ${{ matrix.cuda-major-version }}
      cuda-minor-version: ${{ matrix.cuda-minor-version }}
      platforms: ${{ matrix.platforms }}
      runs-on: ${{ matrix.runs-on }}
      base-image: ${{ matrix.base-image }}
      backend: ${{ matrix.backend }}
      dockerfile: ${{ matrix.dockerfile }}
      skip-drivers: ${{ matrix.skip-drivers }}
      context: ${{ matrix.context }}
    secrets:
      quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
      quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
    strategy:
      fail-fast: true
      matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}

  backend-jobs-darwin:
    needs: generate-matrix
    uses: ./.github/workflows/backend_build_darwin.yml
    if: needs.generate-matrix.outputs.has-backends-darwin == 'true'
    with:
      backend: ${{ matrix.backend }}
      build-type: ${{ matrix.build-type }}
      go-version: "1.24.x"
      tag-suffix: ${{ matrix.tag-suffix }}
      lang: ${{ matrix.lang || 'python' }}
      use-pip: ${{ matrix.backend == 'diffusers' }}
      runs-on: "macOS-14"
    secrets:
      quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
      quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
    strategy:
      fail-fast: true
      matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix-darwin) }}
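The generate-matrix job talks to the two reusable workflows purely through step outputs: changed-backends.js writes JSON matrices plus has-backends flags to $GITHUB_OUTPUT, which fromJson() then expands. A hedged sketch of the shape those outputs take; the field values here are illustrative, and the real logic lives in scripts/changed-backends.js:

```bash
# Illustrative only: how a step script would set the outputs consumed above.
echo 'has-backends=true' >> "$GITHUB_OUTPUT"
echo 'matrix={"include":[{"backend":"rerankers","build-type":"","cuda-major-version":"12","cuda-minor-version":"1","platforms":"linux/amd64","runs-on":"ubuntu-latest","base-image":"ubuntu:22.04","dockerfile":"./backend/Dockerfile.python","context":"./backend","skip-drivers":"false","tag-latest":"false","tag-suffix":"-rerankers"}]}' >> "$GITHUB_OUTPUT"
```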
.github/workflows/build-test.yaml (new file, 67 lines, vendored)
@@ -0,0 +1,67 @@
name: Build test

on:
  push:
    branches:
      - master
  pull_request:

jobs:
  build-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.23
      - name: Run GoReleaser
        run: |
          make dev-dist
  launcher-build-darwin:
    runs-on: macos-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.23
      - name: Build launcher for macOS ARM64
        run: |
          make build-launcher-darwin
          ls -liah dist
      - name: Upload macOS launcher artifacts
        uses: actions/upload-artifact@v4
        with:
          name: launcher-macos
          path: dist/
          retention-days: 30

  launcher-build-linux:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.23
      - name: Build launcher for Linux
        run: |
          sudo apt-get update
          sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
          make build-launcher-linux
      - name: Upload Linux launcher artifacts
        uses: actions/upload-artifact@v4
        with:
          name: launcher-linux
          path: local-ai-launcher-linux.tar.xz
          retention-days: 30
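The Linux launcher job is straightforward to reproduce outside CI; a sketch for an Ubuntu host, assuming this repo's Makefile targets:

```bash
# Local sketch of the launcher-build-linux job (assumes an Ubuntu host).
sudo apt-get update
sudo apt-get install -y golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
ls -liah local-ai-launcher-linux.tar.xz   # the artifact the workflow uploads
```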
.github/workflows/bump_deps.yaml (14 changed lines, vendored)
@@ -10,30 +10,32 @@ jobs:
       matrix:
         include:
           - repository: "ggml-org/llama.cpp"
-            variable: "CPPLLAMA_VERSION"
+            variable: "LLAMA_VERSION"
             branch: "master"
+            file: "backend/cpp/llama-cpp/Makefile"
           - repository: "ggml-org/whisper.cpp"
             variable: "WHISPER_CPP_VERSION"
             branch: "master"
+            file: "backend/go/whisper/Makefile"
           - repository: "PABannier/bark.cpp"
             variable: "BARKCPP_VERSION"
             branch: "main"
+            file: "Makefile"
           - repository: "leejet/stable-diffusion.cpp"
             variable: "STABLEDIFFUSION_GGML_VERSION"
             branch: "master"
-          - repository: "mudler/go-stable-diffusion"
-            variable: "STABLEDIFFUSION_VERSION"
-            branch: "master"
+            file: "backend/go/stablediffusion-ggml/Makefile"
           - repository: "mudler/go-piper"
             variable: "PIPER_VERSION"
             branch: "master"
+            file: "backend/go/piper/Makefile"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Bump dependencies 🔧
         id: bump
         run: |
-          bash .github/bump_deps.sh ${{ matrix.repository }} ${{ matrix.branch }} ${{ matrix.variable }}
+          bash .github/bump_deps.sh ${{ matrix.repository }} ${{ matrix.branch }} ${{ matrix.variable }} ${{ matrix.file }}
           {
             echo 'message<<EOF'
             cat "${{ matrix.variable }}_message.txt"
.github/workflows/bump_docs.yaml (2 changed lines, vendored)
@@ -12,7 +12,7 @@ jobs:
           - repository: "mudler/LocalAI"
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Bump dependencies 🔧
         run: |
           bash .github/bump_docs.sh ${{ matrix.repository }}
.github/workflows/checksum_checker.yaml (5 changed lines, vendored)
@@ -5,7 +5,7 @@ on:
   workflow_dispatch:
 jobs:
   checksum_check:
-    runs-on: arc-runner-set
+    runs-on: ubuntu-latest
     steps:
       - name: Force Install GIT latest
         run: |
@@ -15,12 +15,11 @@ jobs:
           && sudo add-apt-repository -y ppa:git-core/ppa \
           && sudo apt-get update \
           && sudo apt-get install -y git
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Install dependencies
         run: |
           sudo apt-get update
           sudo apt-get install -y pip wget
-          sudo pip install --upgrade pip
           pip install huggingface_hub
       - name: 'Setup yq'
         uses: dcarbone/install-yq-action@v1.3.1
.github/workflows/dependabot_auto.yml (2 changed lines, vendored)
@@ -20,7 +20,7 @@ jobs:
         skip-commit-verification: true

       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Approve a PR if not already approved
         run: |
.github/workflows/deploy-explorer.yaml (4 changed lines, vendored)
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - uses: actions/setup-go@v5
@@ -31,7 +31,7 @@ jobs:
           make protogen-go
       - name: Build api
         run: |
-          CGO_ENABLED=0 make build-api
+          CGO_ENABLED=0 make build
       - name: rm
         uses: appleboy/ssh-action@v1.2.2
         with:
.github/workflows/generate_grpc_cache.yaml (4 changed lines, vendored)
@@ -17,7 +17,7 @@ jobs:
       matrix:
         include:
           - grpc-base-image: ubuntu:22.04
-            runs-on: 'arc-runner-set'
+            runs-on: 'ubuntu-latest'
             platforms: 'linux/amd64,linux/arm64'
     runs-on: ${{matrix.runs-on}}
     steps:
@@ -73,7 +73,7 @@ jobs:
         uses: docker/setup-buildx-action@master

       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Cache GRPC
         uses: docker/build-push-action@v6
.github/workflows/generate_intel_image.yaml (4 changed lines, vendored)
@@ -15,7 +15,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - base-image: intel/oneapi-basekit:2025.1.0-0-devel-ubuntu22.04
+          - base-image: intel/oneapi-basekit:2025.2.0-0-devel-ubuntu22.04
             runs-on: 'ubuntu-latest'
             platforms: 'linux/amd64'
     runs-on: ${{matrix.runs-on}}
@@ -43,7 +43,7 @@ jobs:
         uses: docker/setup-buildx-action@master

       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Cache Intel images
         uses: docker/build-push-action@v6
.github/workflows/image-pr.yml (102 changed lines, vendored)
@@ -9,13 +9,11 @@ concurrency:
   cancel-in-progress: true

 jobs:
-  extras-image-build:
+  image-build:
     uses: ./.github/workflows/image_build.yml
     with:
       tag-latest: ${{ matrix.tag-latest }}
       tag-suffix: ${{ matrix.tag-suffix }}
-      ffmpeg: ${{ matrix.ffmpeg }}
-      image-type: ${{ matrix.image-type }}
       build-type: ${{ matrix.build-type }}
       cuda-major-version: ${{ matrix.cuda-major-version }}
       cuda-minor-version: ${{ matrix.cuda-minor-version }}
@@ -36,115 +34,35 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          # This is basically covered by the AIO test
-          # - build-type: ''
-          #   platforms: 'linux/amd64'
-          #   tag-latest: 'false'
-          #   tag-suffix: '-ffmpeg'
-          #   ffmpeg: 'true'
-          #   image-type: 'extras'
-          #   runs-on: 'arc-runner-set'
-          #   base-image: "ubuntu:22.04"
-          #   makeflags: "--jobs=3 --output-sync=target"
           - build-type: 'cublas'
             cuda-major-version: "12"
-            cuda-minor-version: "0"
+            cuda-minor-version: "8"
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            tag-suffix: '-cublas-cuda12-ffmpeg'
-            ffmpeg: 'true'
-            image-type: 'extras'
-            runs-on: 'arc-runner-set'
+            tag-suffix: '-gpu-nvidia-cuda-12'
+            runs-on: 'ubuntu-latest'
             base-image: "ubuntu:22.04"
             makeflags: "--jobs=3 --output-sync=target"
           - build-type: 'hipblas'
             platforms: 'linux/amd64'
             tag-latest: 'false'
             tag-suffix: '-hipblas'
-            ffmpeg: 'false'
-            image-type: 'extras'
-            base-image: "rocm/dev-ubuntu-22.04:6.1"
+            base-image: "rocm/dev-ubuntu-22.04:6.4.3"
             grpc-base-image: "ubuntu:22.04"
-            runs-on: 'arc-runner-set'
+            runs-on: 'ubuntu-latest'
             makeflags: "--jobs=3 --output-sync=target"
-          - build-type: 'sycl_f16'
+          - build-type: 'sycl'
             platforms: 'linux/amd64'
             tag-latest: 'false'
             base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
             grpc-base-image: "ubuntu:22.04"
-            tag-suffix: 'sycl-f16-ffmpeg'
-            ffmpeg: 'true'
-            image-type: 'extras'
-            runs-on: 'arc-runner-set'
+            tag-suffix: 'sycl'
+            runs-on: 'ubuntu-latest'
             makeflags: "--jobs=3 --output-sync=target"
           - build-type: 'vulkan'
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            tag-suffix: '-vulkan-ffmpeg-core'
-            ffmpeg: 'true'
-            image-type: 'core'
+            tag-suffix: '-vulkan-core'
             runs-on: 'ubuntu-latest'
             base-image: "ubuntu:22.04"
             makeflags: "--jobs=4 --output-sync=target"
-  # core-image-build:
-  #   uses: ./.github/workflows/image_build.yml
-  #   with:
-  #     tag-latest: ${{ matrix.tag-latest }}
-  #     tag-suffix: ${{ matrix.tag-suffix }}
-  #     ffmpeg: ${{ matrix.ffmpeg }}
-  #     image-type: ${{ matrix.image-type }}
-  #     build-type: ${{ matrix.build-type }}
-  #     cuda-major-version: ${{ matrix.cuda-major-version }}
-  #     cuda-minor-version: ${{ matrix.cuda-minor-version }}
-  #     platforms: ${{ matrix.platforms }}
-  #     runs-on: ${{ matrix.runs-on }}
-  #     base-image: ${{ matrix.base-image }}
-  #     grpc-base-image: ${{ matrix.grpc-base-image }}
-  #     makeflags: ${{ matrix.makeflags }}
-  #   secrets:
-  #     dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
-  #     dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
-  #     quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
-  #     quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
-  #   strategy:
-  #     matrix:
-  #       include:
-  #         - build-type: ''
-  #           platforms: 'linux/amd64'
-  #           tag-latest: 'false'
-  #           tag-suffix: '-ffmpeg-core'
-  #           ffmpeg: 'true'
-  #           image-type: 'core'
-  #           runs-on: 'ubuntu-latest'
-  #           base-image: "ubuntu:22.04"
-  #           makeflags: "--jobs=4 --output-sync=target"
-  #         - build-type: 'sycl_f16'
-  #           platforms: 'linux/amd64'
-  #           tag-latest: 'false'
-  #           base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-  #           grpc-base-image: "ubuntu:22.04"
-  #           tag-suffix: 'sycl-f16-ffmpeg-core'
-  #           ffmpeg: 'true'
-  #           image-type: 'core'
-  #           runs-on: 'arc-runner-set'
-  #           makeflags: "--jobs=3 --output-sync=target"
-  #         - build-type: 'cublas'
-  #           cuda-major-version: "12"
-  #           cuda-minor-version: "0"
-  #           platforms: 'linux/amd64'
-  #           tag-latest: 'false'
-  #           tag-suffix: '-cublas-cuda12-ffmpeg-core'
-  #           ffmpeg: 'true'
-  #           image-type: 'core'
-  #           runs-on: 'ubuntu-latest'
-  #           base-image: "ubuntu:22.04"
-  #           makeflags: "--jobs=4 --output-sync=target"
-  #         - build-type: 'vulkan'
-  #           platforms: 'linux/amd64'
-  #           tag-latest: 'false'
-  #           tag-suffix: '-vulkan-ffmpeg-core'
-  #           ffmpeg: 'true'
-  #           image-type: 'core'
-  #           runs-on: 'ubuntu-latest'
-  #           base-image: "ubuntu:22.04"
-  #           makeflags: "--jobs=4 --output-sync=target"
.github/workflows/image.yml (203 changed lines, vendored)
@@ -18,8 +18,6 @@ jobs:
     with:
       tag-latest: ${{ matrix.tag-latest }}
       tag-suffix: ${{ matrix.tag-suffix }}
-      ffmpeg: ${{ matrix.ffmpeg }}
-      image-type: ${{ matrix.image-type }}
       build-type: ${{ matrix.build-type }}
       cuda-major-version: ${{ matrix.cuda-major-version }}
       cuda-minor-version: ${{ matrix.cuda-minor-version }}
@@ -29,157 +27,29 @@ jobs:
       grpc-base-image: ${{ matrix.grpc-base-image }}
       aio: ${{ matrix.aio }}
       makeflags: ${{ matrix.makeflags }}
-      latest-image: ${{ matrix.latest-image }}
-      latest-image-aio: ${{ matrix.latest-image-aio }}
     secrets:
       dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
       dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
       quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
       quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
     strategy:
-      # Pushing with all jobs in parallel
-      # eats the bandwidth of all the nodes
-      max-parallel: 2
       matrix:
         include:
           - build-type: 'hipblas'
             platforms: 'linux/amd64'
             tag-latest: 'auto'
-            tag-suffix: '-hipblas-extras'
-            ffmpeg: 'true'
-            image-type: 'extras'
+            tag-suffix: '-gpu-hipblas'
+            base-image: "rocm/dev-ubuntu-22.04:6.4.3"
+            grpc-base-image: "ubuntu:22.04"
+            runs-on: 'ubuntu-latest'
+            makeflags: "--jobs=3 --output-sync=target"
             aio: "-aio-gpu-hipblas"
-            base-image: "rocm/dev-ubuntu-22.04:6.1"
-            grpc-base-image: "ubuntu:22.04"
-            latest-image: 'latest-gpu-hipblas-extras'
-            latest-image-aio: 'latest-aio-gpu-hipblas'
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
-          - build-type: 'hipblas'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            tag-suffix: '-hipblas'
-            ffmpeg: 'true'
-            image-type: 'core'
-            base-image: "rocm/dev-ubuntu-22.04:6.1"
-            grpc-base-image: "ubuntu:22.04"
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
-            latest-image: 'latest-gpu-hipblas'
-  self-hosted-jobs:
-    uses: ./.github/workflows/image_build.yml
-    with:
-      tag-latest: ${{ matrix.tag-latest }}
-      tag-suffix: ${{ matrix.tag-suffix }}
-      ffmpeg: ${{ matrix.ffmpeg }}
-      image-type: ${{ matrix.image-type }}
-      build-type: ${{ matrix.build-type }}
-      cuda-major-version: ${{ matrix.cuda-major-version }}
-      cuda-minor-version: ${{ matrix.cuda-minor-version }}
-      platforms: ${{ matrix.platforms }}
-      runs-on: ${{ matrix.runs-on }}
-      base-image: ${{ matrix.base-image }}
-      grpc-base-image: ${{ matrix.grpc-base-image }}
-      aio: ${{ matrix.aio }}
-      makeflags: ${{ matrix.makeflags }}
-      latest-image: ${{ matrix.latest-image }}
-      latest-image-aio: ${{ matrix.latest-image-aio }}
-    secrets:
-      dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
-      dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
-      quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
-      quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
-    strategy:
-      # Pushing with all jobs in parallel
-      # eats the bandwidth of all the nodes
-      max-parallel: ${{ github.event_name != 'pull_request' && 5 || 8 }}
-      matrix:
-        include:
-          - build-type: 'cublas'
-            cuda-major-version: "11"
-            cuda-minor-version: "7"
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            tag-suffix: '-cublas-cuda11-extras'
-            ffmpeg: 'true'
-            image-type: 'extras'
-            runs-on: 'arc-runner-set'
-            base-image: "ubuntu:22.04"
-            aio: "-aio-gpu-nvidia-cuda-11"
-            latest-image: 'latest-gpu-nvidia-cuda-11-extras'
-            latest-image-aio: 'latest-aio-gpu-nvidia-cuda-11'
-            makeflags: "--jobs=3 --output-sync=target"
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "0"
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            tag-suffix: '-cublas-cuda12-extras'
-            ffmpeg: 'true'
-            image-type: 'extras'
-            runs-on: 'arc-runner-set'
-            base-image: "ubuntu:22.04"
-            aio: "-aio-gpu-nvidia-cuda-12"
-            latest-image: 'latest-gpu-nvidia-cuda-12-extras'
-            latest-image-aio: 'latest-aio-gpu-nvidia-cuda-12'
-            makeflags: "--jobs=3 --output-sync=target"
-          - build-type: 'sycl_f16'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-            grpc-base-image: "ubuntu:22.04"
-            tag-suffix: '-sycl-f16-extras'
-            ffmpeg: 'true'
-            image-type: 'extras'
-            runs-on: 'arc-runner-set'
-            aio: "-aio-gpu-intel-f16"
-            latest-image: 'latest-gpu-intel-f16-extras'
-            latest-image-aio: 'latest-aio-gpu-intel-f16'
-            makeflags: "--jobs=3 --output-sync=target"
-          - build-type: 'sycl_f32'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-            grpc-base-image: "ubuntu:22.04"
-            tag-suffix: '-sycl-f32-extras'
-            ffmpeg: 'true'
-            image-type: 'extras'
-            runs-on: 'arc-runner-set'
-            aio: "-aio-gpu-intel-f32"
-            latest-image: 'latest-gpu-intel-f32-extras'
-            latest-image-aio: 'latest-aio-gpu-intel-f32'
-            makeflags: "--jobs=3 --output-sync=target"
-          # Core images
-          - build-type: 'sycl_f16'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-            grpc-base-image: "ubuntu:22.04"
-            tag-suffix: '-sycl-f16'
-            ffmpeg: 'true'
-            image-type: 'core'
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
-            latest-image: 'latest-gpu-intel-f16'
-          - build-type: 'sycl_f32'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
-            grpc-base-image: "ubuntu:22.04"
-            tag-suffix: '-sycl-f32'
-            ffmpeg: 'true'
-            image-type: 'core'
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
-            latest-image: 'latest-gpu-intel-f32'

   core-image-build:
     uses: ./.github/workflows/image_build.yml
     with:
       tag-latest: ${{ matrix.tag-latest }}
       tag-suffix: ${{ matrix.tag-suffix }}
-      ffmpeg: ${{ matrix.ffmpeg }}
-      image-type: ${{ matrix.image-type }}
       build-type: ${{ matrix.build-type }}
       cuda-major-version: ${{ matrix.cuda-major-version }}
       cuda-minor-version: ${{ matrix.cuda-minor-version }}
@@ -189,8 +59,6 @@ jobs:
       base-image: ${{ matrix.base-image }}
       grpc-base-image: ${{ matrix.grpc-base-image }}
       makeflags: ${{ matrix.makeflags }}
-      latest-image: ${{ matrix.latest-image }}
-      latest-image-aio: ${{ matrix.latest-image-aio }}
       skip-drivers: ${{ matrix.skip-drivers }}
     secrets:
       dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -198,66 +66,64 @@ jobs:
       quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
       quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
     strategy:
-      max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
+      #max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
       matrix:
         include:
           - build-type: ''
             platforms: 'linux/amd64,linux/arm64'
             tag-latest: 'auto'
             tag-suffix: ''
-            ffmpeg: 'true'
-            image-type: 'core'
             base-image: "ubuntu:22.04"
-            runs-on: 'arc-runner-set'
+            runs-on: 'ubuntu-latest'
             aio: "-aio-cpu"
-            latest-image: 'latest-cpu'
-            latest-image-aio: 'latest-aio-cpu'
             makeflags: "--jobs=4 --output-sync=target"
             skip-drivers: 'false'
           - build-type: 'cublas'
             cuda-major-version: "11"
             cuda-minor-version: "7"
|
||||||
cuda-minor-version: "7"
|
cuda-minor-version: "7"
|
||||||
platforms: 'linux/amd64'
|
platforms: 'linux/amd64'
|
||||||
tag-latest: 'false'
|
tag-latest: 'auto'
|
||||||
tag-suffix: '-cublas-cuda11'
|
tag-suffix: '-gpu-nvidia-cuda-11'
|
||||||
ffmpeg: 'true'
|
runs-on: 'ubuntu-latest'
|
||||||
image-type: 'core'
|
|
||||||
runs-on: 'arc-runner-set'
|
|
||||||
base-image: "ubuntu:22.04"
|
base-image: "ubuntu:22.04"
|
||||||
makeflags: "--jobs=4 --output-sync=target"
|
makeflags: "--jobs=4 --output-sync=target"
|
||||||
skip-drivers: 'false'
|
skip-drivers: 'false'
|
||||||
latest-image: 'latest-gpu-nvidia-cuda-12'
|
aio: "-aio-gpu-nvidia-cuda-11"
|
||||||
- build-type: 'cublas'
|
- build-type: 'cublas'
|
||||||
cuda-major-version: "12"
|
cuda-major-version: "12"
|
||||||
cuda-minor-version: "0"
|
cuda-minor-version: "8"
|
||||||
platforms: 'linux/amd64'
|
platforms: 'linux/amd64'
|
||||||
tag-latest: 'false'
|
tag-latest: 'auto'
|
||||||
tag-suffix: '-cublas-cuda12'
|
tag-suffix: '-gpu-nvidia-cuda-12'
|
||||||
ffmpeg: 'true'
|
runs-on: 'ubuntu-latest'
|
||||||
image-type: 'core'
|
|
||||||
runs-on: 'arc-runner-set'
|
|
||||||
base-image: "ubuntu:22.04"
|
base-image: "ubuntu:22.04"
|
||||||
skip-drivers: 'false'
|
skip-drivers: 'false'
|
||||||
makeflags: "--jobs=4 --output-sync=target"
|
makeflags: "--jobs=4 --output-sync=target"
|
||||||
latest-image: 'latest-gpu-nvidia-cuda-12'
|
aio: "-aio-gpu-nvidia-cuda-12"
|
||||||
- build-type: 'vulkan'
|
- build-type: 'vulkan'
|
||||||
platforms: 'linux/amd64'
|
platforms: 'linux/amd64'
|
||||||
tag-latest: 'false'
|
tag-latest: 'auto'
|
||||||
tag-suffix: '-vulkan'
|
tag-suffix: '-gpu-vulkan'
|
||||||
ffmpeg: 'true'
|
runs-on: 'ubuntu-latest'
|
||||||
image-type: 'core'
|
|
||||||
runs-on: 'arc-runner-set'
|
|
||||||
base-image: "ubuntu:22.04"
|
base-image: "ubuntu:22.04"
|
||||||
skip-drivers: 'false'
|
skip-drivers: 'false'
|
||||||
makeflags: "--jobs=4 --output-sync=target"
|
makeflags: "--jobs=4 --output-sync=target"
|
||||||
latest-image: 'latest-gpu-vulkan'
|
aio: "-aio-gpu-vulkan"
|
||||||
|
- build-type: 'intel'
|
||||||
|
platforms: 'linux/amd64'
|
||||||
|
tag-latest: 'auto'
|
||||||
|
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
|
||||||
|
grpc-base-image: "ubuntu:22.04"
|
||||||
|
tag-suffix: '-gpu-intel'
|
||||||
|
runs-on: 'ubuntu-latest'
|
||||||
|
makeflags: "--jobs=3 --output-sync=target"
|
||||||
|
aio: "-aio-gpu-intel"
|
||||||
|
|
||||||
gh-runner:
|
gh-runner:
|
||||||
uses: ./.github/workflows/image_build.yml
|
uses: ./.github/workflows/image_build.yml
|
||||||
with:
|
with:
|
||||||
tag-latest: ${{ matrix.tag-latest }}
|
tag-latest: ${{ matrix.tag-latest }}
|
||||||
tag-suffix: ${{ matrix.tag-suffix }}
|
tag-suffix: ${{ matrix.tag-suffix }}
|
||||||
ffmpeg: ${{ matrix.ffmpeg }}
|
|
||||||
image-type: ${{ matrix.image-type }}
|
|
||||||
build-type: ${{ matrix.build-type }}
|
build-type: ${{ matrix.build-type }}
|
||||||
cuda-major-version: ${{ matrix.cuda-major-version }}
|
cuda-major-version: ${{ matrix.cuda-major-version }}
|
||||||
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
cuda-minor-version: ${{ matrix.cuda-minor-version }}
|
||||||
@@ -267,8 +133,6 @@ jobs:
|
|||||||
base-image: ${{ matrix.base-image }}
|
base-image: ${{ matrix.base-image }}
|
||||||
grpc-base-image: ${{ matrix.grpc-base-image }}
|
grpc-base-image: ${{ matrix.grpc-base-image }}
|
||||||
makeflags: ${{ matrix.makeflags }}
|
makeflags: ${{ matrix.makeflags }}
|
||||||
latest-image: ${{ matrix.latest-image }}
|
|
||||||
latest-image-aio: ${{ matrix.latest-image-aio }}
|
|
||||||
skip-drivers: ${{ matrix.skip-drivers }}
|
skip-drivers: ${{ matrix.skip-drivers }}
|
||||||
secrets:
|
secrets:
|
||||||
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
@@ -280,13 +144,10 @@ jobs:
|
|||||||
include:
|
include:
|
||||||
- build-type: 'cublas'
|
- build-type: 'cublas'
|
||||||
cuda-major-version: "12"
|
cuda-major-version: "12"
|
||||||
cuda-minor-version: "0"
|
cuda-minor-version: "8"
|
||||||
platforms: 'linux/arm64'
|
platforms: 'linux/arm64'
|
||||||
tag-latest: 'false'
|
tag-latest: 'auto'
|
||||||
tag-suffix: '-nvidia-l4t-arm64'
|
tag-suffix: '-nvidia-l4t-arm64'
|
||||||
latest-image: 'latest-nvidia-l4t-arm64'
|
|
||||||
ffmpeg: 'true'
|
|
||||||
image-type: 'core'
|
|
||||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||||
runs-on: 'ubuntu-24.04-arm'
|
runs-on: 'ubuntu-24.04-arm'
|
||||||
makeflags: "--jobs=4 --output-sync=target"
|
makeflags: "--jobs=4 --output-sync=target"
|
||||||
|
|||||||
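Note on the `max-parallel` expressions above: GitHub Actions expressions have no ternary operator, so `${{ cond && a || b }}` is the standard emulation. It evaluates to `a` when `cond` is true and to `b` otherwise, with the caveat that a falsy `a` (empty string, `0`, `false`) falls through to `b`. A minimal sketch of the idiom:

    strategy:
      # effectively "5 if this is not a pull request, else 8"; this works
      # only because 5 is truthy, so || is not taken when the condition holds
      max-parallel: ${{ github.event_name != 'pull_request' && 5 || 8 }}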
.github/workflows/image_build.yml (vendored, 89 lines changed)
@@ -33,30 +33,14 @@ on:
       description: 'Tag latest'
       default: ''
       type: string
-    latest-image:
-      description: 'Tag latest'
-      default: ''
-      type: string
-    latest-image-aio:
-      description: 'Tag latest'
-      default: ''
-      type: string
     tag-suffix:
       description: 'Tag suffix'
       default: ''
       type: string
-    ffmpeg:
-      description: 'FFMPEG'
-      default: ''
-      type: string
     skip-drivers:
       description: 'Skip drivers by default'
       default: 'false'
       type: string
-    image-type:
-      description: 'Image type'
-      default: ''
-      type: string
     runs-on:
       description: 'Runs on'
       required: true
@@ -85,6 +69,22 @@ jobs:
   reusable_image-build:
     runs-on: ${{ inputs.runs-on }}
     steps:
+      - name: Free Disk Space (Ubuntu)
+        if: inputs.runs-on == 'ubuntu-latest'
+        uses: jlumbroso/free-disk-space@main
+        with:
+          # this might remove tools that are actually needed,
+          # if set to "true" but frees about 6 GB
+          tool-cache: true
+          # all of these default to true, but feel free to set to
+          # "false" if necessary for your workflow
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: true
+          docker-images: true
+          swap-storage: true
       - name: Force Install GIT latest
         run: |
           sudo apt-get update \
@@ -94,7 +94,7 @@ jobs:
             && sudo apt-get update \
             && sudo apt-get install -y git
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Release space from worker
         if: inputs.runs-on == 'ubuntu-latest'
@@ -106,8 +106,8 @@ jobs:
           df -h
           echo
           sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
-          sudo apt-get remove --auto-remove android-sdk-platform-tools || true
-          sudo apt-get purge --auto-remove android-sdk-platform-tools || true
+          sudo apt-get remove --auto-remove android-sdk-platform-tools snapd || true
+          sudo apt-get purge --auto-remove android-sdk-platform-tools snapd || true
           sudo rm -rf /usr/local/lib/android
           sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
           sudo rm -rf /usr/share/dotnet
@@ -152,18 +152,18 @@ jobs:
             type=sha
           flavor: |
             latest=${{ inputs.tag-latest }}
-            suffix=${{ inputs.tag-suffix }}
+            suffix=${{ inputs.tag-suffix }},onlatest=true
       - name: Docker meta for PR
         id: meta_pull_request
         if: github.event_name == 'pull_request'
         uses: docker/metadata-action@v5
         with:
           images: |
-            ttl.sh/localai-ci-pr-${{ github.event.number }}
+            quay.io/go-skynet/ci-tests
           tags: |
-            type=ref,event=branch
-            type=semver,pattern={{raw}}
-            type=sha
+            type=ref,event=branch,suffix=localai${{ github.event.number }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
+            type=semver,pattern={{raw}},suffix=localai${{ github.event.number }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
+            type=sha,suffix=localai${{ github.event.number }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
           flavor: |
             latest=${{ inputs.tag-latest }}
             suffix=${{ inputs.tag-suffix }}
@@ -179,7 +179,7 @@ jobs:
             type=semver,pattern={{raw}}
           flavor: |
             latest=${{ inputs.tag-latest }}
-            suffix=${{ inputs.aio }}
+            suffix=${{ inputs.aio }},onlatest=true
       - name: Docker meta AIO (dockerhub)
         if: inputs.aio != ''
@@ -192,7 +192,8 @@ jobs:
             type=ref,event=branch
             type=semver,pattern={{raw}}
           flavor: |
-            suffix=${{ inputs.aio }}
+            latest=${{ inputs.tag-latest }}
+            suffix=${{ inputs.aio }},onlatest=true
       - name: Set up QEMU
         uses: docker/setup-qemu-action@master
@@ -231,8 +232,6 @@ jobs:
             BUILD_TYPE=${{ inputs.build-type }}
             CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
             CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
-            FFMPEG=${{ inputs.ffmpeg }}
-            IMAGE_TYPE=${{ inputs.image-type }}
             BASE_IMAGE=${{ inputs.base-image }}
             GRPC_BASE_IMAGE=${{ inputs.grpc-base-image || inputs.base-image }}
             GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
@@ -260,8 +259,6 @@ jobs:
             BUILD_TYPE=${{ inputs.build-type }}
             CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
             CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
-            FFMPEG=${{ inputs.ffmpeg }}
-            IMAGE_TYPE=${{ inputs.image-type }}
             BASE_IMAGE=${{ inputs.base-image }}
             GRPC_BASE_IMAGE=${{ inputs.grpc-base-image || inputs.base-image }}
             GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
@@ -272,13 +269,9 @@ jobs:
           file: ./Dockerfile
           cache-from: type=gha
           platforms: ${{ inputs.platforms }}
-          push: true
+          #push: true
           tags: ${{ steps.meta_pull_request.outputs.tags }}
           labels: ${{ steps.meta_pull_request.outputs.labels }}
-      - name: Testing image
-        if: github.event_name == 'pull_request'
-        run: |
-          echo "Image is available at ttl.sh/localai-ci-pr-${{ github.event.number }}:${{ steps.meta_pull_request.outputs.version }}" >> $GITHUB_STEP_SUMMARY
       ## End testing image
       - name: Build and push AIO image
         if: inputs.aio != ''
@@ -310,32 +303,6 @@ jobs:
           tags: ${{ steps.meta_aio_dockerhub.outputs.tags }}
           labels: ${{ steps.meta_aio_dockerhub.outputs.labels }}
-      - name: Cleanup
-        run: |
-          docker builder prune -f
-          docker system prune --force --volumes --all
-      - name: Latest tag
-        # run this on branches, when it is a tag and there is a latest-image defined
-        if: github.event_name != 'pull_request' && inputs.latest-image != '' && github.ref_type == 'tag'
-        run: |
-          docker pull localai/localai:${{ steps.meta.outputs.version }}
-          docker tag localai/localai:${{ steps.meta.outputs.version }} localai/localai:${{ inputs.latest-image }}
-          docker push localai/localai:${{ inputs.latest-image }}
-          docker pull quay.io/go-skynet/local-ai:${{ steps.meta.outputs.version }}
-          docker tag quay.io/go-skynet/local-ai:${{ steps.meta.outputs.version }} quay.io/go-skynet/local-ai:${{ inputs.latest-image }}
-          docker push quay.io/go-skynet/local-ai:${{ inputs.latest-image }}
-      - name: Latest AIO tag
-        # run this on branches, when it is a tag and there is a latest-image defined
-        if: github.event_name != 'pull_request' && inputs.latest-image-aio != '' && github.ref_type == 'tag'
-        run: |
-          docker pull localai/localai:${{ steps.meta_aio_dockerhub.outputs.version }}
-          docker tag localai/localai:${{ steps.meta_aio_dockerhub.outputs.version }} localai/localai:${{ inputs.latest-image-aio }}
-          docker push localai/localai:${{ inputs.latest-image-aio }}
-          docker pull quay.io/go-skynet/local-ai:${{ steps.meta_aio.outputs.version }}
-          docker tag quay.io/go-skynet/local-ai:${{ steps.meta_aio.outputs.version }} quay.io/go-skynet/local-ai:${{ inputs.latest-image-aio }}
-          docker push quay.io/go-skynet/local-ai:${{ inputs.latest-image-aio }}
       - name: job summary
         run: |
           echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY
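Note on the `,onlatest=true` additions above: in docker/metadata-action, `flavor` entries such as `suffix=` apply to the generated tags but not to the `latest` tag by default; `onlatest=true` extends the suffix to `latest` as well, producing tags like `latest-gpu-vulkan` directly, which is presumably what let the manual "Latest tag" retagging steps be dropped. A minimal sketch (the `images` value here is illustrative):

    - uses: docker/metadata-action@v5
      with:
        images: localai/localai  # illustrative
        flavor: |
          latest=auto
          suffix=-gpu-vulkan,onlatest=true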
.github/workflows/labeler.yml (vendored, 2 lines changed)
@@ -9,4 +9,4 @@ jobs:
     pull-requests: write
   runs-on: ubuntu-latest
   steps:
-  - uses: actions/labeler@v5
+  - uses: actions/labeler@v6
.github/workflows/localaibot_automerge.yml (vendored, 2 lines changed)
@@ -13,7 +13,7 @@ jobs:
     if: ${{ github.actor == 'localai-bot' }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Approve a PR if not already approved
         run: |
.github/workflows/notify-models.yaml (vendored, 10 lines changed)
@@ -11,14 +11,14 @@ jobs:
       MODEL_NAME: gemma-3-12b-it
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # needed to checkout all branches for this Action to work
       - uses: mudler/localai-github-action@v1
         with:
           model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
       # Check the PR diff using the current branch and the base branch of the PR
-      - uses: GrantBirki/git-diff-action@v2.8.0
+      - uses: GrantBirki/git-diff-action@v2.8.1
         id: git-diff-action
         with:
           json_diff_file_output: diff.json
@@ -90,16 +90,16 @@ jobs:
       MODEL_NAME: gemma-3-12b-it
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # needed to checkout all branches for this Action to work
       - name: Start LocalAI
         run: |
           echo "Starting LocalAI..."
-          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
+          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master run --debug $MODEL_NAME
           until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
       # Check the PR diff using the current branch and the base branch of the PR
-      - uses: GrantBirki/git-diff-action@v2.8.0
+      - uses: GrantBirki/git-diff-action@v2.8.1
         id: git-diff-action
         with:
           json_diff_file_output: diff.json
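The `until ... docker inspect -f {{.State.Health.Status}} ...` loop above polls the container healthcheck but retries forever, so a model that never loads only fails when the job times out. A bounded variant might look like the following sketch; the 60-attempt cap and the step name are assumptions, not part of the workflow:

    - name: Wait for LocalAI to become healthy
      run: |
        # poll the Docker healthcheck; give up after roughly two minutes
        for i in $(seq 1 60); do
          [ "$(docker inspect -f '{{.State.Health.Status}}' local-ai)" = "healthy" ] && exit 0
          docker logs --tail 10 local-ai
          sleep 2
        done
        echo "local-ai never became healthy" >&2
        exit 1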
.github/workflows/release.yaml (vendored, 348 lines changed)
@@ -1,324 +1,64 @@
-name: Build and Release
+name: goreleaser
 
 on:
   push:
-    branches:
-      - master
     tags:
       - 'v*'
-  pull_request:
-
-env:
-  GRPC_VERSION: v1.65.0
-
-permissions:
-  contents: write
-
-concurrency:
-  group: ci-releases-${{ github.head_ref || github.ref }}-${{ github.repository }}
-  cancel-in-progress: true
 
 jobs:
-  build-linux-arm:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21.x'
-          cache: false
-      - name: Dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install build-essential ffmpeg protobuf-compiler ccache upx-ucl gawk
-          sudo apt-get install -qy binutils-aarch64-linux-gnu gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libgmock-dev
-          make install-go-tools
-      - name: Install CUDA Dependencies
-        run: |
-          curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/cross-linux-aarch64/cuda-keyring_1.1-1_all.deb
-          sudo dpkg -i cuda-keyring_1.1-1_all.deb
-          sudo apt-get update
-          sudo apt-get install -y cuda-cross-aarch64 cuda-nvcc-cross-aarch64-${CUDA_VERSION} libcublas-cross-aarch64-${CUDA_VERSION}
-        env:
-          CUDA_VERSION: 12-4
-      - name: Cache grpc
-        id: cache-grpc
-        uses: actions/cache@v4
-        with:
-          path: grpc
-          key: ${{ runner.os }}-arm-grpc-${{ env.GRPC_VERSION }}
-      - name: Build grpc
-        if: steps.cache-grpc.outputs.cache-hit != 'true'
-        run: |
-          git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
-          cd grpc && sed -i "216i\ TESTONLY" "third_party/abseil-cpp/absl/container/CMakeLists.txt" && mkdir -p cmake/build && \
-          cd cmake/build && cmake -DgRPC_INSTALL=ON \
-            -DgRPC_BUILD_TESTS=OFF \
-            ../.. && sudo make --jobs 5 --output-sync=target
-      - name: Install gRPC
-        run: |
-          GNU_HOST=aarch64-linux-gnu
-          C_COMPILER_ARM_LINUX=$GNU_HOST-gcc
-          CXX_COMPILER_ARM_LINUX=$GNU_HOST-g++
-
-          CROSS_TOOLCHAIN=/usr/$GNU_HOST
-          CROSS_STAGING_PREFIX=$CROSS_TOOLCHAIN/stage
-          CMAKE_CROSS_TOOLCHAIN=/tmp/arm.toolchain.cmake
-
-          # https://cmake.org/cmake/help/v3.13/manual/cmake-toolchains.7.html#cross-compiling-for-linux
-          echo "set(CMAKE_SYSTEM_NAME Linux)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_SYSTEM_PROCESSOR arm)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_STAGING_PREFIX $CROSS_STAGING_PREFIX)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_SYSROOT ${CROSS_TOOLCHAIN}/sysroot)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_C_COMPILER /usr/bin/$C_COMPILER_ARM_LINUX)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_CXX_COMPILER /usr/bin/$CXX_COMPILER_ARM_LINUX)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)" >> $CMAKE_CROSS_TOOLCHAIN && \
-          echo "set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)" >> $CMAKE_CROSS_TOOLCHAIN
-          GRPC_DIR=$PWD/grpc
-          cd grpc && cd cmake/build && sudo make --jobs 5 --output-sync=target install && \
-          GRPC_CROSS_BUILD_DIR=$GRPC_DIR/cmake/cross_build && \
-          mkdir -p $GRPC_CROSS_BUILD_DIR && \
-          cd $GRPC_CROSS_BUILD_DIR && \
-          cmake -DCMAKE_TOOLCHAIN_FILE=$CMAKE_CROSS_TOOLCHAIN \
-            -DCMAKE_BUILD_TYPE=Release \
-            -DCMAKE_INSTALL_PREFIX=$CROSS_TOOLCHAIN/grpc_install \
-            ../.. && \
-          sudo make -j`nproc` install
-      - name: Build
-        id: build
-        run: |
-          GNU_HOST=aarch64-linux-gnu
-          C_COMPILER_ARM_LINUX=$GNU_HOST-gcc
-          CXX_COMPILER_ARM_LINUX=$GNU_HOST-g++
-
-          CROSS_TOOLCHAIN=/usr/$GNU_HOST
-          CROSS_STAGING_PREFIX=$CROSS_TOOLCHAIN/stage
-          CMAKE_CROSS_TOOLCHAIN=/tmp/arm.toolchain.cmake
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
-          go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
-          export PATH=$PATH:$GOPATH/bin
-          export PATH=/usr/local/cuda/bin:$PATH
-          sudo rm -rf /usr/aarch64-linux-gnu/lib/libstdc++.so.6
-          sudo cp -rf /usr/aarch64-linux-gnu/lib/libstdc++.so* /usr/aarch64-linux-gnu/lib/libstdc++.so.6
-          sudo cp /usr/aarch64-linux-gnu/lib/ld-linux-aarch64.so.1 ld.so
-          BACKEND_LIBS="./grpc/cmake/cross_build/third_party/re2/libre2.a ./grpc/cmake/cross_build/libgrpc.a ./grpc/cmake/cross_build/libgrpc++.a ./grpc/cmake/cross_build/third_party/protobuf/libprotobuf.a /usr/aarch64-linux-gnu/lib/libc.so.6 /usr/aarch64-linux-gnu/lib/libstdc++.so.6 /usr/aarch64-linux-gnu/lib/libgomp.so.1 /usr/aarch64-linux-gnu/lib/libm.so.6 /usr/aarch64-linux-gnu/lib/libgcc_s.so.1 /usr/aarch64-linux-gnu/lib/libdl.so.2 /usr/aarch64-linux-gnu/lib/libpthread.so.0 ./ld.so" \
-          GOOS=linux \
-          GOARCH=arm64 \
-          CMAKE_ARGS="-DProtobuf_INCLUDE_DIRS=$CROSS_STAGING_PREFIX/include -DProtobuf_DIR=$CROSS_STAGING_PREFIX/lib/cmake/protobuf -DgRPC_DIR=$CROSS_STAGING_PREFIX/lib/cmake/grpc -DCMAKE_TOOLCHAIN_FILE=$CMAKE_CROSS_TOOLCHAIN -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++" make dist-cross-linux-arm64
-      - uses: actions/upload-artifact@v4
-        with:
-          name: LocalAI-linux-arm64
-          path: release/
-      - name: Release
-        uses: softprops/action-gh-release@v2
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          files: |
-            release/*
-      - name: Setup tmate session if tests fail
-        if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.22
-        with:
-          detached: true
-          connect-timeout-seconds: 180
-          limit-access-to-actor: true
-  build-linux:
-    runs-on: arc-runner-set
-    steps:
-      - name: Force Install GIT latest
-        run: |
-          sudo apt-get update \
-            && sudo apt-get install -y software-properties-common \
-            && sudo apt-get update \
-            && sudo add-apt-repository -y ppa:git-core/ppa \
-            && sudo apt-get update \
-            && sudo apt-get install -y git
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21.x'
-          cache: false
-      - name: Dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y wget curl build-essential ffmpeg protobuf-compiler ccache upx-ucl gawk cmake libgmock-dev
-          make install-go-tools
-      - name: Intel Dependencies
-        run: |
-          wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null
-          echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list
-          sudo apt update
-          sudo apt install -y intel-basekit
-      - name: Install CUDA Dependencies
-        run: |
-          curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
-          sudo dpkg -i cuda-keyring_1.1-1_all.deb
-          sudo apt-get update
-          sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION}
-        env:
-          CUDA_VERSION: 12-5
-      - name: "Install Hipblas"
-        env:
-          ROCM_VERSION: "6.1"
-          AMDGPU_VERSION: "6.1"
-        run: |
-          set -ex
-
-          sudo apt-get update
-          sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates curl libnuma-dev gnupg
-
-          curl -sL https://repo.radeon.com/rocm/rocm.gpg.key | sudo apt-key add -
-
-          printf "deb [arch=amd64] https://repo.radeon.com/rocm/apt/$ROCM_VERSION/ jammy main" | sudo tee /etc/apt/sources.list.d/rocm.list
-
-          printf "deb [arch=amd64] https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
-          printf 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
-          sudo apt-get update
-
-          sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
-            hipblas-dev rocm-dev \
-            rocblas-dev
-
-          sudo apt-get clean
-          sudo rm -rf /var/lib/apt/lists/*
-          sudo ldconfig
-      - name: Cache grpc
-        id: cache-grpc
-        uses: actions/cache@v4
-        with:
-          path: grpc
-          key: ${{ runner.os }}-grpc-${{ env.GRPC_VERSION }}
-      - name: Build grpc
-        if: steps.cache-grpc.outputs.cache-hit != 'true'
-        run: |
-          git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
-          cd grpc && sed -i "216i\ TESTONLY" "third_party/abseil-cpp/absl/container/CMakeLists.txt" && mkdir -p cmake/build && \
-          cd cmake/build && cmake -DgRPC_INSTALL=ON \
-            -DgRPC_BUILD_TESTS=OFF \
-            ../.. && sudo make --jobs 5 --output-sync=target
-      - name: Install gRPC
-        run: |
-          cd grpc && cd cmake/build && sudo make --jobs 5 --output-sync=target install
-      # BACKEND_LIBS needed for gpu-workload: /opt/intel/oneapi/*/lib/libiomp5.so /opt/intel/oneapi/*/lib/libmkl_core.so /opt/intel/oneapi/*/lib/libmkl_core.so.2 /opt/intel/oneapi/*/lib/libmkl_intel_ilp64.so /opt/intel/oneapi/*/lib/libmkl_intel_ilp64.so.2 /opt/intel/oneapi/*/lib/libmkl_sycl_blas.so /opt/intel/oneapi/*/lib/libmkl_sycl_blas.so.4 /opt/intel/oneapi/*/lib/libmkl_tbb_thread.so /opt/intel/oneapi/*/lib/libmkl_tbb_thread.so.2 /opt/intel/oneapi/*/lib/libsycl.so /opt/intel/oneapi/*/lib/libsycl.so.7 /opt/intel/oneapi/*/lib/libsycl.so.7.1.0 /opt/rocm-*/lib/libamdhip64.so /opt/rocm-*/lib/libamdhip64.so.5 /opt/rocm-*/lib/libamdhip64.so.6 /opt/rocm-*/lib/libamdhip64.so.6.1.60100 /opt/rocm-*/lib/libhipblas.so /opt/rocm-*/lib/libhipblas.so.2 /opt/rocm-*/lib/libhipblas.so.2.1.60100 /opt/rocm-*/lib/librocblas.so /opt/rocm-*/lib/librocblas.so.4 /opt/rocm-*/lib/librocblas.so.4.1.60100 /usr/lib/x86_64-linux-gnu/libstdc++.so.6 /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/x86_64-linux-gnu/libm.so.6 /usr/lib/x86_64-linux-gnu/libgcc_s.so.1 /usr/lib/x86_64-linux-gnu/libc.so.6 /usr/lib/x86_64-linux-gnu/librt.so.1 /usr/local/cuda-*/targets/x86_64-linux/lib/libcublas.so /usr/local/cuda-*/targets/x86_64-linux/lib/libcublasLt.so /usr/local/cuda-*/targets/x86_64-linux/lib/libcudart.so /usr/local/cuda-*/targets/x86_64-linux/lib/stubs/libcuda.so
-      - name: Build
-        id: build
-        run: |
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
-          go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
-          export PATH=$PATH:$GOPATH/bin
-          export PATH=/usr/local/cuda/bin:$PATH
-          export PATH=/opt/rocm/bin:$PATH
-          source /opt/intel/oneapi/setvars.sh
-          sudo cp /lib64/ld-linux-x86-64.so.2 ld.so
-          BACKEND_LIBS="./ld.so ./sources/go-piper/piper/build/fi/lib/libfmt.a ./sources/go-piper/piper-phonemize/pi/lib/libonnxruntime.so.1.14.1 ./sources/go-piper/piper-phonemize/pi/src/libespeak-ng/libespeak-ng.so /usr/lib/x86_64-linux-gnu/libdl.so.2 /usr/lib/x86_64-linux-gnu/librt.so.1 /usr/lib/x86_64-linux-gnu/libpthread.so.0 ./sources/go-piper/piper-phonemize/pi/lib/libpiper_phonemize.so.1 ./sources/go-piper/piper/build/si/lib/libspdlog.a ./sources/go-piper/espeak/ei/lib/libucd.so" \
-          make -j4 dist
-      - uses: actions/upload-artifact@v4
-        with:
-          name: LocalAI-linux
-          path: release/
-      - name: Release
-        uses: softprops/action-gh-release@v2
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          files: |
-            release/*
-      - name: Setup tmate session if tests fail
-        if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.22
-        with:
-          detached: true
-          connect-timeout-seconds: 180
-          limit-access-to-actor: true
-
-  build-macOS-x86_64:
-    runs-on: macos-13
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21.x'
-          cache: false
-      - name: Dependencies
-        run: |
-          brew install protobuf grpc
-          make install-go-tools
-      - name: Build
-        id: build
-        run: |
-          export C_INCLUDE_PATH=/usr/local/include
-          export CPLUS_INCLUDE_PATH=/usr/local/include
-          export PATH=$PATH:$GOPATH/bin
-          export SKIP_GRPC_BACKEND=backend-assets/grpc/whisper
-          make dist
-      - uses: actions/upload-artifact@v4
-        with:
-          name: LocalAI-MacOS-x86_64
-          path: release/
-      - name: Release
-        uses: softprops/action-gh-release@v2
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          files: |
-            release/*
-      - name: Setup tmate session if tests fail
-        if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.22
-        with:
-          detached: true
-          connect-timeout-seconds: 180
-          limit-access-to-actor: true
-
-  build-macOS-arm64:
-    runs-on: macos-14
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21.x'
-          cache: false
-      - name: Dependencies
-        run: |
-          brew install protobuf grpc libomp llvm
-          make install-go-tools
-      - name: Build
-        id: build
-        run: |
-          export C_INCLUDE_PATH=/usr/local/include
-          export CPLUS_INCLUDE_PATH=/usr/local/include
-          export PATH=$PATH:$GOPATH/bin
-          export CC=/opt/homebrew/opt/llvm/bin/clang
-          make dist
-      - uses: actions/upload-artifact@v4
-        with:
-          name: LocalAI-MacOS-arm64
-          path: release/
-      - name: Release
-        uses: softprops/action-gh-release@v2
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          files: |
-            release/*
-      - name: Setup tmate session if tests fail
-        if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.22
-        with:
-          detached: true
-          connect-timeout-seconds: 180
-          limit-access-to-actor: true
+  goreleaser:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.23
+      - name: Run GoReleaser
+        uses: goreleaser/goreleaser-action@v6
+        with:
+          version: v2.11.0
+          args: release --clean
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  launcher-build-darwin:
+    runs-on: macos-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.23
+      - name: Build launcher for macOS ARM64
+        run: |
+          make build-launcher-darwin
+      - name: Upload DMG to Release
+        uses: softprops/action-gh-release@v2
+        with:
+          files: ./dist/LocalAI.dmg
+  launcher-build-linux:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.23
+      - name: Build launcher for Linux
+        run: |
+          sudo apt-get update
+          sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
+          make build-launcher-linux
+      - name: Upload Linux launcher artifacts
+        uses: softprops/action-gh-release@v2
+        with:
+          files: ./local-ai-launcher-linux.tar.xz
.github/workflows/secscan.yaml
vendored
4
.github/workflows/secscan.yaml
vendored
@@ -14,11 +14,11 @@ jobs:
|
|||||||
GO111MODULE: on
|
GO111MODULE: on
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout Source
|
- name: Checkout Source
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v5
|
||||||
if: ${{ github.actor != 'dependabot[bot]' }}
|
if: ${{ github.actor != 'dependabot[bot]' }}
|
||||||
- name: Run Gosec Security Scanner
|
- name: Run Gosec Security Scanner
|
||||||
if: ${{ github.actor != 'dependabot[bot]' }}
|
if: ${{ github.actor != 'dependabot[bot]' }}
|
||||||
uses: securego/gosec@v2.22.4
|
uses: securego/gosec@v2.22.8
|
||||||
with:
|
with:
|
||||||
# we let the report trigger content trigger a failure using the GitHub Security features.
|
# we let the report trigger content trigger a failure using the GitHub Security features.
|
||||||
args: '-no-fail -fmt sarif -out results.sarif ./...'
|
args: '-no-fail -fmt sarif -out results.sarif ./...'
|
||||||
|
|||||||
.github/workflows/stalebot.yml (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
+name: 'Close stale issues and PRs'
+permissions:
+  issues: write
+  pull-requests: write
+on:
+  schedule:
+    - cron: '30 1 * * *'
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v9
+        with:
+          stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
+          stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
+          close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
+          close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
+          days-before-issue-stale: 90
+          days-before-pr-stale: 90
+          days-before-issue-close: 5
+          days-before-pr-close: 10
+          exempt-issue-labels: 'roadmap'
+          exempt-pr-labels: 'roadmap'
.github/workflows/test-extra.yml (vendored, 38 lines changed)
@@ -14,11 +14,33 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
+  # Requires CUDA
+  # tests-chatterbox-tts:
+  #   runs-on: ubuntu-latest
+  #   steps:
+  #     - name: Clone
+  #       uses: actions/checkout@v5
+  #       with:
+  #         submodules: true
+  #     - name: Dependencies
+  #       run: |
+  #         sudo apt-get update
+  #         sudo apt-get install build-essential ffmpeg
+  #         # Install UV
+  #         curl -LsSf https://astral.sh/uv/install.sh | sh
+  #         sudo apt-get install -y ca-certificates cmake curl patch python3-pip
+  #         sudo apt-get install -y libopencv-dev
+  #         pip install --user --no-cache-dir grpcio-tools==1.64.1
+
+  #     - name: Test chatterbox-tts
+  #       run: |
+  #         make --jobs=5 --output-sync=target -C backend/python/chatterbox
+  #         make --jobs=5 --output-sync=target -C backend/python/chatterbox test
   tests-transformers:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -39,7 +61,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -61,7 +83,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -82,7 +104,7 @@ jobs:
 #    runs-on: ubuntu-latest
 #    steps:
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -102,7 +124,7 @@ jobs:
 #    runs-on: ubuntu-latest
 #    steps:
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -164,7 +186,7 @@ jobs:
 #          sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
 #          df -h
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -189,7 +211,7 @@ jobs:
 #    runs-on: ubuntu-latest
 #    steps:
 #      - name: Clone
-#        uses: actions/checkout@v4
+#        uses: actions/checkout@v5
 #        with:
 #          submodules: true
 #      - name: Dependencies
@@ -210,7 +232,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
.github/workflows/test.yml (vendored, 81 lines changed)
@@ -23,6 +23,20 @@ jobs:
       matrix:
         go-version: ['1.21.x']
     steps:
+      - name: Free Disk Space (Ubuntu)
+        uses: jlumbroso/free-disk-space@main
+        with:
+          # this might remove tools that are actually needed,
+          # if set to "true" but frees about 6 GB
+          tool-cache: true
+          # all of these default to true, but feel free to set to
+          # "false" if necessary for your workflow
+          android: true
+          dotnet: true
+          haskell: true
+          large-packages: true
+          docker-images: true
+          swap-storage: true
       - name: Release space from worker
         run: |
           echo "Listing top largest packages"
@@ -56,7 +70,7 @@ jobs:
           sudo rm -rfv build || true
           df -h
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Setup Go ${{ matrix.go-version }}
@@ -67,18 +81,20 @@ jobs:
       # You can test your matrix by printing the current Go version
       - name: Display Go version
         run: go version
+      - name: Proto Dependencies
+        run: |
+          # Install protoc
+          curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
+          unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
+          rm protoc.zip
+          go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
+          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
+          PATH="$PATH:$HOME/go/bin" make protogen-go
       - name: Dependencies
         run: |
           sudo apt-get update
           sudo apt-get install build-essential ccache upx-ucl curl ffmpeg
           sudo apt-get install -y libgmock-dev clang
-          curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg && \
-          sudo install -o root -g root -m 644 conda.gpg /usr/share/keyrings/conda-archive-keyring.gpg && \
-          gpg --keyring /usr/share/keyrings/conda-archive-keyring.gpg --no-default-keyring --fingerprint 34161F5BF5EB1D4BFBBB8F0A8AEB4F8B29D82806 && \
-          sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" > /etc/apt/sources.list.d/conda.list' && \
-          sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" | tee -a /etc/apt/sources.list.d/conda.list' && \
-          sudo apt-get update && \
-          sudo apt-get install -y conda
           # Install UV
           curl -LsSf https://astral.sh/uv/install.sh | sh
           sudo apt-get install -y ca-certificates cmake patch python3-pip unzip
@@ -94,38 +110,15 @@ jobs:
           sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION}
           export CUDACXX=/usr/local/cuda/bin/nvcc
-
-          go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
-          go install github.com/GeertJohan/go.rice/rice@latest
-
           # The python3-grpc-tools package in 22.04 is too old
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
-
           make -C backend/python/transformers
-
-          # Pre-build piper before we start tests in order to have shared libraries in place
-          make sources/go-piper && \
-          GO_TAGS="tts" make -C sources/go-piper piper.o && \
-          sudo cp -rfv sources/go-piper/piper-phonemize/pi/lib/. /usr/lib/
+          make backends/huggingface backends/llama-cpp backends/local-store backends/silero-vad backends/piper backends/whisper backends/stablediffusion-ggml
         env:
           CUDA_VERSION: 12-4
-      - name: Cache grpc
-        id: cache-grpc
-        uses: actions/cache@v4
-        with:
-          path: grpc
-          key: ${{ runner.os }}-grpc-${{ env.GRPC_VERSION }}
-      - name: Build grpc
-        if: steps.cache-grpc.outputs.cache-hit != 'true'
-        run: |
-          git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --jobs 5 --shallow-submodules https://github.com/grpc/grpc && \
-          cd grpc && sed -i "216i\ TESTONLY" "third_party/abseil-cpp/absl/container/CMakeLists.txt" && mkdir -p cmake/build && cd cmake/build && \
-          cmake -DgRPC_INSTALL=ON \
-            -DgRPC_BUILD_TESTS=OFF \
-            ../.. && sudo make --jobs 5
-      - name: Install gRPC
-        run: |
-          cd grpc && cd cmake/build && sudo make --jobs 5 install
       - name: Test
         run: |
           PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test
@@ -173,7 +166,7 @@ jobs:
           sudo rm -rfv build || true
           df -h
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Dependencies
@@ -184,16 +177,10 @@ jobs:
           rm protoc.zip
           go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
           go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
-          go install github.com/GeertJohan/go.rice/rice@latest
           PATH="$PATH:$HOME/go/bin" make protogen-go
-      - name: Build images
-        run: |
-          docker build --build-arg FFMPEG=true --build-arg IMAGE_TYPE=extras --build-arg EXTRA_BACKENDS=rerankers --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile .
-          BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test make docker-aio
       - name: Test
         run: |
-          PATH="$PATH:$HOME/go/bin" LOCALAI_MODELS_DIR=$PWD/models LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio \
-          make run-e2e-aio
+          PATH="$PATH:$HOME/go/bin" make backends/local-store backends/silero-vad backends/llama-cpp backends/whisper backends/piper backends/stablediffusion-ggml docker-build-aio e2e-aio
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
         uses: mxschmitt/action-tmate@v3.22
@@ -209,7 +196,7 @@ jobs:
         go-version: ['1.21.x']
     steps:
       - name: Clone
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           submodules: true
       - name: Setup Go ${{ matrix.go-version }}
@@ -223,8 +210,11 @@ jobs:
       - name: Dependencies
         run: |
           brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
-          pip install --user --no-cache-dir grpcio-tools
-          go install github.com/GeertJohan/go.rice/rice@latest
+          pip install --user --no-cache-dir grpcio-tools==1.71.0 grpcio==1.71.0
+      - name: Build llama-cpp-darwin
+        run: |
+          make protogen-go
+          make backends/llama-cpp-darwin
       - name: Test
         run: |
           export C_INCLUDE_PATH=/usr/local/include
@@ -232,7 +222,8 @@ jobs:
           export CC=/opt/homebrew/opt/llvm/bin/clang
           # Used to run the newer GNUMake version from brew that supports --output-sync
           export PATH="/opt/homebrew/opt/make/libexec/gnubin:$PATH"
-          BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
+          PATH="$PATH:$HOME/go/bin" make protogen-go
+          PATH="$PATH:$HOME/go/bin" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
         uses: mxschmitt/action-tmate@v3.22
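A side note on the `grpcio-tools==1.71.0 grpcio==1.71.0` pins introduced above: keeping the stub generator and the runtime on the same release line avoids generated code that the installed runtime cannot import cleanly. As a standalone step, the same idea might look like this sketch:

    - name: Python gRPC toolchain
      run: |
        # pin generator and runtime together so the stubs emitted by
        # grpcio-tools match the grpcio runtime API
        pip install --user --no-cache-dir grpcio-tools==1.71.0 grpcio==1.71.0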
.github/workflows/update_swagger.yaml (vendored, 2 lines changed)
@@ -9,7 +9,7 @@ jobs:
       fail-fast: false
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - uses: actions/setup-go@v5
         with:
           go-version: 'stable'
.github/workflows/yaml-check.yml (vendored, 10 changed lines)
@@ -8,7 +8,7 @@ jobs:
     steps:
     - name: 'Checkout'
       uses: actions/checkout@master
-    - name: 'Yamllint'
+    - name: 'Yamllint model gallery'
       uses: karancode/yamllint-github-action@master
       with:
         yamllint_file_or_dir: 'gallery'
@@ -16,3 +16,11 @@ jobs:
         yamllint_comment: true
       env:
        GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+    - name: 'Yamllint Backend gallery'
+      uses: karancode/yamllint-github-action@master
+      with:
+        yamllint_file_or_dir: 'backend'
+        yamllint_strict: false
+        yamllint_comment: true
+      env:
+        GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
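Both lint steps wrap the standalone `yamllint` tool, so the new backend-gallery check can be run locally before pushing; this assumes `yamllint` is installed (for example via `pip install yamllint`):

```bash
# Lint the two directories the workflow now covers.
yamllint gallery/
yamllint backend/
```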
.gitignore (vendored, 11 changed lines)
@@ -5,9 +5,14 @@ __pycache__/
 *.o
 get-sources
 prepare-sources
-/backend/cpp/llama/grpc-server
-/backend/cpp/llama/llama.cpp
+/backend/cpp/llama-cpp/grpc-server
+/backend/cpp/llama-cpp/llama.cpp
 /backend/cpp/llama-*
+!backend/cpp/llama-cpp
+/backends
+/backend-images
+/result.yaml
+protoc

 *.log

@@ -19,7 +24,7 @@ go-bert

 # LocalAI build binary
 LocalAI
-local-ai
+/local-ai
 # prevent above rules from omitting the helm chart
 !charts/*
 # prevent above rules from omitting the api/localai folder
.goreleaser.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
version: 2
before:
  hooks:
    - make protogen-go
    - go mod tidy
dist: release
source:
  enabled: true
  name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
builds:
  - main: ./cmd/local-ai
    env:
      - CGO_ENABLED=0
    ldflags:
      - -s -w
      - -X "github.com/mudler/LocalAI/internal.Version={{ .Tag }}"
      - -X "github.com/mudler/LocalAI/internal.Commit={{ .FullCommit }}"
    goos:
      - linux
      - darwin
      #- windows
    goarch:
      - amd64
      - arm64
archives:
  - formats: [ 'binary' ] # this removes the tar of the archives, leaving the binaries alone
    name_template: local-ai-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
checksum:
  name_template: '{{ .ProjectName }}-{{ .Tag }}-checksums.txt'
snapshot:
  version_template: "{{ .Tag }}-next"
changelog:
  use: github-native
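This GoReleaser v2 configuration can be exercised locally with a snapshot build; the flags below are standard GoReleaser options, though the project's actual release invocation is not shown in this diff:

```bash
# Build untagged snapshot binaries into the `release` dist directory
# declared above, without publishing anything.
goreleaser release --snapshot --clean
```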
.vscode/launch.json (vendored, 2 changed lines)
@@ -26,7 +26,7 @@
         "LOCALAI_P2P": "true",
         "LOCALAI_FEDERATED": "true"
       },
-      "buildFlags": ["-tags", "p2p tts", "-v"],
+      "buildFlags": ["-tags", "", "-v"],
       "envFile": "${workspaceFolder}/.env",
       "cwd": "${workspaceRoot}"
     }
Dockerfile (399 changed lines)
@@ -1,121 +1,32 @@
-ARG IMAGE_TYPE=extras
 ARG BASE_IMAGE=ubuntu:22.04
 ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
 ARG INTEL_BASE_IMAGE=${BASE_IMAGE}

-# The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
-FROM ${BASE_IMAGE} AS requirements-core
-
-USER root
-
-ARG GO_VERSION=1.22.6
-ARG CMAKE_VERSION=3.26.4
-ARG CMAKE_FROM_SOURCE=false
-ARG TARGETARCH
-ARG TARGETVARIANT
+FROM ${BASE_IMAGE} AS requirements

 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,exllama2:/build/backend/python/exllama2/run.sh"

 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
-        build-essential \
-        ccache \
-        ca-certificates \
-        curl libssl-dev \
-        git \
-        git-lfs \
-        unzip upx-ucl && \
+        ca-certificates curl wget espeak-ng libgomp1 \
+        ffmpeg libopenblas-base libopenblas-dev && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*

-# Install CMake (the version in 22.04 is too old)
-RUN <<EOT bash
-    if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
-        curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
-    else
-        apt-get update && \
-        apt-get install -y \
-            cmake && \
-        apt-get clean && \
-        rm -rf /var/lib/apt/lists/*
-    fi
-EOT
-
-# Install Go
-RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
-ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin
-
-# Install grpc compilers and rice
-RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
-    go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af && \
-    go install github.com/GeertJohan/go.rice/rice@latest
-
-COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
-RUN update-ca-certificates
-
-RUN test -n "$TARGETARCH" \
-    || (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')
-
-# Use the variables in subsequent instructions
-RUN echo "Target Architecture: $TARGETARCH"
-RUN echo "Target Variant: $TARGETVARIANT"
-
-# Cuda
-ENV PATH=/usr/local/cuda/bin:${PATH}
-
-# HipBLAS requirements
-ENV PATH=/opt/rocm/bin:${PATH}
-
-# OpenBLAS requirements and stable diffusion
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-        libopenblas-dev && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-
-WORKDIR /build
-
-###################################
-###################################
-
-# The requirements-extras target is for any builds with IMAGE_TYPE=extras. It should not be placed in this target unless every IMAGE_TYPE=extras build will use it
-FROM requirements-core AS requirements-extras
-
-# Install uv as a system package
-RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
-ENV PATH="/root/.cargo/bin:${PATH}"
-
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-        espeak-ng \
-        espeak \
-        python3-pip \
-        python-is-python3 \
-        python3-dev llvm \
-        python3-venv && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/* && \
-    pip install --upgrade pip
-
-# Install grpcio-tools (the version in 22.04 is too old)
-RUN pip install --user grpcio-tools
-
-###################################
-###################################

 # The requirements-drivers target is for BUILD_TYPE specific items. If you need to install something specific to CUDA, or specific to ROCM, it goes here.
-# This target will be built on top of requirements-core or requirements-extras as retermined by the IMAGE_TYPE build-arg
-FROM requirements-${IMAGE_TYPE} AS requirements-drivers
+FROM requirements AS requirements-drivers

 ARG BUILD_TYPE
 ARG CUDA_MAJOR_VERSION=12
-ARG CUDA_MINOR_VERSION=0
+ARG CUDA_MINOR_VERSION=8
 ARG SKIP_DRIVERS=false
+ARG TARGETARCH
+ARG TARGETVARIANT
 ENV BUILD_TYPE=${BUILD_TYPE}

+RUN mkdir -p /run/localai
+RUN echo "default" > /run/localai/capability

 # Vulkan requirements
 RUN <<EOT bash
     if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
@@ -128,7 +39,8 @@ RUN <<EOT bash
         apt-get install -y \
             vulkan-sdk && \
         apt-get clean && \
-        rm -rf /var/lib/apt/lists/*
+        rm -rf /var/lib/apt/lists/* && \
+        echo "vulkan" > /run/localai/capability
     fi
 EOT
@@ -155,7 +67,14 @@ RUN <<EOT bash
         libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
         libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
         apt-get clean && \
-        rm -rf /var/lib/apt/lists/*
+        rm -rf /var/lib/apt/lists/* && \
+        echo "nvidia" > /run/localai/capability
+    fi
+EOT
+
+RUN <<EOT bash
+    if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
+        echo "nvidia-l4t" > /run/localai/capability
     fi
 EOT
@@ -175,11 +94,94 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
         rocblas-dev && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/* && \
+    echo "amd" > /run/localai/capability && \
     # I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
     # to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
     ldconfig \
     ; fi

+RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
+    ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
+    ; fi
+
+RUN expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
+
+# Cuda
+ENV PATH=/usr/local/cuda/bin:${PATH}
+
+# HipBLAS requirements
+ENV PATH=/opt/rocm/bin:${PATH}
+
+###################################
+###################################
+
+# The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
+FROM requirements-drivers AS build-requirements
+
+ARG GO_VERSION=1.22.6
+ARG CMAKE_VERSION=3.26.4
+ARG CMAKE_FROM_SOURCE=false
+ARG TARGETARCH
+ARG TARGETVARIANT
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+        build-essential \
+        ccache \
+        ca-certificates espeak-ng \
+        curl libssl-dev \
+        git \
+        git-lfs \
+        unzip upx-ucl python3 python-is-python3 && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+# Install CMake (the version in 22.04 is too old)
+RUN <<EOT bash
+    if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
+        curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
+    else
+        apt-get update && \
+        apt-get install -y \
+            cmake && \
+        apt-get clean && \
+        rm -rf /var/lib/apt/lists/*
+    fi
+EOT
+
+# Install Go
+RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
+ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin
+
+# Install grpc compilers
+RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
+    go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
+
+COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
+RUN update-ca-certificates
+
+# OpenBLAS requirements and stable diffusion
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+        libopenblas-dev && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN test -n "$TARGETARCH" \
+    || (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')
+
+# Use the variables in subsequent instructions
+RUN echo "Target Architecture: $TARGETARCH"
+RUN echo "Target Variant: $TARGETVARIANT"
+
+WORKDIR /build
+
 ###################################
 ###################################
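Every driver branch above stamps its result into `/run/localai/capability`, so the image carries a single machine-readable marker of what it was built for. A minimal sketch of how a script inside the container could branch on it (this consumer is illustrative, not part of the diff):

```bash
#!/bin/bash
# Read the capability marker baked in at build time; fall back to "default".
CAPABILITY="$(cat /run/localai/capability 2>/dev/null || echo default)"

case "$CAPABILITY" in
  nvidia|nvidia-l4t) echo "CUDA-enabled image" ;;
  amd)               echo "ROCm/HIP-enabled image" ;;
  intel)             echo "Intel oneAPI image" ;;
  vulkan)            echo "Vulkan image" ;;
  *)                 echo "CPU-only (default) image" ;;
esac
```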
@@ -190,69 +192,25 @@ FROM ${INTEL_BASE_IMAGE} AS intel
 RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
     gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
 RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list

-###################################
-###################################
-
-# The grpc target does one thing, it builds and installs GRPC. This is in it's own layer so that it can be effectively cached by CI.
-# You probably don't need to change anything here, and if you do, make sure that CI is adjusted so that the cache continues to work.
-FROM ${GRPC_BASE_IMAGE} AS grpc
-
-# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
-ARG GRPC_MAKEFLAGS="-j4 -Otarget"
-ARG GRPC_VERSION=v1.65.0
-ARG CMAKE_FROM_SOURCE=false
-ARG CMAKE_VERSION=3.26.4
-
-ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
-
-WORKDIR /build
-
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
-        ca-certificates \
-        build-essential curl libssl-dev \
-        git && \
+        intel-oneapi-runtime-libs && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*

-# Install CMake (the version in 22.04 is too old)
-RUN <<EOT bash
-    if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
-        curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
-    else
-        apt-get update && \
-        apt-get install -y \
-            cmake && \
-        apt-get clean && \
-        rm -rf /var/lib/apt/lists/*
-    fi
-EOT
-
-# We install GRPC to a different prefix here so that we can copy in only the build artifacts later
-# saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
-# and running make install in the target container
-RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
-    mkdir -p /build/grpc/cmake/build && \
-    cd /build/grpc/cmake/build && \
-    sed -i "216i\ TESTONLY" "../../third_party/abseil-cpp/absl/container/CMakeLists.txt" && \
-    cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX:PATH=/opt/grpc ../.. && \
-    make && \
-    make install && \
-    rm -rf /build
-
 ###################################
 ###################################

 # The builder-base target has the arguments, variables, and copies shared between full builder images and the uncompiled devcontainer
-FROM requirements-drivers AS builder-base
+FROM build-requirements AS builder-base

-ARG GO_TAGS="tts p2p"
+ARG GO_TAGS=""
 ARG GRPC_BACKENDS
 ARG MAKEFLAGS
 ARG LD_FLAGS="-s -w"
+ARG TARGETARCH
+ARG TARGETVARIANT
 ENV GRPC_BACKENDS=${GRPC_BACKENDS}
 ENV GO_TAGS=${GO_TAGS}
 ENV MAKEFLAGS=${MAKEFLAGS}
@@ -266,9 +224,7 @@ RUN echo "GO_TAGS: $GO_TAGS" && echo "TARGETARCH: $TARGETARCH"
 WORKDIR /build

-# We need protoc installed, and the version in 22.04 is too old. We will create one as part installing the GRPC build below
-# but that will also being in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
-# here so that we can generate the grpc code for the stablediffusion build
+# We need protoc installed, and the version in 22.04 is too old.
 RUN <<EOT bash
     if [ "amd64" = "$TARGETARCH" ]; then
         curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
@@ -285,34 +241,39 @@ EOT
 ###################################
 ###################################

+# Compile backends first in a separate stage
+FROM builder-base AS builder-backends
+ARG TARGETARCH
+ARG TARGETVARIANT
+
+WORKDIR /build
+
+COPY ./Makefile .
+COPY ./backend ./backend
+COPY ./go.mod .
+COPY ./go.sum .
+COPY ./.git ./.git
+
+# Some of the Go backends use libs from the main src, we could further optimize the caching by building the CPP backends before here
+COPY ./pkg/grpc ./pkg/grpc
+COPY ./pkg/utils ./pkg/utils
+COPY ./pkg/langchain ./pkg/langchain
+
+RUN ls -l ./
+RUN make protogen-go
+
 # The builder target compiles LocalAI. This target is not the target that will be uploaded to the registry.
 # Adjustments to the build process should likely be made here.
-FROM builder-base AS builder
-
-# Install the pre-built GRPC
-COPY --from=grpc /opt/grpc /usr/local
-
-# Rebuild with defaults backends
+FROM builder-backends AS builder
 WORKDIR /build

 COPY . .
-COPY .git .
-
-RUN make prepare

 ## Build the binary
 ## If we're on arm64 AND using cublas/hipblas, skip some of the llama-compat backends to save space
 ## Otherwise just run the normal build
-RUN if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
-        SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx512 backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build; \
-    else \
-        make build; \
-    fi
-
-RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \
-        mkdir -p /build/sources/go-piper/piper-phonemize/pi/lib/ \
-        touch /build/sources/go-piper/piper-phonemize/pi/lib/keep \
-    ; fi
+RUN make build

 ###################################
 ###################################
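The new `builder-backends` stage copies in only the files the backend builds depend on, so Docker can reuse its layers when unrelated sources change. A hedged example of warming that cache by targeting the stage directly (the tag is illustrative):

```bash
# Build only the backend-compilation stage; subsequent full builds
# that start FROM builder-backends reuse these cached layers.
docker build --target builder-backends -t local-ai:backends-cache .
```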
@@ -322,24 +283,11 @@ RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \

 FROM builder-base AS devcontainer

-ARG FFMPEG
-
-COPY --from=grpc /opt/grpc /usr/local
-
 COPY .devcontainer-scripts /.devcontainer-scripts

-# Add FFmpeg
-RUN if [ "${FFMPEG}" = "true" ]; then \
-        apt-get update && \
-        apt-get install -y --no-install-recommends \
-            ffmpeg && \
-        apt-get clean && \
-        rm -rf /var/lib/apt/lists/* \
-    ; fi
-
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
-        ssh less wget
+        ssh less
 # For the devcontainer, leave apt functional in case additional devtools are needed at runtime.

 RUN go install github.com/go-delve/delve/cmd/dlv@latest
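With delve preinstalled in the devcontainer, the server can be started under the debugger; the main package path matches the one declared in `.goreleaser.yaml` above, while the `run` argument mirrors the CLI usage shown later in the README:

```bash
# Debug LocalAI from inside the devcontainer.
dlv debug ./cmd/local-ai -- run
```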
@@ -353,98 +301,27 @@ RUN go install github.com/mikefarah/yq/v4@latest
 # If you cannot find a more suitable place for an addition, this layer is a suitable place for it.
 FROM requirements-drivers

-ARG FFMPEG
-ARG BUILD_TYPE
-ARG TARGETARCH
-ARG IMAGE_TYPE=extras
-ARG EXTRA_BACKENDS
-ARG MAKEFLAGS
-
-ENV BUILD_TYPE=${BUILD_TYPE}
-ENV REBUILD=false
 ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
-ENV MAKEFLAGS=${MAKEFLAGS}

 ARG CUDA_MAJOR_VERSION=12
 ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
 ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
 ENV NVIDIA_VISIBLE_DEVICES=all

-# Add FFmpeg
-RUN if [ "${FFMPEG}" = "true" ]; then \
-        apt-get update && \
-        apt-get install -y --no-install-recommends \
-            ffmpeg && \
-        apt-get clean && \
-        rm -rf /var/lib/apt/lists/* \
-    ; fi
-
-WORKDIR /build
-
-# we start fresh & re-copy all assets because `make build` does not clean up nicely after itself
-# so when `entrypoint.sh` runs `make build` again (which it does by default), the build would fail
-# see https://github.com/go-skynet/LocalAI/pull/658#discussion_r1241971626 and
-# https://github.com/go-skynet/LocalAI/pull/434
-COPY . .
-
-COPY --from=builder /build/sources ./sources/
-COPY --from=grpc /opt/grpc /usr/local
-
-RUN make prepare-sources
+WORKDIR /
+COPY ./entrypoint.sh .

 # Copy the binary
 COPY --from=builder /build/local-ai ./

-# Copy shared libraries for piper
-COPY --from=builder /build/sources/go-piper/piper-phonemize/pi/lib/* /usr/lib/
-
-# Change the shell to bash so we can use [[ tests below
-SHELL ["/bin/bash", "-c"]
-# We try to strike a balance between individual layer size (as that affects total push time) and total image size
-# Splitting the backends into more groups with fewer items results in a larger image, but a smaller size for the largest layer
-# Splitting the backends into fewer groups with more items results in a smaller image, but a larger size for the largest layer
-
-RUN if [[ ( "${IMAGE_TYPE}" == "extras ")]]; then \
-        apt-get -qq -y install espeak-ng \
-    ; fi
-
-RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/coqui \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "faster-whisper" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/faster-whisper \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "diffusers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/diffusers \
-    ; fi
-
-RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/kokoro \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "exllama2" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/exllama2 \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "transformers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/transformers \
-    ; fi
-
-RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vllm" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/vllm \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "bark" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/bark \
-    ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "rerankers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/rerankers \
-    ; fi
-
 # Make sure the models directory exists
-RUN mkdir -p /build/models
+RUN mkdir -p /models /backends

 # Define the health check command
 HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
   CMD curl -f ${HEALTHCHECK_ENDPOINT} || exit 1

-VOLUME /build/models
+VOLUME /models /backends
 EXPOSE 8080
-ENTRYPOINT [ "/build/entrypoint.sh" ]
+ENTRYPOINT [ "/entrypoint.sh" ]
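With the runtime image rooted at `/` instead of `/build`, persistent state lives in the two declared volumes. A minimal run sketch (host paths are placeholders):

```bash
# Persist downloaded models and backends across container restarts.
docker run -ti --name local-ai -p 8080:8080 \
  -v "$PWD/models:/models" \
  -v "$PWD/backends:/backends" \
  localai/localai:latest
```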
Deleted file (an Earthly build definition):
@@ -1,5 +0,0 @@
-VERSION 0.7
-
-build:
-    FROM DOCKERFILE -f Dockerfile .
-    SAVE ARTIFACT /usr/bin/local-ai AS LOCAL local-ai
README.md (137 changed lines)
@@ -1,6 +1,6 @@
 <h1 align="center">
   <br>
-  <img height="300" src="./core/http/static/logo.png"> <br>
+  <img width="300" src="./core/http/static/logo.png"> <br>
   <br>
 </h1>
@@ -110,25 +110,72 @@ curl https://localai.io/install.sh | sh
 For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/).

+### macOS Download:
+
+<a href="https://github.com/mudler/LocalAI/releases/latest/download/LocalAI.dmg">
+<img src="https://img.shields.io/badge/Download-macOS-blue?style=for-the-badge&logo=apple&logoColor=white" alt="Download LocalAI for macOS"/>
+</a>
+
 Or run with docker:

 ### CPU only image:
-```bash
-docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-cpu
-```
-### Nvidia GPU:
-```bash
-docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
-```
-### CPU and GPU image (bigger size):
 ```bash
 docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
 ```
-### AIO images (it will pre-download a set of models ready for use, see https://localai.io/basics/container/)
+
+### NVIDIA GPU Images:
 ```bash
-docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
+# CUDA 12.0
+docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
+
+# CUDA 11.7
+docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-11
+
+# NVIDIA Jetson (L4T) ARM64
+docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
 ```
+
+### AMD GPU Images (ROCm):
+```bash
+docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-gpu-hipblas
+```
+
+### Intel GPU Images (oneAPI):
+```bash
+docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel
+```
+
+### Vulkan GPU Images:
+```bash
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-gpu-vulkan
+```
+
+### AIO Images (pre-downloaded models):
+```bash
+# CPU version
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
+
+# NVIDIA CUDA 12 version
+docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
+
+# NVIDIA CUDA 11 version
+docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
+
+# Intel GPU version
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel
+
+# AMD GPU version
+docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas
+```
+
+For more information about the AIO images and pre-downloaded models, see [Container Documentation](https://localai.io/basics/container/).
+
 To load models:

 ```bash
@@ -144,10 +191,19 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
 local-ai run oci://localai/phi-2:latest
 ```

+> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
+
 For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)

 ## 📰 Latest project news

+- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
+- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
+- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
+- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
+- May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery).
+- May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0)
+- Apr 2025: Rebrand, WebUI enhancements
 - Apr 2025: [LocalAGI](https://github.com/mudler/LocalAGI) and [LocalRecall](https://github.com/mudler/LocalRecall) join the LocalAI family stack.
 - Apr 2025: WebUI overhaul, AIO images updates
 - Feb 2025: Backend cleanup, Breaking changes, new backends (kokoro, OutelTTS, faster-whisper), Nvidia L4T images
@@ -166,6 +222,7 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
 ## 🚀 [Features](https://localai.io/features/)

+- 🧩 [Backend Gallery](https://localai.io/backends/): Install/remove backends on the fly, powered by OCI images — fully customizable and API-driven.
 - 📖 [Text generation with GPTs](https://localai.io/features/text-generation/) (`llama.cpp`, `transformers`, `vllm` ... [:book: and more](https://localai.io/model-compatibility/index.html#model-compatibility-table))
 - 🗣 [Text to Audio](https://localai.io/features/text-to-audio/)
 - 🔈 [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
@@ -175,12 +232,67 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
 - ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
 - 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
 - 🥽 [Vision API](https://localai.io/features/gpt-vision/)
+- 🔍 [Object Detection](https://localai.io/features/object-detection/)
 - 📈 [Reranker API](https://localai.io/features/reranker/)
 - 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
 - [Agentic capabilities](https://github.com/mudler/LocalAGI)
 - 🔊 Voice activity detection (Silero-VAD support)
 - 🌍 Integrated WebUI!

+## 🧩 Supported Backends & Acceleration
+
+LocalAI supports a comprehensive range of AI backends with multiple acceleration options:
+
+### Text Generation & Language Models
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12, ROCm, Intel SYCL, Vulkan, Metal, CPU |
+| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12, ROCm, Intel |
+| **transformers** | HuggingFace transformers framework | CUDA 11/12, ROCm, Intel, CPU |
+| **exllama2** | GPTQ inference library | CUDA 12 |
+| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
+| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
+
+### Audio & Speech Processing
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12, ROCm, Intel SYCL, Vulkan, CPU |
+| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12, ROCm, Intel, CPU |
+| **bark** | Text-to-audio generation | CUDA 12, ROCm, Intel |
+| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
+| **coqui** | Advanced TTS with 1100+ languages | CUDA 12, ROCm, Intel, CPU |
+| **kokoro** | Lightweight TTS model | CUDA 12, ROCm, Intel, CPU |
+| **chatterbox** | Production-grade TTS | CUDA 11/12, CPU |
+| **piper** | Fast neural TTS system | CPU |
+| **kitten-tts** | Kitten TTS models | CPU |
+| **silero-vad** | Voice Activity Detection | CPU |
+
+### Image & Video Generation
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12, Intel SYCL, Vulkan, CPU |
+| **diffusers** | HuggingFace diffusion models | CUDA 11/12, ROCm, Intel, Metal, CPU |
+
+### Specialized AI Tasks
+| Backend | Description | Acceleration Support |
+|---------|-------------|---------------------|
+| **rfdetr** | Real-time object detection | CUDA 12, Intel, CPU |
+| **rerankers** | Document reranking API | CUDA 11/12, ROCm, Intel, CPU |
+| **local-store** | Vector database | CPU |
+| **huggingface** | HuggingFace API integration | API-based |
+
+### Hardware Acceleration Matrix
+
+| Acceleration Type | Supported Backends | Hardware Support |
+|-------------------|-------------------|------------------|
+| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
+| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
+| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark | AMD Graphics |
+| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
+| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
+| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
+| **NVIDIA Jetson** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI |
+| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
+
 ### 🔗 Community and integrations
@@ -195,6 +307,9 @@ WebUIs:
 Model galleries
 - https://github.com/go-skynet/model-gallery

+Voice:
+- https://github.com/richiejp/VoxInput
+
 Other:
 - Helm chart https://github.com/go-skynet/helm-charts
 - VSCode extension https://github.com/badgooooor/localai-vscode-plugin
AIO model preset configs:

@@ -1,5 +1,6 @@ (embeddings preset)
 embeddings: true
 name: text-embedding-ada-002
+backend: llama-cpp
 parameters:
   model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf
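The embeddings preset is served through LocalAI's OpenAI-compatible API, so it can be smoke-tested with plain curl once the server is up (host and port assume the default `docker run` from the README):

```bash
# Request an embedding from the preset defined above.
curl http://localhost:8080/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{"model": "text-embedding-ada-002", "input": "A test sentence"}'
```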
@@ -1,7 +1,13 @@ (reranker preset)
 name: jina-reranker-v1-base-en
-backend: rerankers
+reranking: true
+f16: true
 parameters:
-  model: cross-encoder
+  model: jina-reranker-v1-tiny-en.f16.gguf
+backend: llama-cpp
+download_files:
+  - filename: jina-reranker-v1-tiny-en.f16.gguf
+    sha256: 5f696cf0d0f3d347c4a279eee8270e5918554cdac0ed1f632f2619e4e8341407
+    uri: huggingface://mradermacher/jina-reranker-v1-tiny-en-GGUF/jina-reranker-v1-tiny-en.f16.gguf
 usage: |
   You can test this model with curl like this:
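The truncated `usage` note above refers to LocalAI's reranking endpoint; a representative call might look like this (the request shape follows the Jina-style rerank API that LocalAI exposes, so treat the field names as an assumption rather than a quote from this diff):

```bash
# Rank candidate documents against a query using the reranker preset.
curl http://localhost:8080/v1/rerank \
  -H "Content-Type: application/json" \
  -d '{
    "model": "jina-reranker-v1-base-en",
    "query": "Organic skincare products for sensitive skin",
    "documents": ["Eco-friendly kitchenware", "Skincare tips for sensitive skin"]
  }'
```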
@@ -2,7 +2,7 @@ name: tts-1
 download_files:
   - filename: voice-en-us-amy-low.tar.gz
     uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-amy-low.tar.gz
+backend: piper
 parameters:
   model: en-us-amy-low.onnx
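Pinning the voice to the `piper` backend does not change how the preset is called; a representative request against LocalAI's TTS route (endpoint and field names are the commonly documented ones, stated here as an assumption):

```bash
# Synthesize speech with the piper voice configured above.
curl http://localhost:8080/tts \
  -H "Content-Type: application/json" \
  -d '{"model": "tts-1", "input": "Hello from LocalAI"}' \
  --output speech.wav
```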
@@ -1,5 +1,6 @@ (text generation preset)
 context_size: 8192
 f16: true
+backend: llama-cpp
 function:
   grammar:
     no_mixed_free_string: true
@@ -1,5 +1,6 @@ (vision preset)
 context_size: 4096
 f16: true
+backend: llama-cpp
 mmap: true
 mmproj: minicpm-v-2_6-mmproj-f16.gguf
 name: gpt-4o
@@ -135,4 +135,4 @@ check_vars
 echo "===> Starting LocalAI[$PROFILE] with the following models: $MODELS"

-exec /build/entrypoint.sh "$@"
+exec /entrypoint.sh "$@"
The same backend-pinning changes repeat for a second AIO profile:

@@ -1,5 +1,6 @@ (embeddings preset)
 embeddings: true
 name: text-embedding-ada-002
+backend: llama-cpp
 parameters:
   model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf

@@ -1,7 +1,13 @@ (reranker preset)
 name: jina-reranker-v1-base-en
-backend: rerankers
+reranking: true
+f16: true
 parameters:
-  model: cross-encoder
+  model: jina-reranker-v1-tiny-en.f16.gguf
+backend: llama-cpp
+download_files:
+  - filename: jina-reranker-v1-tiny-en.f16.gguf
+    sha256: 5f696cf0d0f3d347c4a279eee8270e5918554cdac0ed1f632f2619e4e8341407
+    uri: huggingface://mradermacher/jina-reranker-v1-tiny-en-GGUF/jina-reranker-v1-tiny-en.f16.gguf
 usage: |
   You can test this model with curl like this:

@@ -2,7 +2,7 @@ name: tts-1
 download_files:
   - filename: voice-en-us-amy-low.tar.gz
     uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-amy-low.tar.gz
+backend: piper
 parameters:
   model: en-us-amy-low.onnx

@@ -1,5 +1,6 @@ (text generation preset)
 context_size: 4096
 f16: true
+backend: llama-cpp
 function:
   capture_llm_results:
     - (?s)<Thought>(.*?)</Thought>

@@ -1,4 +1,5 @@ (vision preset)
 context_size: 4096
+backend: llama-cpp
 f16: true
 mmap: true
 mmproj: minicpm-v-2_6-mmproj-f16.gguf
And once more for a third AIO profile:

@@ -1,5 +1,6 @@ (embeddings preset)
 embeddings: true
 name: text-embedding-ada-002
+backend: llama-cpp
 parameters:
   model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf

@@ -1,7 +1,13 @@ (reranker preset)
 name: jina-reranker-v1-base-en
-backend: rerankers
+reranking: true
+f16: true
 parameters:
-  model: cross-encoder
+  model: jina-reranker-v1-tiny-en.f16.gguf
+backend: llama-cpp
+download_files:
+  - filename: jina-reranker-v1-tiny-en.f16.gguf
+    sha256: 5f696cf0d0f3d347c4a279eee8270e5918554cdac0ed1f632f2619e4e8341407
+    uri: huggingface://mradermacher/jina-reranker-v1-tiny-en-GGUF/jina-reranker-v1-tiny-en.f16.gguf
 usage: |
   You can test this model with curl like this:

@@ -2,7 +2,7 @@ name: tts-1
 download_files:
   - filename: voice-en-us-amy-low.tar.gz
     uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-amy-low.tar.gz
+backend: piper
 parameters:
   model: en-us-amy-low.onnx

@@ -1,5 +1,6 @@ (text generation preset)
 context_size: 4096
 f16: true
+backend: llama-cpp
 function:
   capture_llm_results:
     - (?s)<Thought>(.*?)</Thought>

@@ -1,4 +1,5 @@ (vision preset)
 context_size: 4096
+backend: llama-cpp
 f16: true
 mmap: true
 mmproj: minicpm-v-2_6-mmproj-f16.gguf
assets.go (deleted, 15 lines)
@@ -1,15 +0,0 @@
-package main
-
-import (
-	rice "github.com/GeertJohan/go.rice"
-)
-
-var backendAssets *rice.Box
-
-func init() {
-	var err error
-	backendAssets, err = rice.FindBox("backend-assets")
-	if err != nil {
-		panic(err)
-	}
-}
131
backend/Dockerfile.golang
Normal file
131
backend/Dockerfile.golang
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
ARG BASE_IMAGE=ubuntu:22.04
|
||||||
|
|
||||||
|
FROM ${BASE_IMAGE} AS builder
|
||||||
|
ARG BACKEND=rerankers
|
||||||
|
ARG BUILD_TYPE
|
||||||
|
ENV BUILD_TYPE=${BUILD_TYPE}
|
||||||
|
ARG CUDA_MAJOR_VERSION
|
||||||
|
ARG CUDA_MINOR_VERSION
|
||||||
|
ARG SKIP_DRIVERS=false
|
||||||
|
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
|
||||||
|
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
ARG TARGETARCH
|
||||||
|
ARG TARGETVARIANT
|
||||||
|
ARG GO_VERSION=1.22.6
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
build-essential \
|
||||||
|
git ccache \
|
||||||
|
ca-certificates \
|
||||||
|
make cmake \
|
||||||
|
curl unzip \
|
||||||
|
libssl-dev && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
|
||||||
|
# Cuda
|
||||||
|
ENV PATH=/usr/local/cuda/bin:${PATH}
|
||||||
|
|
||||||
|
# HipBLAS requirements
|
||||||
|
ENV PATH=/opt/rocm/bin:${PATH}
|
||||||
|
|
||||||
|
# Vulkan requirements
|
||||||
|
RUN <<EOT bash
|
||||||
|
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
|
||||||
|
apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
software-properties-common pciutils wget gpg-agent && \
|
||||||
|
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
|
||||||
|
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
|
||||||
|
apt-get update && \
|
||||||
|
apt-get install -y \
|
||||||
|
vulkan-sdk && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
fi
|
||||||
|
EOT
|
||||||
|
|
||||||
|
# CuBLAS requirements
|
||||||
|
RUN <<EOT bash
|
||||||
|
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
|
||||||
|
apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
software-properties-common pciutils
|
||||||
|
if [ "amd64" = "$TARGETARCH" ]; then
|
||||||
|
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
|
||||||
|
fi
|
||||||
|
if [ "arm64" = "$TARGETARCH" ]; then
|
||||||
|
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
|
||||||
|
fi
|
||||||
|
dpkg -i cuda-keyring_1.1-1_all.deb && \
|
||||||
|
rm -f cuda-keyring_1.1-1_all.deb && \
|
||||||
|
apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
|
||||||
|
libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
|
||||||
|
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
|
||||||
|
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
|
||||||
|
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
|
||||||
|
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
fi
|
||||||
|
EOT
|
||||||
|
|
||||||
|
# If we are building with clblas support, we need the libraries for the builds
|
||||||
|
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
|
||||||
|
apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
libclblast-dev && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/* \
|
||||||
|
; fi
|
||||||
|
|
||||||
|
RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
|
||||||
|
apt-get update && \
|
||||||
|
apt-get install -y --no-install-recommends \
|
||||||
|
hipblas-dev \
|
||||||
|
rocblas-dev && \
|
||||||
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/* && \
|
||||||
|
# I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
|
||||||
|
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
|
||||||
|
ldconfig \
|
||||||
|
; fi
|
||||||
|
|
||||||
|
# Install Go
|
||||||
|
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
|
||||||
|
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin
|
||||||
|
|
||||||
|
# Install grpc compilers
|
||||||
|
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
|
||||||
|
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
|
||||||
|
RUN echo "TARGETARCH: $TARGETARCH"

# We need protoc installed, and the version in 22.04 is too old. We will build one as part of installing GRPC below,
# but that will also bring in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
RUN <<EOT bash
    if [ "amd64" = "$TARGETARCH" ]; then
        curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
        unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
        rm protoc.zip
    fi
    if [ "arm64" = "$TARGETARCH" ]; then
        curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-aarch_64.zip -o protoc.zip && \
        unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
        rm protoc.zip
    fi
EOT

COPY . /LocalAI

RUN cd /LocalAI && make protogen-go && make -C /LocalAI/backend/go/${BACKEND} build
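For context, `protogen-go` regenerates the Go stubs from `backend.proto` before the backend itself is compiled. A minimal sketch of the equivalent manual invocation, assuming the pinned `protoc` and the two `protoc-gen-go*` plugins installed above are on `PATH` (output paths are illustrative):

```bash
# Roughly what `make protogen-go` automates: generate the Go message types
# and gRPC service stubs from the protocol definition.
protoc \
  --go_out=. --go_opt=paths=source_relative \
  --go-grpc_out=. --go-grpc_opt=paths=source_relative \
  backend.proto
```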

FROM scratch
ARG BACKEND=rerankers

COPY --from=builder /LocalAI/backend/go/${BACKEND}/package/. ./
backend/Dockerfile.llama-cpp (new file, 207 lines)
@@ -0,0 +1,207 @@
ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}

# The grpc target does one thing: it builds and installs GRPC. This is in its own layer so that it can be effectively cached by CI.
# You probably don't need to change anything here, and if you do, make sure that CI is adjusted so that the cache continues to work.
FROM ${GRPC_BASE_IMAGE} AS grpc

# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0
ARG CMAKE_FROM_SOURCE=false
ARG CMAKE_VERSION=3.26.4

ENV MAKEFLAGS=${GRPC_MAKEFLAGS}

WORKDIR /build

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        build-essential curl libssl-dev \
        git && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
    if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
        curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
    else
        apt-get update && \
        apt-get install -y \
            cmake && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

# We install GRPC to a different prefix here so that we can copy in only the build artifacts later
# saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
# and running make install in the target container
RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
    mkdir -p /build/grpc/cmake/build && \
    cd /build/grpc/cmake/build && \
    sed -i "216i\ TESTONLY" "../../third_party/abseil-cpp/absl/container/CMakeLists.txt" && \
    cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX:PATH=/opt/grpc ../.. && \
    make && \
    make install && \
    rm -rf /build

FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
ARG CUDA_MAJOR_VERSION
ARG CUDA_MINOR_VERSION
ARG SKIP_DRIVERS=false
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG GO_VERSION=1.22.6

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        ccache git \
        ca-certificates \
        make \
        curl unzip \
        libssl-dev && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}

# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}

# Vulkan requirements
RUN <<EOT bash
    if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils wget gpg-agent && \
        wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
        wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
        apt-get update && \
        apt-get install -y \
            vulkan-sdk && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

# CuBLAS requirements
RUN <<EOT bash
    if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils
        if [ "amd64" = "$TARGETARCH" ]; then
            curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
        fi
        if [ "arm64" = "$TARGETARCH" ]; then
            curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
        fi
        dpkg -i cuda-keyring_1.1-1_all.deb && \
        rm -f cuda-keyring_1.1-1_all.deb && \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            libclblast-dev && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* \
    ; fi

RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            hipblas-dev \
            rocblas-dev && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* && \
        # I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
        # to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
        ldconfig \
    ; fi

RUN echo "TARGETARCH: $TARGETARCH"

# We need protoc installed, and the version in 22.04 is too old. We will build one as part of installing GRPC below,
# but that will also bring in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
RUN <<EOT bash
    if [ "amd64" = "$TARGETARCH" ]; then
        curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
        unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
        rm protoc.zip
    fi
    if [ "arm64" = "$TARGETARCH" ]; then
        curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-aarch_64.zip -o protoc.zip && \
        unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
        rm protoc.zip
    fi
EOT

# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
    if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
        curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
    else
        apt-get update && \
        apt-get install -y \
            cmake && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

COPY --from=grpc /opt/grpc /usr/local

COPY . /LocalAI

# Build the llama.cpp variants: fallback-only on arm64/hipblas, the full set of CPU variants otherwise
RUN <<EOT bash
    if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
        cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-fallback && \
        make llama-cpp-grpc && make llama-cpp-rpc-server; \
    else \
        cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-avx && \
        make llama-cpp-avx2 && \
        make llama-cpp-avx512 && \
        make llama-cpp-fallback && \
        make llama-cpp-grpc && \
        make llama-cpp-rpc-server; \
    fi
EOT
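The separate avx/avx2/avx512/fallback binaries exist because the instruction sets baked in at compile time must be supported by the host CPU at runtime, so that a matching binary can be chosen for the host. A quick way to check what a Linux host advertises (flag names as they appear in `/proc/cpuinfo`):

```bash
# List the AVX-family flags the current CPU exposes (Linux only).
grep -m1 -oE 'avx512[a-z]*|avx2|avx' /proc/cpuinfo | sort -u
```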

# Copy libraries using a script to handle architecture differences
RUN make -C /LocalAI/backend/cpp/llama-cpp package

FROM scratch

# Copy all available binaries (the build process only creates the appropriate ones for the target architecture)
COPY --from=builder /LocalAI/backend/cpp/llama-cpp/package/. ./
backend/Dockerfile.python (new file, 123 lines)
@@ -0,0 +1,123 @@
ARG BASE_IMAGE=ubuntu:22.04

FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
ARG CUDA_MAJOR_VERSION
ARG CUDA_MINOR_VERSION
ARG SKIP_DRIVERS=false
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        ccache \
        ca-certificates \
        espeak-ng \
        curl \
        libssl-dev \
        git \
        git-lfs \
        unzip clang \
        upx-ucl \
        curl python3-pip \
        python-is-python3 \
        python3-dev llvm \
        python3-venv make && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/* && \
    pip install --upgrade pip

# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}

# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}

# Vulkan requirements
RUN <<EOT bash
    if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils wget gpg-agent && \
        wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
        wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
        apt-get update && \
        apt-get install -y \
            vulkan-sdk && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

# CuBLAS requirements
RUN <<EOT bash
    if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils
        if [ "amd64" = "$TARGETARCH" ]; then
            curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
        fi
        if [ "arm64" = "$TARGETARCH" ]; then
            curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
        fi
        dpkg -i cuda-keyring_1.1-1_all.deb && \
        rm -f cuda-keyring_1.1-1_all.deb && \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
            libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/*
    fi
EOT

# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            libclblast-dev && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* \
    ; fi

RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
        apt-get update && \
        apt-get install -y --no-install-recommends \
            hipblas-dev \
            rocblas-dev && \
        apt-get clean && \
        rm -rf /var/lib/apt/lists/* && \
        # I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
        # to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
        ldconfig \
    ; fi
# Install uv as a system package
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
ENV PATH="/root/.cargo/bin:${PATH}"

RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Install grpcio-tools (the version in 22.04 is too old)
RUN pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
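With grpcio-tools pinned, the Python stubs for `backend.proto` can be regenerated inside each backend, typically driven by the backend's own Makefile. A minimal sketch of the underlying command (the in-place output directory is illustrative):

```bash
# Generate backend_pb2.py and backend_pb2_grpc.py from the protocol definition.
python -m grpc_tools.protoc -I. \
  --python_out=. --grpc_python_out=. \
  backend.proto
```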

COPY python/${BACKEND} /${BACKEND}
COPY backend.proto /${BACKEND}/backend.proto
COPY python/common/ /${BACKEND}/common

RUN cd /${BACKEND} && PORTABLE_PYTHON=true make

FROM scratch
ARG BACKEND=rerankers
COPY --from=builder /${BACKEND}/ /
backend/README.md (new file, 213 lines)
@@ -0,0 +1,213 @@
# LocalAI Backend Architecture

This directory contains the core backend infrastructure for LocalAI, including the gRPC protocol definition, multi-language Dockerfiles, and language-specific backend implementations.

## Overview

LocalAI uses a unified gRPC-based architecture that allows different programming languages to implement AI backends while maintaining consistent interfaces and capabilities. The backend system supports multiple hardware acceleration targets and provides a standardized way to integrate various AI models and frameworks.

## Architecture Components

### 1. Protocol Definition (`backend.proto`)

The `backend.proto` file defines the gRPC service interface that all backends must implement. This ensures consistency across different language implementations and provides a contract for communication between LocalAI core and backend services.

#### Core Services

- **Text Generation**: `Predict`, `PredictStream` for LLM inference
- **Embeddings**: `Embedding` for text vectorization
- **Image Generation**: `GenerateImage` for stable diffusion and image models
- **Audio Processing**: `AudioTranscription`, `TTS`, `SoundGeneration`
- **Video Generation**: `GenerateVideo` for video synthesis
- **Object Detection**: `Detect` for computer vision tasks
- **Vector Storage**: `StoresSet`, `StoresGet`, `StoresFind` for RAG operations
- **Reranking**: `Rerank` for document relevance scoring
- **Voice Activity Detection**: `VAD` for audio segmentation

#### Key Message Types

- **`PredictOptions`**: Comprehensive configuration for text generation
- **`ModelOptions`**: Model loading and configuration parameters
- **`Result`**: Standardized response format
- **`StatusResponse`**: Backend health and memory usage information

### 2. Multi-Language Dockerfiles

The backend system provides language-specific Dockerfiles that handle the build environment and dependencies for different programming languages:

- `Dockerfile.python`
- `Dockerfile.golang`
- `Dockerfile.llama-cpp`

### 3. Language-Specific Implementations

#### Python Backends (`python/`)
- **transformers**: Hugging Face Transformers framework
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro

#### Go Backends (`go/`)
- **whisper**: OpenAI Whisper speech recognition in Go, backed by the whisper.cpp GGML C++ implementation
- **stablediffusion-ggml**: Stable Diffusion in Go, backed by a GGML C++ implementation
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Go, with C bindings to rhasspy/piper
- **bark-cpp**: Bark TTS models in Go, with C++ bindings
- **local-store**: Vector storage backend

#### C++ Backends (`cpp/`)
- **llama-cpp**: Llama.cpp integration
- **grpc**: GRPC utilities and helpers

## Hardware Acceleration Support

### CUDA (NVIDIA)
- **Versions**: CUDA 11.x, 12.x
- **Features**: cuBLAS, cuDNN, TensorRT optimization
- **Targets**: x86_64, ARM64 (Jetson)

### ROCm (AMD)
- **Features**: HIP, rocBLAS, MIOpen
- **Targets**: AMD GPUs with ROCm support

### Intel
- **Features**: oneAPI, Intel Extension for PyTorch
- **Targets**: Intel GPUs, XPUs, CPUs

### Vulkan
- **Features**: Cross-platform GPU acceleration
- **Targets**: Windows, Linux, Android, macOS

### Apple Silicon
- **Features**: MLX framework, Metal Performance Shaders
- **Targets**: M1/M2/M3 Macs

## Backend Registry (`index.yaml`)

The `index.yaml` file serves as a central registry for all available backends, providing:

- **Metadata**: Name, description, license, icons
- **Capabilities**: Hardware targets and optimization profiles
- **Tags**: Categorization for discovery
- **URLs**: Source code and documentation links

## Building Backends

### Prerequisites
- Docker with multi-architecture support
- Appropriate hardware drivers (CUDA, ROCm, etc.)
- Build tools (make, cmake, compilers)

### Build Commands

Examples of build commands with Docker:

```bash
# Build Python backend
docker build -f backend/Dockerfile.python \
  --build-arg BACKEND=transformers \
  --build-arg BUILD_TYPE=cublas12 \
  --build-arg CUDA_MAJOR_VERSION=12 \
  --build-arg CUDA_MINOR_VERSION=8 \
  -t localai-backend-transformers .

# Build Go backend
docker build -f backend/Dockerfile.golang \
  --build-arg BACKEND=whisper \
  --build-arg BUILD_TYPE=cpu \
  -t localai-backend-whisper .

# Build C++ backend
docker build -f backend/Dockerfile.llama-cpp \
  --build-arg BACKEND=llama-cpp \
  --build-arg BUILD_TYPE=cublas12 \
  -t localai-backend-llama-cpp .
```

For ARM64/Mac builds, Docker can't be used; use the Makefile in the respective backend instead, as sketched below.
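A minimal sketch of such a native build, assuming a macOS host with the Xcode command-line tools and CMake installed (the `metal` build type is the Darwin default in the llama-cpp Makefile):

```bash
# Native (non-Docker) build of the llama.cpp gRPC server on Apple Silicon.
cd backend/cpp/llama-cpp
BUILD_TYPE=metal make grpc-server
```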

### Build Types

- **`cpu`**: CPU-only optimization
- **`cublas11`**: CUDA 11.x with cuBLAS
- **`cublas12`**: CUDA 12.x with cuBLAS
- **`hipblas`**: ROCm with rocBLAS
- **`intel`**: Intel oneAPI optimization
- **`vulkan`**: Vulkan-based acceleration
- **`metal`**: Apple Metal optimization

## Backend Development

### Creating a New Backend

1. **Choose Language**: Select Python, Go, or C++ based on requirements
2. **Implement Interface**: Implement the gRPC service defined in `backend.proto`
3. **Add Dependencies**: Create appropriate requirements files
4. **Configure Build**: Set up Dockerfile and build scripts
5. **Register Backend**: Add entry to `index.yaml`
6. **Test Integration**: Verify gRPC communication and functionality

### Backend Structure

```
backend-name/
├── backend.py/go/cpp # Main implementation
├── requirements.txt  # Dependencies
├── Dockerfile        # Build configuration
├── install.sh        # Installation script
├── run.sh            # Execution script
├── test.sh           # Test script
└── README.md         # Backend documentation
```

### Required gRPC Methods

At minimum, backends must implement (a smoke-test example follows the list):
- `Health()` - Service health check
- `LoadModel()` - Model loading and initialization
- `Predict()` - Main inference endpoint
- `Status()` - Backend status and metrics
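One quick way to exercise these methods against a running backend is grpcurl. A hedged sketch, assuming the backend listens on port 50051 and that you run the command from the directory containing `backend.proto` (address and port are illustrative):

```bash
# Smoke-test the Health RPC of a running backend with grpcurl.
grpcurl -plaintext -proto backend.proto -d '{}' \
  localhost:50051 backend.Backend/Health
```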

## Integration with LocalAI Core

Backends communicate with LocalAI core through gRPC (see the example after this list):

1. **Service Discovery**: Core discovers available backends
2. **Model Loading**: Core requests model loading via `LoadModel`
3. **Inference**: Core sends requests via `Predict` or specialized endpoints
4. **Streaming**: Core handles streaming responses for real-time generation
5. **Monitoring**: Core tracks backend health and performance
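As a hedged illustration of step 1, LocalAI can also be pointed at an externally started backend; the exact flag spelling may differ between releases, so treat this as a sketch:

```bash
# Start a backend manually, then register it with a LocalAI instance
# (the name:address syntax and the flag name are illustrative).
local-ai run --external-grpc-backends "my-backend:127.0.0.1:50051"
```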

## Performance Optimization

### Memory Management
- **Model Caching**: Efficient model loading and caching
- **Batch Processing**: Optimize for multiple concurrent requests
- **Memory Pinning**: GPU memory optimization for CUDA/ROCm

### Hardware Utilization
- **Multi-GPU**: Support for tensor parallelism
- **Mixed Precision**: FP16/BF16 for memory efficiency
- **Kernel Fusion**: Optimized CUDA/ROCm kernels

## Troubleshooting

### Common Issues

1. **GRPC Connection**: Verify backend service is running and accessible
2. **Model Loading**: Check model paths and dependencies
3. **Hardware Detection**: Ensure appropriate drivers and libraries
4. **Memory Issues**: Monitor GPU memory usage and model sizes

## Contributing

When contributing to the backend system:

1. **Follow Protocol**: Implement the exact gRPC interface
2. **Add Tests**: Include comprehensive test coverage
3. **Document**: Provide clear usage examples
4. **Optimize**: Consider performance and resource usage
5. **Validate**: Test across different hardware targets
backend/backend.proto
@@ -20,6 +20,7 @@ service Backend {
 rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
 rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
 rpc Status(HealthMessage) returns (StatusResponse) {}
+rpc Detect(DetectOptions) returns (DetectResponse) {}

 rpc StoresSet(StoresSetOptions) returns (Result) {}
 rpc StoresDelete(StoresDeleteOptions) returns (Result) {}
@@ -162,6 +163,7 @@ message Reply {
 int32 prompt_tokens = 3;
 double timing_prompt_processing = 4;
 double timing_token_generation = 5;
+bytes audio = 6;
 }

 message GrammarTrigger {
@@ -184,7 +186,6 @@ message ModelOptions {
 string MainGPU = 13;
 string TensorSplit = 14;
 int32 Threads = 15;
-string LibrarySearchPath = 16;
 float RopeFreqBase = 17;
 float RopeFreqScale = 18;
 float RMSNormEps = 19;
@@ -241,7 +242,7 @@ message ModelOptions {

 string Type = 49;

-bool FlashAttention = 56;
+string FlashAttention = 56;
 bool NoKVOffload = 57;

 string ModelPath = 59;
@@ -255,6 +256,10 @@ message ModelOptions {
 string CacheTypeValue = 64;

 repeated GrammarTrigger GrammarTriggers = 65;

+bool Reranking = 71;
+
+repeated string Overrides = 72;
+
 }

 message Result {
@@ -271,6 +276,7 @@ message TranscriptRequest {
 string language = 3;
 uint32 threads = 4;
 bool translate = 5;
+bool diarize = 6;
 }

 message TranscriptResult {
@@ -300,19 +306,24 @@ message GenerateImageRequest {
 // Diffusers
 string EnableParameters = 10;
 int32 CLIPSkip = 11;
+
+// Reference images for models that support them (e.g., Flux Kontext)
+repeated string ref_images = 12;
 }

 message GenerateVideoRequest {
 string prompt = 1;
-string start_image = 2; // Path or base64 encoded image for the start frame
-string end_image = 3; // Path or base64 encoded image for the end frame
-int32 width = 4;
-int32 height = 5;
-int32 num_frames = 6; // Number of frames to generate
-int32 fps = 7; // Frames per second
-int32 seed = 8;
-float cfg_scale = 9; // Classifier-free guidance scale
-string dst = 10; // Output path for the generated video
+string negative_prompt = 2; // Negative prompt for video generation
+string start_image = 3; // Path or base64 encoded image for the start frame
+string end_image = 4; // Path or base64 encoded image for the end frame
+int32 width = 5;
+int32 height = 6;
+int32 num_frames = 7; // Number of frames to generate
+int32 fps = 8; // Frames per second
+int32 seed = 9;
+float cfg_scale = 10; // Classifier-free guidance scale
+int32 step = 11; // Number of inference steps
+string dst = 12; // Output path for the generated video
 }

 message TTSRequest {
@@ -372,3 +383,20 @@ message Message {
 string role = 1;
 string content = 2;
 }
+
+message DetectOptions {
+  string src = 1;
+}
+
+message Detection {
+  float x = 1;
+  float y = 2;
+  float width = 3;
+  float height = 4;
+  float confidence = 5;
+  string class_name = 6;
+}
+
+message DetectResponse {
+  repeated Detection Detections = 1;
+}
@@ -1,20 +1,3 @@
-
-## XXX: In some versions of CMake clip wasn't being built before llama.
-## This is an hack for now, but it should be fixed in the future.
-# set(TARGET myclip)
-# add_library(${TARGET} clip.cpp clip.h clip-impl.h llava.cpp llava.h)
-# install(TARGETS ${TARGET} LIBRARY)
-# target_include_directories(myclip PUBLIC .)
-# target_include_directories(myclip PUBLIC ../..)
-# target_include_directories(myclip PUBLIC ../../common)
-# target_link_libraries(${TARGET} PRIVATE common ggml llama ${CMAKE_THREAD_LIBS_INIT})
-# target_compile_features(${TARGET} PRIVATE cxx_std_11)
-# if (NOT MSVC)
-#    target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h
-# endif()
-# END CLIP hack
-
 set(TARGET grpc-server)
 set(CMAKE_CXX_STANDARD 17)
 cmake_minimum_required(VERSION 3.15)
@@ -74,7 +57,7 @@ add_library(hw_grpc_proto
 ${hw_proto_srcs}
 ${hw_proto_hdrs} )

-add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp)
+add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp httplib.h)

 target_include_directories(${TARGET} PRIVATE ../llava)
 target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})
backend/cpp/llama-cpp/Makefile (new file, 166 lines)
@@ -0,0 +1,166 @@

LLAMA_VERSION?=fe4eb4f8ec25a1239b0923f1c7f87adf5730c3e5
LLAMA_REPO?=https://github.com/JohannesGaessler/llama.cpp

CMAKE_ARGS?=
BUILD_TYPE?=
NATIVE?=false
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server
JOBS?=$(shell nproc)

# Disable shared libs: we link against static gRPC and can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF

CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false)
    CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
    CMAKE_ARGS+=-DGGML_CUDA=ON
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
    CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
    CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# For hipblas we also have to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
    ROCM_HOME ?= /opt/rocm
    ROCM_PATH ?= /opt/rocm
    export CXX=$(ROCM_HOME)/llvm/bin/clang++
    export CC=$(ROCM_HOME)/llvm/bin/clang
    AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
    CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
else ifeq ($(BUILD_TYPE),vulkan)
    CMAKE_ARGS+=-DGGML_VULKAN=1
else ifeq ($(OS),Darwin)
    ifeq ($(BUILD_TYPE),)
        BUILD_TYPE=metal
    endif
    ifneq ($(BUILD_TYPE),metal)
        CMAKE_ARGS+=-DGGML_METAL=OFF
    else
        CMAKE_ARGS+=-DGGML_METAL=ON
        CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
        CMAKE_ARGS+=-DGGML_METAL_USE_BF16=ON
        CMAKE_ARGS+=-DGGML_OPENMP=OFF
    endif
    TARGET+=--target ggml-metal
endif

ifeq ($(BUILD_TYPE),sycl_f16)
    CMAKE_ARGS+=-DGGML_SYCL=ON \
        -DCMAKE_C_COMPILER=icx \
        -DCMAKE_CXX_COMPILER=icpx \
        -DCMAKE_CXX_FLAGS="-fsycl" \
        -DGGML_SYCL_F16=ON
endif

ifeq ($(BUILD_TYPE),sycl_f32)
    CMAKE_ARGS+=-DGGML_SYCL=ON \
        -DCMAKE_C_COMPILER=icx \
        -DCMAKE_CXX_COMPILER=icpx \
        -DCMAKE_CXX_FLAGS="-fsycl"
endif
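To make the flag plumbing above concrete, a typical local invocation might look like the following (a sketch: `grpc-server` is a target defined further down, and `BUILD_TYPE=cublas` assumes a working CUDA toolchain on the host):

```bash
# Build the CUDA-enabled llama.cpp gRPC server with 8 parallel jobs.
cd backend/cpp/llama-cpp
BUILD_TYPE=cublas JOBS=8 make grpc-server
```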

INSTALLED_PACKAGES=$(CURDIR)/../grpc/installed_packages
INSTALLED_LIB_CMAKE=$(INSTALLED_PACKAGES)/lib/cmake
ADDED_CMAKE_ARGS=-Dabsl_DIR=${INSTALLED_LIB_CMAKE}/absl \
    -DProtobuf_DIR=${INSTALLED_LIB_CMAKE}/protobuf \
    -Dutf8_range_DIR=${INSTALLED_LIB_CMAKE}/utf8_range \
    -DgRPC_DIR=${INSTALLED_LIB_CMAKE}/grpc \
    -DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=${INSTALLED_PACKAGES}/include
build-llama-cpp-grpc-server:
# Conditionally build grpc for the llama backend to use if needed
ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
	$(MAKE) -C ../../grpc build
	_PROTOBUF_PROTOC=${INSTALLED_PACKAGES}/bin/proto \
	_GRPC_CPP_PLUGIN_EXECUTABLE=${INSTALLED_PACKAGES}/bin/grpc_cpp_plugin \
	PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
	CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
	LLAMA_VERSION=$(LLAMA_VERSION) \
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
else
	echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
	LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
endif

llama-cpp-avx2: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build purge
	$(info ${GREEN}I llama-cpp build info:avx2${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx2-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build/grpc-server llama-cpp-avx2

llama-cpp-avx512: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build purge
	$(info ${GREEN}I llama-cpp build info:avx512${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx512-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build/grpc-server llama-cpp-avx512

llama-cpp-avx: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
	$(info ${GREEN}I llama-cpp build info:avx${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx

llama-cpp-fallback: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
	$(info ${GREEN}I llama-cpp build info:fallback${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback

llama-cpp-grpc: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
	$(info ${GREEN}I llama-cpp build info:grpc${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc

llama-cpp-rpc-server: llama-cpp-grpc
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/llama.cpp/build/bin/rpc-server llama-cpp-rpc-server

llama.cpp:
	mkdir -p llama.cpp
	cd llama.cpp && \
	git init && \
	git remote add origin $(LLAMA_REPO) && \
	git fetch origin && \
	git checkout -b build $(LLAMA_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

llama.cpp/tools/grpc-server: llama.cpp
	mkdir -p llama.cpp/tools/grpc-server
	bash prepare.sh

rebuild:
	bash prepare.sh
	rm -rf grpc-server
	$(MAKE) grpc-server

package:
	bash package.sh

purge:
	rm -rf llama.cpp/build
	rm -rf llama.cpp/tools/grpc-server
	rm -rf grpc-server

clean: purge
	rm -rf llama.cpp

grpc-server: llama.cpp llama.cpp/tools/grpc-server
	@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
	+bash -c "source $(ONEAPI_VARS); \
		cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)"
else
	+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)
endif
	cp llama.cpp/build/bin/grpc-server .
backend/cpp/llama-cpp/grpc-server.cpp (new file, 978 lines)
@@ -0,0 +1,978 @@
// llama.cpp gRPC C++ backend server
//
// Ettore Di Giacinto <mudler@localai.io> and llama.cpp authors
//
// This is a gRPC server for llama.cpp compatible with the LocalAI proto
// Note: this is a re-adaptation of the original llama.cpp example/server.cpp for HTTP (https://github.com/ggerganov/llama.cpp/tree/master/examples/server),
// but modified to work with gRPC
//

#include "server.cpp"
// LocalAI

#include "backend.pb.h"
#include "backend.grpc.pb.h"
#include "common.h"
#include <getopt.h>
#include <grpcpp/ext/proto_server_reflection_plugin.h>
#include <grpcpp/grpcpp.h>
#include <grpcpp/health_check_service_interface.h>
#include <regex>

using grpc::Server;
using grpc::ServerBuilder;
using grpc::ServerContext;
using grpc::Status;
// END LocalAI

/////////////////////////////////
////////////////////////////////
//////// LOCALAI code starts below here
/////////////////////////////////
////////////////////////////////

bool loaded_model; // TODO: add a mutex for this, but happens only once loading the model

static void start_llama_server(server_context& ctx_server) {

    LOG_INF("%s: starting llama server\n", __func__);

    LOG_INF("%s: waiting for model to be loaded\n", __func__);
    // Wait for model to be loaded first
    while (!loaded_model) {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }

    ctx_server.init();
    //state.store(SERVER_STATE_READY);

    LOG_INF("%s: model loaded\n", __func__);

    // print sample chat example to make it clear which template is used
    // LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
    //     common_chat_templates_source(ctx_server.chat_templates.get()),
    //     common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str(), ctx_server.params_base.default_template_kwargs);

    // Reset the chat templates
    // TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM
    ctx_server.chat_templates.reset();

    ctx_server.queue_tasks.on_new_task([&ctx_server](server_task && task) {
        ctx_server.process_single_task(std::move(task));
    });

    ctx_server.queue_tasks.on_update_slots([&ctx_server]() {
        ctx_server.update_slots();
    });

    shutdown_handler = [&](int) {
        // this will unblock start_loop()
        ctx_server.queue_tasks.terminate();
    };

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
    struct sigaction sigint_action;
    sigint_action.sa_handler = signal_handler;
    sigemptyset (&sigint_action.sa_mask);
    sigint_action.sa_flags = 0;
    sigaction(SIGINT, &sigint_action, NULL);
    sigaction(SIGTERM, &sigint_action, NULL);
#elif defined (_WIN32)
    auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
        return (ctrl_type == CTRL_C_EVENT) ? (signal_handler(SIGINT), true) : false;
    };
    SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

    // this call blocks the main thread until queue_tasks.terminate() is called
    ctx_server.queue_tasks.start_loop();
}

json parse_options(bool streaming, const backend::PredictOptions* predict)
{

    // Build a json payload from the prediction options
    json data;
    data["stream"] = streaming;
    data["cache_prompt"] = predict->promptcacheall();
    data["n_predict"] = predict->tokens() == 0 ? -1 : predict->tokens();
    data["top_k"] = predict->topk();
    data["top_p"] = predict->topp();
    data["typical_p"] = predict->typicalp();
    data["temperature"] = predict->temperature();
    data["repeat_last_n"] = predict->repeat();
    data["repeat_penalty"] = predict->penalty();
    data["frequency_penalty"] = predict->frequencypenalty();
    data["presence_penalty"] = predict->presencepenalty();
    data["mirostat"] = predict->mirostat();
    data["mirostat_tau"] = predict->mirostattau();
    data["mirostat_eta"] = predict->mirostateta();
    data["n_keep"] = predict->nkeep();
    data["seed"] = predict->seed();
    data["grammar"] = predict->grammar();
    data["prompt"] = predict->prompt();
    data["ignore_eos"] = predict->ignoreeos();
    data["embeddings"] = predict->embeddings();
    // TODO: add back json_schema and let this be controlled by the user
    // data["json_schema"] = predict->jsonschema();

    // Add the correlationid to json data
    data["correlation_id"] = predict->correlationid();

    // for each image in the request, add the image data
    for (int i = 0; i < predict->images_size(); i++) {
        data["image_data"].push_back(json
        {
            {"id", i},
            {"data", predict->images(i)},
        });
    }

    // for each audio in the request, add the audio data
    for (int i = 0; i < predict->audios_size(); i++) {
        data["audio_data"].push_back(json
        {
            {"id", i},
            {"data", predict->audios(i)},
        });
    }

    data["stop"] = predict->stopprompts();
    // data["n_probs"] = predict->nprobs();
    //TODO: images,

    return data;
}

const std::vector<ggml_type> kv_cache_types = {
    GGML_TYPE_F32,
    GGML_TYPE_F16,
    GGML_TYPE_BF16,
    GGML_TYPE_Q8_0,
    GGML_TYPE_Q4_0,
    GGML_TYPE_Q4_1,
    GGML_TYPE_IQ4_NL,
    GGML_TYPE_Q5_0,
    GGML_TYPE_Q5_1,
};

static ggml_type kv_cache_type_from_str(const std::string & s) {
    for (const auto & type : kv_cache_types) {
        if (ggml_type_name(type) == s) {
            return type;
        }
    }
    throw std::runtime_error("Unsupported cache type: " + s);
}

static std::string get_all_kv_cache_types() {
    std::ostringstream msg;
    for (const auto & type : kv_cache_types) {
        msg << ggml_type_name(type) << (&type == &kv_cache_types.back() ? "" : ", ");
    }
    return msg.str();
}

// Adds an RPC server
// https://github.com/ggerganov/llama.cpp/compare/4dbc8b9cb71876e005724f4e8f73a3544646bcf5..3edfa7d3753c29e44b964c0ff424d2ea8d5fdee6
static void add_rpc_devices(std::string servers) {
    auto rpc_servers = string_split<std::string>(servers, ',');
    if (rpc_servers.empty()) {
        throw std::invalid_argument("no RPC servers specified");
    }
    ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
    if (!rpc_reg) {
        throw std::invalid_argument("failed to find RPC backend");
    }
    typedef ggml_backend_dev_t (*ggml_backend_rpc_add_device_t)(const char * endpoint);
    ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
    if (!ggml_backend_rpc_add_device_fn) {
        throw std::invalid_argument("failed to find RPC device add function");
    }
    for (const auto & server : rpc_servers) {
        ggml_backend_dev_t dev = ggml_backend_rpc_add_device_fn(server.c_str());
        if (dev) {
            ggml_backend_device_register(dev);
        } else {
            throw std::invalid_argument("failed to register RPC device");
        }
    }
}

static void params_parse(const backend::ModelOptions* request,
                         common_params & params) {

    // this is comparable to: https://github.com/ggerganov/llama.cpp/blob/d9b33fe95bd257b36c84ee5769cc048230067d6f/examples/server/server.cpp#L1809

    params.model.path = request->modelfile();
    if (!request->mmproj().empty()) {
        // get the directory of modelfile
        std::string model_dir = params.model.path.substr(0, params.model.path.find_last_of("/\\"));
        params.mmproj.path = model_dir + "/"+ request->mmproj();
    }
    // params.model_alias ??
    params.model_alias = request->modelfile();
    if (!request->cachetypekey().empty()) {
        params.cache_type_k = kv_cache_type_from_str(request->cachetypekey());
    }
    if (!request->cachetypevalue().empty()) {
        params.cache_type_v = kv_cache_type_from_str(request->cachetypevalue());
    }
    params.n_ctx = request->contextsize();
    //params.memory_f16 = request->f16memory();
    params.cpuparams.n_threads = request->threads();
    params.n_gpu_layers = request->ngpulayers();
    params.n_batch = request->nbatch();
    // Set params.n_parallel from the environment variable (LLAMACPP_PARALLEL), defaulting to 1
    //params.n_parallel = 1;
    const char *env_parallel = std::getenv("LLAMACPP_PARALLEL");
    if (env_parallel != NULL) {
        params.n_parallel = std::stoi(env_parallel);
        params.cont_batching = true;
    } else {
        params.n_parallel = 1;
    }

    const char *llama_grpc_servers = std::getenv("LLAMACPP_GRPC_SERVERS");
    if (llama_grpc_servers != NULL) {
        add_rpc_devices(std::string(llama_grpc_servers));
    }

    // decode options. Options are in the form optname:optvalue, or just optname for booleans.
    for (int i = 0; i < request->options_size(); i++) {
        std::string opt = request->options(i);
        char *optname = strtok(&opt[0], ":");
        char *optval = strtok(NULL, ":");
        if (optval == NULL) {
            optval = "true";
        }

        if (!strcmp(optname, "gpu")) {
            // llama.has_gpu = true;
        }
    }

    // Add kv_overrides
    if (request->overrides_size() > 0) {
        for (int i = 0; i < request->overrides_size(); i++) {
            string_parse_kv_override(request->overrides(i).c_str(), params.kv_overrides);
        }
    }

    // TODO: Add yarn

    if (!request->tensorsplit().empty()) {
        std::string arg_next = request->tensorsplit();

        // split string by , and /
        const std::regex regex{ R"([,/]+)" };
        std::sregex_token_iterator it{ arg_next.begin(), arg_next.end(), regex, -1 };
        std::vector<std::string> split_arg{ it, {} };

        GGML_ASSERT(split_arg.size() <= llama_max_devices());

        for (size_t i_device = 0; i_device < llama_max_devices(); ++i_device) {
            if (i_device < split_arg.size()) {
                params.tensor_split[i_device] = std::stof(split_arg[i_device]);
            }
            else {
                params.tensor_split[i_device] = 0.0f;
            }
        }
    }

    if (!request->maingpu().empty()) {
        params.main_gpu = std::stoi(request->maingpu());
    }
    if (!request->loraadapter().empty() && !request->lorabase().empty()) {
        float scale_factor = 1.0f;
        if (request->lorascale() != 0.0f) {
            scale_factor = request->lorascale();
        }
        // get the directory of modelfile
        std::string model_dir = params.model.path.substr(0, params.model.path.find_last_of("/\\"));
        params.lora_adapters.push_back({ model_dir + "/"+request->loraadapter(), scale_factor });
    }
    params.use_mlock = request->mlock();
    params.use_mmap = request->mmap();

    if (request->flashattention() == "on" || request->flashattention() == "enabled") {
        params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
    } else if (request->flashattention() == "off" || request->flashattention() == "disabled") {
        params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
    } else if (request->flashattention() == "auto") {
        params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
    }

    params.no_kv_offload = request->nokvoffload();
    params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops)

    params.embedding = request->embeddings() || request->reranking();
    if (request->reranking()) {
        params.pooling_type = LLAMA_POOLING_TYPE_RANK;
    }

    if (request->ropescaling() == "none")        { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
    else if (request->ropescaling() == "yarn")   { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
    else if (request->ropescaling() == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }

    if ( request->yarnextfactor() != 0.0f ) {
        params.yarn_ext_factor = request->yarnextfactor();
    }
    if ( request->yarnattnfactor() != 0.0f ) {
        params.yarn_attn_factor = request->yarnattnfactor();
    }
    if ( request->yarnbetafast() != 0.0f ) {
        params.yarn_beta_fast = request->yarnbetafast();
    }
    if ( request->yarnbetaslow() != 0.0f ) {
        params.yarn_beta_slow = request->yarnbetaslow();
    }
    if ( request->ropefreqbase() != 0.0f ) {
        params.rope_freq_base = request->ropefreqbase();
    }
    if ( request->ropefreqscale() != 0.0f ) {
        params.rope_freq_scale = request->ropefreqscale();
    }

    if (request->grammartriggers_size() > 0) {
        params.sampling.grammar_lazy = true;
        for (int i = 0; i < request->grammartriggers_size(); i++) {
            common_grammar_trigger trigger;
            trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_WORD;
            trigger.value = request->grammartriggers(i).word();
            // trigger.at_start = request->grammartriggers(i).at_start();
            params.sampling.grammar_triggers.push_back(trigger);
        }
    }
}
|
||||||
|
|
||||||
|
|
||||||
|
// GRPC Server start
class BackendServiceImpl final : public backend::Backend::Service {
private:
    server_context& ctx_server;

public:
    BackendServiceImpl(server_context& ctx) : ctx_server(ctx) {}

    grpc::Status Health(ServerContext* context, const backend::HealthMessage* request, backend::Reply* reply) {
        // Implement Health RPC
        reply->set_message("OK");
        return Status::OK;
    }

    grpc::Status LoadModel(ServerContext* context, const backend::ModelOptions* request, backend::Result* result) {
        // Implement LoadModel RPC
        common_params params;
        params_parse(request, params);

        common_init();

        llama_backend_init();
        llama_numa_init(params.numa);

        LOG_INF("system info: n_threads = %d, n_threads_batch = %d, total_threads = %d\n", params.cpuparams.n_threads, params.cpuparams_batch.n_threads, std::thread::hardware_concurrency());
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
        LOG_INF("\n");

        // load the model
        if (!ctx_server.load_model(params)) {
            result->set_message("Failed to load the model");
            result->set_success(false);
            return Status::CANCELLED;
        }

        //ctx_server.init();
        result->set_message("Loading succeeded");
        result->set_success(true);
        loaded_model = true;
        ctx_server.slot_prompt_similarity = params.slot_prompt_similarity;

        return Status::OK;
    }

    grpc::Status PredictStream(grpc::ServerContext* context, const backend::PredictOptions* request, grpc::ServerWriter<backend::Reply>* writer) override {
        json data = parse_options(true, request);

        // Raise error if embeddings is set to true
        if (ctx_server.params_base.embedding) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Embedding is not supported in streaming mode");
        }

        auto completion_id = gen_chatcmplid();
        std::unordered_set<int> task_ids;
        try {
            std::vector<server_task> tasks;

            const auto & prompt = data.at("prompt");
            const auto type = SERVER_TASK_TYPE_COMPLETION;
            // TODO: this log can become very long, put it behind a flag or think about a more compact format
            //SRV_DBG("Prompt: %s\n", prompt.is_string() ? prompt.get<std::string>().c_str() : prompt.dump(2).c_str());

            std::vector<raw_buffer> files;
            const auto & images_data = data.find("image_data");
            if (images_data != data.end() && images_data->is_array()) {
                for (const auto & img : *images_data) {
                    auto decoded_data = base64_decode(img["data"].get<std::string>());
                    files.push_back(decoded_data);
                }
            }

            const auto & audio_data = data.find("audio_data");
            if (audio_data != data.end() && audio_data->is_array()) {
                for (const auto & audio : *audio_data) {
                    auto decoded_data = base64_decode(audio["data"].get<std::string>());
                    files.push_back(decoded_data);
                }
            }

            const bool has_mtmd = ctx_server.mctx != nullptr;

            // process prompt
            std::vector<server_tokens> inputs;
            if (!prompt.is_string()) {
                throw std::runtime_error("prompt must be a string");
            }

            if (has_mtmd) {
                // multimodal
                inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
            } else {
                // Everything else, including multimodal completions.
                inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
            }

            tasks.reserve(inputs.size());
            for (size_t i = 0; i < inputs.size(); i++) {
                server_task task = server_task(type);

                task.id    = ctx_server.queue_tasks.get_new_id();
                task.index = i;

                task.prompt_tokens = std::move(inputs[i]);
                task.params = server_task::params_from_json_cmpl(
                    ctx_server.ctx,
                    ctx_server.params_base,
                    data);
                task.id_selected_slot = json_value(data, "id_slot", -1);

                // OAI-compat
                task.params.oaicompat         = OAICOMPAT_TYPE_NONE;
                task.params.oaicompat_cmpl_id = completion_id;
                // oaicompat_model is already populated by params_from_json_cmpl

                tasks.push_back(std::move(task));
            }

            task_ids = server_task::get_list_id(tasks);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(std::move(tasks));
        } catch (const std::exception & e) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, e.what());
        }

        ctx_server.receive_cmpl_results_stream(task_ids, [&](server_task_result_ptr & result) -> bool {
            json res_json = result->to_json();
            if (res_json.is_array()) {
                for (const auto & res : res_json) {
                    std::string completion_text = res.value("content", "");

                    backend::Reply reply;
                    reply.set_message(completion_text);
                    int32_t tokens_predicted = res.value("tokens_predicted", 0);
                    reply.set_tokens(tokens_predicted);
                    int32_t tokens_evaluated = res.value("tokens_evaluated", 0);
                    reply.set_prompt_tokens(tokens_evaluated);

                    if (res.contains("timings")) {
                        double timing_prompt_processing = res.at("timings").value("prompt_ms", 0.0);
                        reply.set_timing_prompt_processing(timing_prompt_processing);
                        double timing_token_generation = res.at("timings").value("predicted_ms", 0.0);
                        reply.set_timing_token_generation(timing_token_generation);
                    }

                    // Log Request Correlation Id

                    // Send the reply
                    writer->Write(reply);
                }
            } else {
                std::string completion_text = res_json.value("content", "");

                backend::Reply reply;
                reply.set_message(completion_text);
                int32_t tokens_predicted = res_json.value("tokens_predicted", 0);
                reply.set_tokens(tokens_predicted);
                int32_t tokens_evaluated = res_json.value("tokens_evaluated", 0);
                reply.set_prompt_tokens(tokens_evaluated);

                if (res_json.contains("timings")) {
                    double timing_prompt_processing = res_json.at("timings").value("prompt_ms", 0.0);
                    reply.set_timing_prompt_processing(timing_prompt_processing);
                    double timing_token_generation = res_json.at("timings").value("predicted_ms", 0.0);
                    reply.set_timing_token_generation(timing_token_generation);
                }

                // Send the reply
                writer->Write(reply);
            }
            return true;
        }, [&](const json & error_data) {
            backend::Reply reply;
            reply.set_message(error_data.value("content", ""));
            writer->Write(reply);
            return true;
        }, [&]() {
            // NOTE: we should try to check when the writer is closed here
            return false;
        });

        ctx_server.queue_results.remove_waiting_task_ids(task_ids);

        return grpc::Status::OK;
    }

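    // Illustrative client-side sketch (not part of this file): a caller consumes
    // this server-streaming RPC through the generated ClientReader. The
    // backend::Backend stub follows from the service defined in this class;
    // set_prompt() is an assumption about the PredictOptions message in backend.proto.
    //
    //   auto channel = grpc::CreateChannel("localhost:50051", grpc::InsecureChannelCredentials());
    //   auto stub    = backend::Backend::NewStub(channel);
    //   grpc::ClientContext cctx;
    //   backend::PredictOptions req;
    //   req.set_prompt("Hello");                        // assumed field name
    //   auto reader = stub->PredictStream(&cctx, req);
    //   backend::Reply chunk;
    //   while (reader->Read(&chunk)) {                  // one Reply per generated chunk
    //       std::cout << chunk.message();
    //   }
    //   grpc::Status st = reader->Finish();             // surfaces server-side errors
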
    grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) {
        json data = parse_options(true, request);

        data["stream"] = false;
        // Raise error if embeddings is set to true
        if (ctx_server.params_base.embedding) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Embedding is not supported in Predict mode");
        }
        std::cout << "[PREDICT] Request data: " << data.dump(2) << std::endl;
        auto completion_id = gen_chatcmplid();
        std::unordered_set<int> task_ids;
        try {
            std::vector<server_task> tasks;

            const auto & prompt = data.at("prompt");
            const auto type = SERVER_TASK_TYPE_COMPLETION;
            // TODO: this log can become very long, put it behind a flag or think about a more compact format
            //SRV_DBG("Prompt: %s\n", prompt.is_string() ? prompt.get<std::string>().c_str() : prompt.dump(2).c_str());

            std::vector<raw_buffer> files;
            const auto & images_data = data.find("image_data");
            // std::cout << "[PREDICT] Images data: " << images_data->dump(2) << std::endl;

            if (images_data != data.end() && images_data->is_array()) {
                std::cout << "[PREDICT] Processing " << images_data->size() << " images" << std::endl;
                for (const auto & img : *images_data) {
                    std::cout << "[PREDICT] Processing image" << std::endl;
                    auto decoded_data = base64_decode(img["data"].get<std::string>());
                    files.push_back(decoded_data);
                }
            }

            const auto & audio_data = data.find("audio_data");
            if (audio_data != data.end() && audio_data->is_array()) {
                for (const auto & audio : *audio_data) {
                    auto decoded_data = base64_decode(audio["data"].get<std::string>());
                    files.push_back(decoded_data);
                }
            }

            // process files
            const bool has_mtmd = ctx_server.mctx != nullptr;

            // process prompt
            std::vector<server_tokens> inputs;
            if (!prompt.is_string()) {
                std::cout << "[PREDICT] Prompt must be a string" << std::endl;
                throw std::runtime_error("prompt must be a string");
            }

            if (has_mtmd) {
                // multimodal
                inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
            } else {
                // Everything else, including multimodal completions.
                inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
            }

            tasks.reserve(inputs.size());
            for (size_t i = 0; i < inputs.size(); i++) {
                server_task task = server_task(type);

                task.id    = ctx_server.queue_tasks.get_new_id();
                task.index = i;

                task.prompt_tokens = std::move(inputs[i]);
                task.params = server_task::params_from_json_cmpl(
                    ctx_server.ctx,
                    ctx_server.params_base,
                    data);
                task.id_selected_slot = json_value(data, "id_slot", -1);

                // OAI-compat
                task.params.oaicompat         = OAICOMPAT_TYPE_NONE;
                task.params.oaicompat_cmpl_id = completion_id;
                // oaicompat_model is already populated by params_from_json_cmpl

                tasks.push_back(std::move(task));
            }

            task_ids = server_task::get_list_id(tasks);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(std::move(tasks));
        } catch (const std::exception & e) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, e.what());
        }

        std::cout << "[DEBUG] Waiting for results..." << std::endl;
        ctx_server.receive_multi_results(task_ids, [&](std::vector<server_task_result_ptr> & results) {
            std::cout << "[DEBUG] Received " << results.size() << " results" << std::endl;
            if (results.size() == 1) {
                // single result
                reply->set_message(results[0]->to_json().value("content", ""));

                int32_t tokens_predicted = results[0]->to_json().value("tokens_predicted", 0);
                reply->set_tokens(tokens_predicted);
                int32_t tokens_evaluated = results[0]->to_json().value("tokens_evaluated", 0);
                reply->set_prompt_tokens(tokens_evaluated);

                if (results[0]->to_json().contains("timings")) {
                    double timing_prompt_processing = results[0]->to_json().at("timings").value("prompt_ms", 0.0);
                    reply->set_timing_prompt_processing(timing_prompt_processing);
                    double timing_token_generation = results[0]->to_json().at("timings").value("predicted_ms", 0.0);
                    reply->set_timing_token_generation(timing_token_generation);
                }

            } else {
                // multiple results (multitask)
                json arr = json::array();
                for (auto & res : results) {
                    arr.push_back(res->to_json().value("content", ""));
                }
                reply->set_message(arr.dump()); // set_message expects a string, so serialize the JSON array
            }

        }, [&](const json & error_data) {
            std::cout << "[DEBUG] Error in results: " << error_data.value("content", "") << std::endl;
            reply->set_message(error_data.value("content", ""));
        }, [&]() {
            return false;
        });

        ctx_server.queue_results.remove_waiting_task_ids(task_ids);
        std::cout << "[DEBUG] Predict request completed successfully" << std::endl;

        return grpc::Status::OK;
    }

    grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) {
        json body = parse_options(false, request);

        body["stream"] = false;

        /*
        if (llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Pooling type 'none' is not OAI compatible. Please use a different pooling type");
        }
        */

        // for the shape of input/content, see tokenize_input_prompts()
        json prompt = body.at("prompt");

        auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
        for (const auto & tokens : tokenized_prompts) {
            // this check is necessary for models that do not add a BOS token to the input
            if (tokens.empty()) {
                return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Input content cannot be empty");
            }
        }

        // create and queue the task
        json responses = json::array();
        bool error = false;
        std::unordered_set<int> task_ids;
        {
            std::vector<server_task> tasks;
            for (size_t i = 0; i < tokenized_prompts.size(); i++) {
                server_task task = server_task(SERVER_TASK_TYPE_EMBEDDING);

                task.id = ctx_server.queue_tasks.get_new_id();
                task.index = i;
                task.prompt_tokens = std::move(tokenized_prompts[i]);

                // OAI-compat
                task.params.oaicompat = OAICOMPAT_TYPE_EMBEDDING;

                tasks.push_back(std::move(task));
            }

            task_ids = server_task::get_list_id(tasks);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(std::move(tasks));
        }

        // get the result
        ctx_server.receive_multi_results(task_ids, [&](std::vector<server_task_result_ptr> & results) {
            for (auto & res : results) {
                GGML_ASSERT(dynamic_cast<server_task_result_embd*>(res.get()) != nullptr);
                responses.push_back(res->to_json());
            }
        }, [&](const json & error_data) {
            // flag the failure so the handler returns INTERNAL below instead of reading empty responses
            error = true;
        }, [&]() {
            // NOTE: we should try to check when the writer is closed here
            return false;
        });

        ctx_server.queue_results.remove_waiting_task_ids(task_ids);

        if (error) {
            return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results");
        }

        std::vector<float> embeddings = responses[0].value("embedding", std::vector<float>());
        // loop over the vector and set the embedding results
        for (size_t i = 0; i < embeddings.size(); i++) {
            embeddingResult->add_embeddings(embeddings[i]);
        }

        return grpc::Status::OK;
    }

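    // Client-side sketch (illustrative, not part of this file): the embedding
    // vector comes back through the repeated `embeddings` field populated by
    // add_embeddings() above.
    //
    //   grpc::ClientContext cctx;
    //   backend::PredictOptions req;   // carries the prompt, as in Predict
    //   backend::EmbeddingResult res;
    //   if (stub->Embedding(&cctx, req, &res).ok()) {
    //       std::vector<float> v(res.embeddings().begin(), res.embeddings().end());
    //   }
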
    grpc::Status Rerank(ServerContext* context, const backend::RerankRequest* request, backend::RerankResult* rerankResult) {
        if (!ctx_server.params_base.embedding || ctx_server.params_base.pooling_type != LLAMA_POOLING_TYPE_RANK) {
            return grpc::Status(grpc::StatusCode::UNIMPLEMENTED, "This server does not support reranking. Start it with `--reranking` and without `--embedding`");
        }

        // Validate request
        if (request->query().empty()) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"query\" must be provided");
        }

        if (request->documents_size() == 0) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"documents\" must be a non-empty string array");
        }

        // Tokenize the query
        auto tokenized_query = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, request->query(), /* add_special */ false, true);
        if (tokenized_query.size() != 1) {
            return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"query\" must contain only a single prompt");
        }
        // Create and queue the task
        json responses = json::array();
        bool error = false;
        std::unordered_set<int> task_ids;
        {
            std::vector<server_task> tasks;
            std::vector<std::string> documents;
            for (int i = 0; i < request->documents_size(); i++) {
                documents.push_back(request->documents(i));
            }

            auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, documents, /* add_special */ false, true);
            tasks.reserve(tokenized_docs.size());
            for (size_t i = 0; i < tokenized_docs.size(); i++) {
                auto tmp = format_rerank(ctx_server.vocab, tokenized_query[0], tokenized_docs[i]);
                server_task task = server_task(SERVER_TASK_TYPE_RERANK);
                task.id = ctx_server.queue_tasks.get_new_id();
                task.index = i;
                task.prompt_tokens = std::move(tmp);
                tasks.push_back(std::move(task));
            }

            task_ids = server_task::get_list_id(tasks);
            ctx_server.queue_results.add_waiting_tasks(tasks);
            ctx_server.queue_tasks.post(std::move(tasks));
        }

        // Get the results
        ctx_server.receive_multi_results(task_ids, [&](std::vector<server_task_result_ptr> & results) {
            for (auto & res : results) {
                GGML_ASSERT(dynamic_cast<server_task_result_rerank*>(res.get()) != nullptr);
                responses.push_back(res->to_json());
            }
        }, [&](const json & error_data) {
            error = true;
        }, [&]() {
            return false;
        });

        ctx_server.queue_results.remove_waiting_task_ids(task_ids);

        if (error) {
            return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results");
        }

        // Set usage information
        backend::Usage* usage = rerankResult->mutable_usage();
        int total_tokens = 0;
        int prompt_tokens = 0;

        // Create document results
        for (const auto & response : responses) {
            backend::DocumentResult* doc_result = rerankResult->add_results();
            doc_result->set_index(response.value("index", 0));
            doc_result->set_text(request->documents(response.value("index", 0)));
            doc_result->set_relevance_score(response.value("score", 0.0f));

            // Add tokens evaluated for this document
            int tokens_evaluated = response.value("tokens_evaluated", 0);
            total_tokens += tokens_evaluated;
            prompt_tokens += tokens_evaluated;
        }

        // Set the total tokens in usage
        usage->set_total_tokens(total_tokens);
        usage->set_prompt_tokens(prompt_tokens);

        return grpc::Status::OK;
    }

    grpc::Status TokenizeString(ServerContext* context, const backend::PredictOptions* request, backend::TokenizationResponse* response) {
        json body = parse_options(false, request);
        body["stream"] = false;

        json tokens_response = json::array();
        if (body.count("prompt") != 0) {
            const bool add_special = json_value(body, "add_special", false);
            const bool with_pieces = json_value(body, "with_pieces", false);

            // tokenize the same field the guard above checked ("prompt")
            llama_tokens tokens = tokenize_mixed(ctx_server.vocab, body.at("prompt"), add_special, true);

            // note: `with_pieces`, `piece` and `tokens_response` are currently unused;
            // only the token ids are returned
            for (const auto & token : tokens) {
                std::string piece = common_token_to_piece(ctx_server.ctx, token);
                response->add_tokens(token);
            }
        }

        return grpc::Status::OK;
    }

    grpc::Status GetMetrics(ServerContext* context, const backend::MetricsRequest* request, backend::MetricsResponse* response) {
        // request slots data using task queue
        int task_id = ctx_server.queue_tasks.get_new_id();
        {
            server_task task(SERVER_TASK_TYPE_METRICS);
            task.id = task_id;
            ctx_server.queue_results.add_waiting_task_id(task_id);
            ctx_server.queue_tasks.post(std::move(task), true); // high-priority task
        }

        // get the result
        server_task_result_ptr result = ctx_server.queue_results.recv(task_id);
        ctx_server.queue_results.remove_waiting_task_id(task_id);

        if (result->is_error()) {
            // Handle case when no active slot exists
            response->set_slot_id(0);
            response->set_prompt_json_for_slot("");
            response->set_tokens_per_second(0);
            response->set_tokens_generated(0);
            response->set_prompt_tokens_processed(0);
            return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results");
        }

        // TODO: get rid of this dynamic_cast
        auto res_metrics = dynamic_cast<server_task_result_metrics*>(result.get());
        GGML_ASSERT(res_metrics != nullptr);

        // Populate the response with metrics
        response->set_slot_id(0);
        response->set_prompt_json_for_slot("");
        // tokens/s = n_tokens / (t_ms / 1000) = 1e3 / t_ms * n_tokens
        response->set_tokens_per_second(res_metrics->n_prompt_tokens_processed ? 1.e3 / res_metrics->t_prompt_processing * res_metrics->n_prompt_tokens_processed : 0.);
        response->set_tokens_generated(res_metrics->n_tokens_predicted_total);
        response->set_prompt_tokens_processed(res_metrics->n_prompt_tokens_processed_total);

        return grpc::Status::OK;
    }
};

int main(int argc, char** argv) {
    std::string server_address("localhost:50051");

    // Define long and short options
    struct option long_options[] = {
        {"addr", required_argument, nullptr, 'a'},
        {nullptr, 0, nullptr, 0}
    };

    // Parse command-line arguments
    int option;
    int option_index = 0;
    while ((option = getopt_long(argc, argv, "a:", long_options, &option_index)) != -1) {
        switch (option) {
            case 'a':
                server_address = optarg;
                break;
            default:
                std::cerr << "Usage: " << argv[0] << " [--addr=<address>] or [-a <address>]" << std::endl;
                return 1;
        }
    }

    server_context ctx_server;
    BackendServiceImpl service(ctx_server);

    ServerBuilder builder;
    builder.AddListeningPort(server_address, grpc::InsecureServerCredentials());
    builder.RegisterService(&service);
    builder.SetMaxMessageSize(50 * 1024 * 1024);        // 50MB
    builder.SetMaxSendMessageSize(50 * 1024 * 1024);    // 50MB
    builder.SetMaxReceiveMessageSize(50 * 1024 * 1024); // 50MB
    std::unique_ptr<Server> server(builder.BuildAndStart());
    // run the gRPC server in a background thread while the llama.cpp task loop runs on this one
    std::thread t([&]()
    {
        std::cout << "Server listening on " << server_address << std::endl;
        server->Wait();
    });

    // clean up function, to be called before exit
    auto clean_up = [&server, &ctx_server]() {
        SRV_INF("%s: cleaning up before exit...\n", __func__);
        server->Shutdown();
        ctx_server.queue_results.terminate();
        llama_backend_free();
    };

    start_llama_server(ctx_server);
    std::cout << "stopping" << std::endl;

    clean_up();
    t.join();

    return 0;
}
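// Usage sketch (illustrative): the binary takes only the listen address;
// model options arrive later over the LoadModel RPC. The binary name below
// assumes the grpc-server target from the accompanying Makefile.
//
//   ./grpc-server --addr=127.0.0.1:50051
//
// A minimal health check from a C++ client then looks like:
//
//   auto stub = backend::Backend::NewStub(
//       grpc::CreateChannel("127.0.0.1:50051", grpc::InsecureChannelCredentials()));
//   grpc::ClientContext cctx;
//   backend::HealthMessage hm;
//   backend::Reply r;
//   grpc::Status st = stub->Health(&cctx, hm, &r);   // r.message() == "OK" on success
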
42  backend/cpp/llama-cpp/package.sh  Executable file
@@ -0,0 +1,42 @@
#!/bin/bash

# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile

set -e

CURDIR=$(dirname "$(realpath $0)")

# Create lib directory
mkdir -p $CURDIR/package/lib

cp -avrf $CURDIR/llama-cpp-* $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/

# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
    # x86_64 architecture
    echo "Detected x86_64 architecture, copying x86_64 libraries..."
    cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
    # ARM64 architecture
    echo "Detected ARM64 architecture, copying ARM64 libraries..."
    cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
else
    echo "Error: Could not detect architecture"
    exit 1
fi

echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/
52  backend/cpp/llama-cpp/prepare.sh  Normal file
@@ -0,0 +1,52 @@
#!/bin/bash

## Patches
## Apply patches from the `patches` directory
for patch in $(ls patches); do
    echo "Applying patch $patch"
    patch -d llama.cpp/ -p1 < patches/$patch
done

set -e

cp -r CMakeLists.txt llama.cpp/tools/grpc-server/
cp -r grpc-server.cpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/nlohmann/json.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/tools/server/utils.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/cpp-httplib/httplib.h llama.cpp/tools/grpc-server/

set +e
if grep -q "grpc-server" llama.cpp/tools/CMakeLists.txt; then
    echo "grpc-server already added"
else
    echo "add_subdirectory(grpc-server)" >> llama.cpp/tools/CMakeLists.txt
fi
set -e

# To keep maximum compatibility with the original server.cpp, we remove the
# index.html.gz.hpp and loading.html.hpp includes and strip out the main function.
# TODO: upstream this to the original server.cpp by extracting the upstream main function to a separate file
awk '
/int[ \t]+main[ \t]*\(/ {   # If the line starts the main function
    in_main=1;              # Set a flag
    open_braces=0;          # Track number of open braces
}
in_main {
    open_braces += gsub(/\{/, "{");  # Count opening braces
    open_braces -= gsub(/\}/, "}");  # Count closing braces
    if (open_braces == 0) {          # If all braces are closed
        in_main=0;                   # End skipping
    }
    next;                            # Skip lines inside main
}
!in_main                             # Print lines not inside main
' "llama.cpp/tools/server/server.cpp" > llama.cpp/tools/grpc-server/server.cpp

# remove index.html.gz.hpp and loading.html.hpp includes
if [[ "$OSTYPE" == "darwin"* ]]; then
    # macOS
    sed -i '' '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
else
    # Linux and others
    sed -i '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
fi
62  backend/cpp/llama-cpp/run.sh  Executable file
@@ -0,0 +1,62 @@
#!/bin/bash
set -ex

# Get the absolute current dir where the script is located
CURDIR=$(dirname "$(realpath $0)")

cd /

echo "CPU info:"
grep -e "model\sname" /proc/cpuinfo | head -1
grep -e "flags" /proc/cpuinfo | head -1

BINARY=llama-cpp-fallback

if grep -q -e "\savx\s" /proc/cpuinfo ; then
    echo "CPU: AVX found OK"
    if [ -e $CURDIR/llama-cpp-avx ]; then
        BINARY=llama-cpp-avx
    fi
fi

if grep -q -e "\savx2\s" /proc/cpuinfo ; then
    echo "CPU: AVX2 found OK"
    if [ -e $CURDIR/llama-cpp-avx2 ]; then
        BINARY=llama-cpp-avx2
    fi
fi

# Check avx512
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
    echo "CPU: AVX512F found OK"
    if [ -e $CURDIR/llama-cpp-avx512 ]; then
        BINARY=llama-cpp-avx512
    fi
fi

if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then
    if [ -e $CURDIR/llama-cpp-grpc ]; then
        BINARY=llama-cpp-grpc
    fi
fi

# Extend the dynamic loader search path with this script's lib/ directory
if [ "$(uname)" == "Darwin" ]; then
    export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH
    #export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
else
    export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
fi

# If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then
    echo "Using lib/ld.so"
    echo "Using binary: $BINARY"
    exec $CURDIR/lib/ld.so $CURDIR/$BINARY "$@"
fi

echo "Using binary: $BINARY"
exec $CURDIR/$BINARY "$@"

# We should never reach this point; just in case we do, run the fallback
exec $CURDIR/llama-cpp-fallback "$@"
@@ -1,87 +0,0 @@
LLAMA_VERSION?=
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp

CMAKE_ARGS?=
BUILD_TYPE?=
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server

# Disable shared libs, as we are linking against static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF

# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DGGML_CUDA=ON
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (OpenCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we also have to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DGGML_HIP=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DGGML_METAL=OFF
else
CMAKE_ARGS+=-DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
TARGET+=--target ggml-metal
endif
endif

ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DGGML_SYCL=ON \
	-DCMAKE_C_COMPILER=icx \
	-DCMAKE_CXX_COMPILER=icpx \
	-DCMAKE_CXX_FLAGS="-fsycl" \
	-DGGML_SYCL_F16=ON
endif

ifeq ($(BUILD_TYPE),sycl_f32)
CMAKE_ARGS+=-DGGML_SYCL=ON \
	-DCMAKE_C_COMPILER=icx \
	-DCMAKE_CXX_COMPILER=icpx \
	-DCMAKE_CXX_FLAGS="-fsycl"
endif

llama.cpp:
	mkdir -p llama.cpp
	cd llama.cpp && \
	git init && \
	git remote add origin $(LLAMA_REPO) && \
	git fetch origin && \
	git checkout -b build $(LLAMA_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

llama.cpp/tools/grpc-server: llama.cpp
	mkdir -p llama.cpp/tools/grpc-server
	bash prepare.sh

rebuild:
	bash prepare.sh
	rm -rf grpc-server
	$(MAKE) grpc-server

purge:
	rm -rf llama.cpp/build
	rm -rf llama.cpp/tools/grpc-server
	rm -rf grpc-server

clean: purge
	rm -rf llama.cpp

grpc-server: llama.cpp llama.cpp/tools/grpc-server
	@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
	+bash -c "source $(ONEAPI_VARS); \
	cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)"
else
	+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
endif
	cp llama.cpp/build/bin/grpc-server .
24596  backend/cpp/llama/json.hpp  vendored
File diff suppressed because it is too large
@@ -1,28 +0,0 @@
#!/bin/bash

## Patches
## Apply patches from the `patches` directory
for patch in $(ls patches); do
    echo "Applying patch $patch"
    patch -d llama.cpp/ -p1 < patches/$patch
done

cp -r CMakeLists.txt llama.cpp/tools/grpc-server/
cp -r grpc-server.cpp llama.cpp/tools/grpc-server/
cp -rfv json.hpp llama.cpp/tools/grpc-server/
cp -rfv utils.hpp llama.cpp/tools/grpc-server/

if grep -q "grpc-server" llama.cpp/tools/CMakeLists.txt; then
    echo "grpc-server already added"
else
    echo "add_subdirectory(grpc-server)" >> llama.cpp/tools/CMakeLists.txt
fi

## XXX: In some versions of CMake clip wasn't being built before llama.
## This is a hack for now, but it should be fixed in the future.
# cp -rfv llama.cpp/tools/mtmd/clip.h llama.cpp/tools/grpc-server/clip.h
# cp -rfv llama.cpp/tools/mtmd/clip-impl.h llama.cpp/tools/grpc-server/clip-impl.h
# cp -rfv llama.cpp/tools/mtmd/llava.cpp llama.cpp/tools/grpc-server/llava.cpp
# echo '#include "llama.h"' > llama.cpp/tools/grpc-server/llava.h
# cat llama.cpp/tools/mtmd/llava.h >> llama.cpp/tools/grpc-server/llava.h
# cp -rfv llama.cpp/tools/mtmd/clip.cpp llama.cpp/tools/grpc-server/clip.cpp
910  backend/cpp/llama/utils.hpp  vendored
@@ -1,910 +0,0 @@
// https://github.com/ggerganov/llama.cpp/blob/master/tools/server/utils.hpp

#pragma once

#include <string>
#include <vector>
#include <set>
#include <mutex>
#include <condition_variable>
#include <unordered_map>

#include "json.hpp"

#include "../mtmd/clip.h"

using json = nlohmann::json;

extern bool server_verbose;

#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif

#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...)                                            \
    do                                                                   \
    {                                                                    \
        if (server_verbose)                                              \
        {                                                                \
            server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
        }                                                                \
    } while (0)
#endif

#define LOG_ERROR(  MSG, ...) server_log("ERROR",   __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO(   MSG, ...) server_log("INFO",    __func__, __LINE__, MSG, __VA_ARGS__)

//
// parallel
//

enum server_state {
    SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
    SERVER_STATE_READY,         // Server is ready and model is loaded
    SERVER_STATE_ERROR          // An error occurred, load_model failed
};

enum task_type {
    TASK_TYPE_COMPLETION,
    TASK_TYPE_CANCEL,
    TASK_TYPE_NEXT_RESPONSE
};

struct task_server {
    int id = -1; // to be filled by llama_server_queue
    int target_id;
    task_type type;
    json data;
    bool infill_mode = false;
    bool embedding_mode = false;
    int multitask_id = -1;
};

struct task_result {
    int id;
    int multitask_id = -1;
    bool stop;
    bool error;
    json result_json;
};

struct task_multi {
    int id;
    std::set<int> subtasks_remaining{};
    std::vector<task_result> results{};
};

// TODO: can become bool if we can't find use of more states
enum slot_state
{
    IDLE,
    PROCESSING,
};

enum slot_command
{
    NONE,
    LOAD_PROMPT,
    RELEASE,
};

struct slot_params
{
    bool stream = true;
    bool cache_prompt = false; // remember the prompt to avoid reprocessing the whole prompt

    uint32_t seed = -1;     // RNG seed
    int32_t n_keep = 0;     // number of tokens to keep from the initial prompt
    int32_t n_predict = -1; // new tokens to predict

    std::vector<std::string> antiprompt;

    json input_prefix;
    json input_suffix;
};

struct slot_image
{
    int32_t id;

    bool request_encode_image = false;
    float * image_embedding = nullptr;
    int32_t image_tokens = 0;

    clip_image_u8 * img_data;

    std::string prefix_prompt; // text that precedes this image
};

// completion token output with probabilities
struct completion_token_output
{
    struct token_prob
    {
        llama_token tok;
        float prob;
    };

    std::vector<token_prob> probs;
    llama_token tok;
    std::string text_to_send;
};

static inline void server_log(const char *level, const char *function, int line,
                              const char *message, const nlohmann::ordered_json &extra)
{
    nlohmann::ordered_json log
    {
        {"timestamp", time(nullptr)},
        {"level", level},
        {"function", function},
        {"line", line},
        {"message", message},
    };

    if (!extra.empty())
    {
        log.merge_patch(extra);
    }

    const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
    printf("%.*s\n", (int)str.size(), str.data());
    fflush(stdout);
}

//
// server utils
//

template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value)
{
    // Fall back to the default value when the key is missing or null
    return body.contains(key) && !body.at(key).is_null()
        ? body.value(key, default_value)
        : default_value;
}

inline std::string format_chatml(std::vector<json> messages)
{
    std::ostringstream chatml_msgs;

    for (auto it = messages.begin(); it != messages.end(); ++it) {
        chatml_msgs << "<|im_start|>"
                    << json_value(*it, "role", std::string("user")) << '\n';
        chatml_msgs << json_value(*it, "content", std::string(""))
                    << "<|im_end|>\n";
    }

    chatml_msgs << "<|im_start|>assistant" << '\n';

    return chatml_msgs.str();
}

//
// work queue utils
//

struct llama_server_queue {
    int id = 0;
    std::mutex mutex_tasks;
    // queues
    std::vector<task_server> queue_tasks;
    std::vector<task_server> queue_tasks_deferred;
    std::vector<task_multi> queue_multitasks;
    std::condition_variable condition_tasks;
    // callback functions
    std::function<void(task_server&)> callback_new_task;
    std::function<void(task_multi&)> callback_finish_multitask;
    std::function<void(void)> callback_all_task_finished;

    // Add a new task to the end of the queue
    int post(task_server task) {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        if (task.id == -1) {
            task.id = id++;
        }
        queue_tasks.push_back(std::move(task));
        condition_tasks.notify_one();
        return task.id;
    }

    // Add a new task, but defer until one slot is available
    void defer(task_server task) {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        queue_tasks_deferred.push_back(std::move(task));
    }

    // Get the next id for creating a new task
    int get_new_id() {
        std::unique_lock<std::mutex> lock(mutex_tasks);
        return id++;
    }

    // Register function to process a new task
    void on_new_task(std::function<void(task_server&)> callback) {
        callback_new_task = callback;
    }

    // Register function to process a multitask
    void on_finish_multitask(std::function<void(task_multi&)> callback) {
        callback_finish_multitask = callback;
    }

    // Register the function to be called when the batch of tasks is finished
    void on_all_tasks_finished(std::function<void(void)> callback) {
        callback_all_task_finished = callback;
    }

    // Call when the state of one slot is changed
    void notify_slot_changed() {
        // move deferred tasks back to main loop
        std::unique_lock<std::mutex> lock(mutex_tasks);
        for (auto & task : queue_tasks_deferred) {
            queue_tasks.push_back(std::move(task));
        }
        queue_tasks_deferred.clear();
    }

    // Start the main loop. This call is blocking
    [[noreturn]]
    void start_loop() {
        while (true) {
            // new task arrived
            LOG_VERBOSE("have new task", {});
            {
                while (true)
                {
                    std::unique_lock<std::mutex> lock(mutex_tasks);
                    if (queue_tasks.empty()) {
                        lock.unlock();
                        break;
                    }
                    task_server task = queue_tasks.front();
                    queue_tasks.erase(queue_tasks.begin());
                    lock.unlock();
                    LOG_VERBOSE("callback_new_task", {});
                    callback_new_task(task);
                }
                LOG_VERBOSE("callback_all_task_finished", {});
                // process and update all the multitasks
                auto queue_iterator = queue_multitasks.begin();
                while (queue_iterator != queue_multitasks.end())
                {
                    if (queue_iterator->subtasks_remaining.empty())
                    {
                        // all subtasks done == multitask is done
                        task_multi current_multitask = *queue_iterator;
                        callback_finish_multitask(current_multitask);
                        // remove this multitask
                        queue_iterator = queue_multitasks.erase(queue_iterator);
                    }
                    else
                    {
                        ++queue_iterator;
                    }
                }
                // all tasks in the current loop are finished
                callback_all_task_finished();
            }
            LOG_VERBOSE("wait for new task", {});
            // wait for new task
            {
                std::unique_lock<std::mutex> lock(mutex_tasks);
                if (queue_tasks.empty()) {
                    condition_tasks.wait(lock, [&]{
                        return !queue_tasks.empty();
                    });
                }
            }
        }
    }

    //
    // functions to manage multitasks
    //

    // add a multitask by specifying the id of all subtasks (a subtask is a task_server)
    void add_multitask(int multitask_id, std::vector<int>& sub_ids)
    {
        std::lock_guard<std::mutex> lock(mutex_tasks);
        task_multi multi;
        multi.id = multitask_id;
        std::copy(sub_ids.begin(), sub_ids.end(), std::inserter(multi.subtasks_remaining, multi.subtasks_remaining.end()));
        queue_multitasks.push_back(multi);
    }

    // update the remaining subtasks, while appending results to the multitask
    void update_multitask(int multitask_id, int subtask_id, task_result& result)
    {
        std::lock_guard<std::mutex> lock(mutex_tasks);
        for (auto& multitask : queue_multitasks)
        {
            if (multitask.id == multitask_id)
            {
                multitask.subtasks_remaining.erase(subtask_id);
                multitask.results.push_back(result);
            }
        }
    }
};

struct llama_server_response {
    typedef std::function<void(int, int, task_result&)> callback_multitask_t;
    callback_multitask_t callback_update_multitask;
    // for keeping track of all tasks waiting for the result
    std::set<int> waiting_task_ids;
    // the main result queue
    std::vector<task_result> queue_results;
    std::mutex mutex_results;
    std::condition_variable condition_results;

    void add_waiting_task_id(int task_id) {
        std::unique_lock<std::mutex> lock(mutex_results);
        waiting_task_ids.insert(task_id);
    }

    void remove_waiting_task_id(int task_id) {
        std::unique_lock<std::mutex> lock(mutex_results);
        waiting_task_ids.erase(task_id);
    }

    // This function blocks the thread until there is a response for this task_id
    task_result recv(int task_id) {
        while (true)
        {
            std::unique_lock<std::mutex> lock(mutex_results);
            condition_results.wait(lock, [&]{
                return !queue_results.empty();
            });
            LOG_VERBOSE("condition_results unblock", {});

            for (int i = 0; i < (int) queue_results.size(); i++)
            {
                if (queue_results[i].id == task_id)
                {
                    assert(queue_results[i].multitask_id == -1);
                    task_result res = queue_results[i];
                    queue_results.erase(queue_results.begin() + i);
                    return res;
                }
            }
        }

        // should never reach here
    }

    // Register the function to update a multitask
    void on_multitask_update(callback_multitask_t callback) {
        callback_update_multitask = callback;
    }

    // Send a new result to a waiting task_id
    void send(task_result result) {
        std::unique_lock<std::mutex> lock(mutex_results);
        LOG_VERBOSE("send new result", {});
        for (auto& task_id : waiting_task_ids) {
            // LOG_TEE("waiting task id %i \n", task_id);
            // for now, tasks that have an associated parent multitask just get erased once the multitask picks up the result
            if (result.multitask_id == task_id)
            {
                LOG_VERBOSE("callback_update_multitask", {});
                callback_update_multitask(task_id, result.id, result);
                continue;
            }

            if (result.id == task_id)
            {
                LOG_VERBOSE("queue_results.push_back", {});
                queue_results.push_back(result);
                condition_results.notify_one();
                return;
            }
        }
    }
};

//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c)
{
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string)
{
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_]))
    {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4)
        {
            for (i = 0; i < 4; i++)
            {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++)
            {
                ret.push_back(char_array_3[i]);
            }
            i = 0;
        }
    }

    if (i)
    {
        for (j = i; j < 4; j++)
        {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++)
        {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; (j < i - 1); j++)
        {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}

//
|
|
||||||
// tokenizer and input processing utils
|
|
||||||
//
|
|
||||||
|
|
||||||
static bool json_is_array_of_numbers(const json & data) {
|
|
||||||
if (data.is_array()) {
|
|
||||||
for (const auto & e : data) {
|
|
||||||
if (!e.is_number_integer()) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// is array having BOTH numbers & strings?
|
|
||||||
static bool json_is_array_of_mixed_numbers_strings(const json & data) {
|
|
||||||
bool seen_string = false;
|
|
||||||
bool seen_number = false;
|
|
||||||
if (data.is_array()) {
|
|
||||||
for (const auto & e : data) {
|
|
||||||
seen_string |= e.is_string();
|
|
||||||
seen_number |= e.is_number_integer();
|
|
||||||
if (seen_number && seen_string) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
// get value by path(key1 / key2)
|
|
||||||
static json json_get_nested_values(const std::vector<std::string> & paths, const json & js) {
|
|
||||||
json result = json::object();
|
|
||||||
|
|
||||||
for (const std::string & path : paths) {
|
|
||||||
json current = js;
|
|
||||||
const auto keys = string_split<std::string>(path, /*separator*/ '/');
|
|
||||||
bool valid_path = true;
|
|
||||||
for (const std::string & k : keys) {
|
|
||||||
if (valid_path && current.is_object() && current.contains(k)) {
|
|
||||||
current = current[k];
|
|
||||||
} else {
|
|
||||||
valid_path = false;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (valid_path) {
|
|
||||||
result[path] = current;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result;
|
|
||||||
}

/**
 * this handles 2 cases:
 * - only string, example: "string"
 * - mixed string and tokens, example: [12, 34, "string", 56, 78]
 */
static llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    // If `add_special` is true, we only add BOS when json_prompt is a string,
    // or when the first element of the json_prompt array is a string.
    llama_tokens prompt_tokens;

    if (json_prompt.is_array()) {
        bool first = true;
        for (const auto & p : json_prompt) {
            if (p.is_string()) {
                auto s = p.template get<std::string>();

                llama_tokens p;
                if (first) {
                    p = common_tokenize(vocab, s, add_special, parse_special);
                    first = false;
                } else {
                    p = common_tokenize(vocab, s, false, parse_special);
                }

                prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
            } else {
                if (first) {
                    first = false;
                }

                prompt_tokens.push_back(p.template get<llama_token>());
            }
        }
    } else {
        auto s = json_prompt.template get<std::string>();
        prompt_tokens = common_tokenize(vocab, s, add_special, parse_special);
    }

    return prompt_tokens;
}
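
// Illustrative example (not in the original source): for a mixed prompt
// [12, 34, "hello", 56], tokenize_mixed keeps 12 and 34 as raw token ids,
// tokenizes "hello" with common_tokenize (no BOS here, since the first array
// element is not a string), then appends 56.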

/**
 * break the input "prompt" object into multiple prompts if needed, then tokenize them
 * this supports these cases:
 * - "prompt": "string"
 * - "prompt": [12, 34, 56]
 * - "prompt": [12, 34, "string", 56, 78]
 * and multiple prompts (multi-tasks):
 * - "prompt": ["string1", "string2"]
 * - "prompt": ["string1", [12, 34, 56]]
 * - "prompt": [[12, 34, 56], [78, 90, 12]]
 * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]]
 */
static std::vector<llama_tokens> tokenize_input_prompts(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    std::vector<llama_tokens> result;
    if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
        // string or mixed
        result.push_back(tokenize_mixed(vocab, json_prompt, add_special, parse_special));
    } else if (json_is_array_of_numbers(json_prompt)) {
        // array of tokens
        result.push_back(json_prompt.get<llama_tokens>());
    } else if (json_prompt.is_array()) {
        // array of prompts
        result.reserve(json_prompt.size());
        for (const auto & p : json_prompt) {
            if (p.is_string() || json_is_array_of_mixed_numbers_strings(p)) {
                result.push_back(tokenize_mixed(vocab, p, add_special, parse_special));
            } else if (json_is_array_of_numbers(p)) {
                // array of tokens
                result.push_back(p.get<llama_tokens>());
            } else {
                throw std::runtime_error("element of \"prompt\" must be a string, a list of tokens, or a list of mixed strings & tokens");
            }
        }
    } else {
        throw std::runtime_error("\"prompt\" must be a string, a list of tokens, a list of mixed strings & tokens, or a list of prompts");
    }
    if (result.empty()) {
        throw std::runtime_error("\"prompt\" must not be empty");
    }
    return result;
}
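
// Usage sketch (illustrative, not from the original source; assumes `vocab`
// was obtained via llama_model_get_vocab and nlohmann::json is in scope):
//
//   json prompt = json::parse(R"(["string1", [12, 34, 56]])");
//   auto tasks  = tokenize_input_prompts(vocab, prompt, /*add_special*/ true, /*parse_special*/ true);
//   // tasks.size() == 2: one tokenized string, one raw token list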

//
// utils for interacting with libmtmd
// (may need to refactor in near future)
//

/**
 * server_tokens is a helper to manage the input tokens and image for the server.
 * it is made this way to simplify the logic of KV cache management.
 */
struct server_tokens {
    bool has_mtmd = false;

private: // disallow accessing these members directly, risking out-of-sync

    // map a **start** position in tokens to the image chunk
    std::unordered_map<llama_pos, mtmd::input_chunk_ptr> map_pos_to_image;

    // list of tokens
    // it can include LLAMA_TOKEN_NULL, which is used to indicate a token that is not a text token
    // a mtmd_input_chunk can occupy multiple tokens, one llama_token per **position**
    // important: for models using mrope, an image can contain multiple tokens but will use only one **position**
    llama_tokens tokens;

    // for ex. with input of 5 text tokens and 2 images:
    //      [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1]
    // pos   0   1   2   3   4    5      6      7      8      9
    // map_pos_to_image will contain: {5, img0}, {8, img1}

public:
    server_tokens() = default;
    ~server_tokens() = default;

    // Prevent copying
    server_tokens(const server_tokens&) = delete;
    server_tokens& operator=(const server_tokens&) = delete;

    // Allow moving (usually implicitly generated if members are movable)
    server_tokens(server_tokens&&) = default;
    server_tokens& operator=(server_tokens&&) = default;

    // Allow accessing elements using [] operator
    llama_token operator[](size_t index) { return tokens[index]; }
    const llama_token& operator[](size_t index) const { return tokens[index]; }

    server_tokens(mtmd::input_chunks & mtmd_chunks, bool has_mtmd) : has_mtmd(has_mtmd) {
        for (size_t i = 0; i < mtmd_chunks.size(); ++i) {
            push_back(mtmd_chunks[i]);
        }
    }

    server_tokens(llama_tokens & tokens, bool has_mtmd) : has_mtmd(has_mtmd), tokens(tokens) {}

    // for debugging
    std::string str() const {
        std::ostringstream oss;
        oss << "tokens: ";
        for (const auto & t : tokens) {
            if (t == LLAMA_TOKEN_NULL) {
                oss << "<embd> ";
            } else {
                oss << t << " ";
            }
        }
        oss << "\n";
        oss << "image pos: ";
        for (const auto & it : map_pos_to_image) {
            oss << it.first << ", ";
        }
        return oss.str();
    }

    const mtmd::input_chunk_ptr & find_chunk(llama_pos pos) const {
        auto it = map_pos_to_image.find(pos);
        if (it != map_pos_to_image.end()) {
            return it->second;
        } else {
            throw std::runtime_error("Chunk not found");
        }
    }

    void push_back(llama_token tok) {
        if (tok == LLAMA_TOKEN_NULL) {
            throw std::runtime_error("Invalid token");
        }
        tokens.emplace_back(tok);
    }

    // will create a copy of the chunk if it contains non-text data
    void push_back(const mtmd_input_chunk * chunk) {
        auto type = mtmd_input_chunk_get_type(chunk);
        if (type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
            GGML_ASSERT(has_mtmd);
            auto img_tokens = mtmd_input_chunk_get_tokens_image(chunk);
            const int n_pos = mtmd_image_tokens_get_n_pos(img_tokens);
            llama_pos start_pos = tokens.size();
            for (int i = 0; i < n_pos; ++i) {
                tokens.emplace_back(LLAMA_TOKEN_NULL);
            }
            mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk));
            map_pos_to_image[start_pos] = std::move(new_chunk);
        } else if (type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
            size_t n_tokens;
            auto text_tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
            for (size_t i = 0; i < n_tokens; ++i) {
                push_back(text_tokens[i]);
            }
        } else {
            GGML_ABORT("Invalid chunk type");
        }
    }

    // for compatibility with context shift and prompt truncation
    void insert(const llama_tokens & inp_tokens) {
        GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
        tokens.insert(tokens.end(), inp_tokens.begin(), inp_tokens.end());
    }

    // for compatibility with speculative decoding, ctx shift, slot save/load
    const llama_tokens & get_text_tokens() const {
        GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
        return tokens;
    }

    // for compatibility with speculative decoding
    void set_token(llama_pos pos, llama_token id) {
        GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
        tokens[pos] = id;
    }

    size_t size() const {
        return tokens.size();
    }

    bool empty() const {
        return tokens.empty();
    }

    void clear() {
        tokens.clear();
    }

    void resize(size_t n) {
        GGML_ASSERT(n <= tokens.size());
        if (has_mtmd) {
            // we throw an error if we try to remove a token in the middle of an image
            // for ex. with input of 5 text tokens and 2 images:
            //    [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1]
            // n  1   2   3   4   5   6      7      8      9      10
            // allowed to resize      ^                    ^
            // disallowed to resize          ^      ^             ^
            if (n > 0) {
                llama_token last_token = tokens[n - 1];
                // make sure we never remove tokens in the middle of an image
                if (last_token == LLAMA_TOKEN_NULL) {
                    find_chunk(n - 1); // will throw an error if the token is not begin-of-chunk
                }
            }
            // remove all image chunks that are not used anymore
            for (auto it = map_pos_to_image.begin(); it != map_pos_to_image.end(); ) {
                llama_pos pos = it->first;
                if (pos >= (llama_pos)n) {
                    it = map_pos_to_image.erase(it);
                } else {
                    ++it;
                }
            }
        }
        tokens.resize(n);
    }

    std::string detokenize(const llama_context * ctx, bool special) const {
        llama_tokens text_tokens;
        text_tokens.reserve(tokens.size());
        for (const auto & t : tokens) {
            if (t != LLAMA_TOKEN_NULL) {
                text_tokens.push_back(t);
            }
        }
        return common_detokenize(ctx, text_tokens, special);
    }

    size_t get_common_prefix(const server_tokens & b) const {
        size_t max_idx = std::min(tokens.size(), b.tokens.size());
        for (size_t i = 0; i < max_idx; ++i) {
            auto & ai =   tokens[i];
            auto & bi = b.tokens[i];

            if (ai == LLAMA_TOKEN_NULL && bi == LLAMA_TOKEN_NULL) {
                GGML_ASSERT(has_mtmd);
                const auto & a_chunk =   find_chunk(i);
                const auto & b_chunk = b.find_chunk(i);
                GGML_ASSERT(a_chunk && b_chunk);
                const auto * a_img = mtmd_input_chunk_get_tokens_image(a_chunk.get());
                const auto * b_img = mtmd_input_chunk_get_tokens_image(b_chunk.get());
                std::string ai_id  = mtmd_image_tokens_get_id(a_img);
                std::string bi_id  = mtmd_image_tokens_get_id(b_img);
                size_t a_pos = mtmd_image_tokens_get_n_pos(a_img);
                size_t b_pos = mtmd_image_tokens_get_n_pos(b_img);
                if (ai_id == bi_id && a_pos == b_pos) {
                    GGML_ASSERT(a_pos > 0 && "Invalid image token"); // should never happen
                    i += a_pos - 1; // will be +1 by the for loop
                    continue;
                } else {
                    return i;
                }
            } else if (ai == bi) {
                continue;
            } else {
                return i;
            }
        }
        return max_idx; // all tokens are equal
    }

    // make sure all text tokens are within the vocab range
    bool validate(const struct llama_context * ctx) const {
        const llama_model * model = llama_get_model(ctx);
        const llama_vocab * vocab = llama_model_get_vocab(model);
        const int32_t n_vocab = llama_vocab_n_tokens(vocab);

        for (size_t i = 0; i < tokens.size(); ++i) {
            auto & t = tokens[i];
            if (t == LLAMA_TOKEN_NULL) {
                try {
                    const auto & chunk = find_chunk(i);
                    const auto * img_tokens = mtmd_input_chunk_get_tokens_image(chunk.get());
                    size_t n_pos = mtmd_image_tokens_get_n_pos(img_tokens);
                    i += n_pos - 1; // will be +1 by the for loop
                } catch (const std::exception & e) {
                    return false;
                }
            } else if (t < 0 || t >= n_vocab) {
                return false;
            }
        }
        return true;
    }

    // encode and decode the image chunk
    int32_t process_chunk(
            llama_context * ctx,
            mtmd_context * mctx,
            llama_pos n_past,
            int32_t seq_id,
            llama_pos & n_pos_out) {
        auto it = map_pos_to_image.find(n_past);
        if (it == map_pos_to_image.end()) {
            throw std::runtime_error("Chunk not found");
        }
        // SRV_INF("%s\n", "processing image...");
        int32_t n_batch = llama_n_batch(ctx);
        int64_t t0 = ggml_time_ms();
        llama_pos new_n_past = n_past;
        int32_t result = mtmd_helper_eval_chunk_single(mctx, ctx,
            it->second.get(), // chunk
            n_past,
            seq_id,
            n_batch,
            true, // logits last
            &new_n_past);
        // SRV_INF("image processed in %" PRId64 " ms\n", ggml_time_ms() - t0);
        if (result != 0) {
            LOG_ERR("mtmd_helper_eval failed with status %d", result);
            n_pos_out = n_past;
            return result;
        }
        n_pos_out = new_n_past;
        return 0;
    }
};
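
// Usage sketch (illustrative, not from the original source; `chunks` is
// assumed to be the output of mtmd tokenization, and `ctx`/`mctx` valid
// llama/mtmd contexts):
//
//   server_tokens prompt(chunks, /*has_mtmd*/ true);
//   llama_pos n_pos_out = 0;
//   if (prompt[n_past] == LLAMA_TOKEN_NULL) {
//       // decoding reached the start position of an image chunk
//       prompt.process_chunk(ctx, mctx, n_past, /*seq_id*/ 0, n_pos_out);
//   }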

// Computes FNV-1a hash of the data
static std::string fnv_hash(const uint8_t * data, size_t len) {
    const uint64_t fnv_prime = 0x100000001b3ULL;
    uint64_t hash = 0xcbf29ce484222325ULL;

    for (size_t i = 0; i < len; ++i) {
        hash ^= data[i];
        hash *= fnv_prime;
    }
    return std::to_string(hash);
}
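
// Illustrative use (an assumption, not shown in this diff): hashing raw image
// bytes to derive a stable cache identifier.
//
//   std::vector<uint8_t> img = load_image_bytes("cat.png"); // hypothetical helper
//   std::string id = fnv_hash(img.data(), img.size());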

backend/go/bark-cpp/Makefile (new file)
@@ -0,0 +1,51 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)

AR?=ar

CMAKE_ARGS?=-DGGML_NATIVE=OFF
BUILD_TYPE?=
GOCMD=go
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/bark.cpp/examples -I$(INCLUDE_PATH)/sources/bark.cpp/encodec.cpp/ggml/include -I$(INCLUDE_PATH)/sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/sources/bark.cpp/build/examples -lbark -lstdc++ -lm

# bark.cpp
BARKCPP_REPO?=https://github.com/PABannier/bark.cpp.git
BARKCPP_VERSION?=5d5be84f089ab9ea53b7a793f088d3fbf7247495

# warnings
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function

## bark.cpp
sources/bark.cpp:
	git clone --recursive $(BARKCPP_REPO) sources/bark.cpp && \
	cd sources/bark.cpp && \
	git checkout $(BARKCPP_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/bark.cpp/build/libbark.a: sources/bark.cpp
	cd sources/bark.cpp && \
	mkdir -p build && \
	cd build && \
	cmake $(CMAKE_ARGS) .. && \
	cmake --build . --config Release

gobark.o:
	$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)

libbark.a: sources/bark.cpp/build/libbark.a gobark.o
	cp $(INCLUDE_PATH)/sources/bark.cpp/build/libbark.a ./
	$(AR) rcs libbark.a gobark.o

bark-cpp: libbark.a
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH="$(CURDIR)" LIBRARY_PATH=$(CURDIR) \
	$(GOCMD) build -v -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o bark-cpp ./

package:
	bash package.sh

build: bark-cpp package

clean:
	rm -f gobark.o libbark.a
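# Usage note (an assumption, not part of the diff): from backend/go/bark-cpp,
# `make build` clones and compiles bark.cpp, links libbark.a, builds the
# bark-cpp binary via cgo, and finally packages it with package.sh.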
@@ -48,7 +48,7 @@ int tts(char *text,int threads, char *dst ) {

     // generate audio
     if (!bark_generate_audio(c, text, threads)) {
-        fprintf(stderr, "%s: An error occured. If the problem persists, feel free to open an issue to report it.\n", __func__);
+        fprintf(stderr, "%s: An error occurred. If the problem persists, feel free to open an issue to report it.\n", __func__);
         return 1;
     }
@@ -1,7 +1,7 @@
 package main

-// #cgo CXXFLAGS: -I${SRCDIR}/../../../sources/bark.cpp/ -I${SRCDIR}/../../../sources/bark.cpp/encodec.cpp -I${SRCDIR}/../../../sources/bark.cpp/examples -I${SRCDIR}/../../../sources/bark.cpp/spm-headers
+// #cgo CXXFLAGS: -I${SRCDIR}/sources/bark.cpp/ -I${SRCDIR}/sources/bark.cpp/encodec.cpp -I${SRCDIR}/sources/bark.cpp/encodec.cpp/ggml/include -I${SRCDIR}/sources/bark.cpp/examples -I${SRCDIR}/sources/bark.cpp/spm-headers
-// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/../../../sources/bark.cpp/build/examples -L${SRCDIR}/../../../sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon
+// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/sources/bark.cpp/build/examples -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ggml/src/ -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon -lggml -lgomp
 // #include <gobark.h>
 // #include <stdlib.h>
 import "C"
backend/go/bark-cpp/package.sh (new executable file)
@@ -0,0 +1,41 @@
#!/bin/bash

# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile

set -e

CURDIR=$(dirname "$(realpath $0)")

# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avrf $CURDIR/bark-cpp $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/

# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
    # x86_64 architecture
    echo "Detected x86_64 architecture, copying x86_64 libraries..."
    cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
    # ARM64 architecture
    echo "Detected ARM64 architecture, copying ARM64 libraries..."
    cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
else
    echo "Error: Could not detect architecture"
    exit 1
fi

echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/
backend/go/bark-cpp/run.sh (new executable file)
@@ -0,0 +1,13 @@
#!/bin/bash
set -ex

CURDIR=$(dirname "$(realpath $0)")
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH

# If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then
    echo "Using lib/ld.so"
    exec $CURDIR/lib/ld.so $CURDIR/bark-cpp "$@"
fi

exec $CURDIR/bark-cpp "$@"
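# Note (an assumption about intent): exec'ing the bundled dynamic loader makes
# the packaged binary resolve the glibc copied into lib/ by package.sh, so it
# runs regardless of the host's system libraries.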
(deleted file)
@@ -1,25 +0,0 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)

AR?=ar

BUILD_TYPE?=
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/../../../sources/bark.cpp/examples -I$(INCLUDE_PATH)/../../../sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/../../../sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/../../../sources/bark.cpp/build/examples -lbark -lstdc++ -lm

# warnings
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function

gobark.o:
	$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)

libbark.a: gobark.o
	cp $(INCLUDE_PATH)/../../../sources/bark.cpp/build/libbark.a ./
	$(AR) rcs libbark.a gobark.o
	$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml.c.o
	$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml-alloc.c.o
	$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml-backend.c.o

clean:
	rm -f gobark.o libbark.a
backend/go/huggingface/Makefile (new file)
@@ -0,0 +1,9 @@
GOCMD=go

huggingface:
	CGO_ENABLED=0 $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o huggingface ./

package:
	bash package.sh

build: huggingface package
backend/go/huggingface/package.sh (new executable file)
@@ -0,0 +1,12 @@
#!/bin/bash

# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile

set -e

CURDIR=$(dirname "$(realpath $0)")

mkdir -p $CURDIR/package
cp -avrf $CURDIR/huggingface $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/
backend/go/huggingface/run.sh (new executable file)
@@ -0,0 +1,6 @@
#!/bin/bash
set -ex

CURDIR=$(dirname "$(realpath $0)")

exec $CURDIR/huggingface "$@"
(deleted file)
@@ -1,135 +0,0 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)

AR?=ar
CMAKE_ARGS?=
BUILD_TYPE?=
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC

GOCMD?=go
CGO_LDFLAGS?=
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
CGO_LDFLAGS_SYCL=
GO_TAGS?=
LD_FLAGS?=

# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF

# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
	CMAKE_ARGS+=-DSD_CUDA=ON
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
	CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
	CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
	CMAKE_ARGS+=-DSD_HIPBLAS=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(OS),Darwin)
	ifneq ($(BUILD_TYPE),metal)
		CMAKE_ARGS+=-DSD_METAL=OFF
	else
		CMAKE_ARGS+=-DSD_METAL=ON
		CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
		TARGET+=--target ggml-metal
	endif
endif

ifeq ($(BUILD_TYPE),sycl_f16)
	CMAKE_ARGS+=-DGGML_SYCL=ON \
		-DCMAKE_C_COMPILER=icx \
		-DCMAKE_CXX_COMPILER=icpx \
		-DSD_SYCL=ON \
		-DGGML_SYCL_F16=ON
	CC=icx
	CXX=icpx
	CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
	CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
	CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
	CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif

ifeq ($(BUILD_TYPE),sycl_f32)
	CMAKE_ARGS+=-DGGML_SYCL=ON \
		-DCMAKE_C_COMPILER=icx \
		-DCMAKE_CXX_COMPILER=icpx \
		-DSD_SYCL=ON
	CC=icx
	CXX=icpx
	CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
	CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
	CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
	CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif

# warnings
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function

# Find all .a archives in ARCHIVE_DIR
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
GGML_ARCHIVE_DIR := build/ggml/src/
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')

# Name of the single merged library
COMBINED_LIB := libggmlall.a

# Rule to merge all the .a files into one
$(COMBINED_LIB): $(ALL_ARCHIVES)
	@echo "Merging all .a into $(COMBINED_LIB)"
	rm -f $@
	mkdir -p merge-tmp
	for a in $(ALL_ARCHIVES); do \
		( cd merge-tmp && ar x ../$$a ); \
	done
	( cd merge-tmp && ar rcs ../$@ *.o )
	# Ensure we have a proper index
	ranlib $@
	# Clean up
	rm -rf merge-tmp

build/libstable-diffusion.a:
	@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
	+bash -c "source $(ONEAPI_VARS); \
		mkdir -p build && \
		cd build && \
		cmake $(CMAKE_ARGS) ../../../../../sources/stablediffusion-ggml.cpp && \
		cmake --build . --config Release"
else
	mkdir -p build && \
	cd build && \
	cmake $(CMAKE_ARGS) ../../../../../sources/stablediffusion-ggml.cpp && \
	cmake --build . --config Release
endif
	$(MAKE) $(COMBINED_LIB)

gosd.o:
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
	+bash -c "source $(ONEAPI_VARS); \
	$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
else
	$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
endif

libsd.a: gosd.o
	cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
	$(AR) rcs libsd.a gosd.o

stablediffusion-ggml:
	CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
	CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o ../../../../backend-assets/grpc/stablediffusion-ggml ./
ifneq ($(UPX),)
	$(UPX) ../../../../backend-assets/grpc/stablediffusion-ggml
endif

clean:
	rm -rf gosd.o libsd.a build $(COMBINED_LIB)
(deleted file)
@@ -1,231 +0,0 @@
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "gosd.h"

// #include "preprocessing.hpp"
#include "flux.hpp"
#include "stable-diffusion.h"

#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_STATIC
#include "stb_image.h"

#define STB_IMAGE_WRITE_IMPLEMENTATION
#define STB_IMAGE_WRITE_STATIC
#include "stb_image_write.h"

#define STB_IMAGE_RESIZE_IMPLEMENTATION
#define STB_IMAGE_RESIZE_STATIC
#include "stb_image_resize.h"

// Names of the sampler methods, same order as enum sample_method in stable-diffusion.h
const char* sample_method_str[] = {
    "euler_a",
    "euler",
    "heun",
    "dpm2",
    "dpm++2s_a",
    "dpm++2m",
    "dpm++2mv2",
    "ipndm",
    "ipndm_v",
    "lcm",
    "ddim_trailing",
    "tcd",
};

// Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h
const char* schedule_str[] = {
    "default",
    "discrete",
    "karras",
    "exponential",
    "ays",
    "gits",
};

sd_ctx_t* sd_c;

sample_method_t sample_method;

int load_model(char *model, char* options[], int threads, int diff) {
    fprintf(stderr, "Loading model!\n");

    char *stableDiffusionModel = "";
    if (diff == 1) {
        stableDiffusionModel = model;
        model = "";
    }

    // decode options. Options are in the form optname:optvalue, or just optname for booleans.
    char *clip_l_path = "";
    char *clip_g_path = "";
    char *t5xxl_path = "";
    char *vae_path = "";
    char *scheduler = "";
    char *sampler = "";

    // If options is not NULL, parse options
    for (int i = 0; options[i] != NULL; i++) {
        char *optname = strtok(options[i], ":");
        char *optval = strtok(NULL, ":");
        if (optval == NULL) {
            optval = "true";
        }

        if (!strcmp(optname, "clip_l_path")) {
            clip_l_path = optval;
        }
        if (!strcmp(optname, "clip_g_path")) {
            clip_g_path = optval;
        }
        if (!strcmp(optname, "t5xxl_path")) {
            t5xxl_path = optval;
        }
        if (!strcmp(optname, "vae_path")) {
            vae_path = optval;
        }
        if (!strcmp(optname, "scheduler")) {
            scheduler = optval;
        }
        if (!strcmp(optname, "sampler")) {
            sampler = optval;
        }
    }

    int sample_method_found = -1;
    for (int m = 0; m < N_SAMPLE_METHODS; m++) {
        if (!strcmp(sampler, sample_method_str[m])) {
            sample_method_found = m;
        }
    }
    if (sample_method_found == -1) {
        fprintf(stderr, "Invalid sample method, default to EULER_A!\n");
        sample_method_found = EULER_A;
    }
    sample_method = (sample_method_t)sample_method_found;

    int schedule_found = -1;
    for (int d = 0; d < N_SCHEDULES; d++) {
        if (!strcmp(scheduler, schedule_str[d])) {
            schedule_found = d;
            fprintf(stderr, "Found scheduler: %s\n", scheduler);
        }
    }

    if (schedule_found == -1) {
        fprintf(stderr, "Invalid scheduler! using DEFAULT\n");
        schedule_found = DEFAULT;
    }

    schedule_t schedule = (schedule_t)schedule_found;

    fprintf(stderr, "Creating context\n");
    sd_ctx_t* sd_ctx = new_sd_ctx(model,
        clip_l_path,
        clip_g_path,
        t5xxl_path,
        stableDiffusionModel,
        vae_path,
        "",
        "",
        "",
        "",
        "",
        false,
        false,
        false,
        threads,
        SD_TYPE_COUNT,
        STD_DEFAULT_RNG,
        schedule,
        false,
        false,
        false,
        false);

    if (sd_ctx == NULL) {
        fprintf(stderr, "failed loading model (generic error)\n");
        return 1;
    }
    fprintf(stderr, "Created context: OK\n");

    sd_c = sd_ctx;

    return 0;
}

int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale) {

    sd_image_t* results;

    std::vector<int> skip_layers = {7, 8, 9};

    fprintf(stderr, "Generating image\n");

    results = txt2img(sd_c,
        text,
        negativeText,
        -1, // clip_skip
        cfg_scale, // cfg_scale
        3.5f,
        0, // eta
        width,
        height,
        sample_method,
        steps,
        seed,
        1,
        NULL,
        0.9f,
        20.f,
        false,
        "",
        skip_layers.data(),
        skip_layers.size(),
        0,
        0.01,
        0.2);

    if (results == NULL) {
        fprintf(stderr, "NO results\n");
        return 1;
    }

    if (results[0].data == NULL) {
        fprintf(stderr, "Results with no data\n");
        return 1;
    }

    fprintf(stderr, "Writing PNG\n");

    fprintf(stderr, "DST: %s\n", dst);
    fprintf(stderr, "Width: %d\n", results[0].width);
    fprintf(stderr, "Height: %d\n", results[0].height);
    fprintf(stderr, "Channel: %d\n", results[0].channel);
    fprintf(stderr, "Data: %p\n", results[0].data);

    stbi_write_png(dst, results[0].width, results[0].height, results[0].channel,
        results[0].data, 0, NULL);
    fprintf(stderr, "Saved resulting image to '%s'\n", dst);

    // TODO: free results. Why does it crash?

    free(results[0].data);
    results[0].data = NULL;
    free(results);
    fprintf(stderr, "gen_image is done\n");

    return 0;
}

int unload() {
    free_sd_ctx(sd_c);
}
(deleted file)
@@ -1,96 +0,0 @@
package main

// #cgo CXXFLAGS: -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp/ggml/include
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
// #include <gosd.h>
// #include <stdlib.h>
import "C"

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"unsafe"

	"github.com/mudler/LocalAI/pkg/grpc/base"
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
	"github.com/mudler/LocalAI/pkg/utils"
)

type SDGGML struct {
	base.SingleThread
	threads      int
	sampleMethod string
	cfgScale     float32
}

func (sd *SDGGML) Load(opts *pb.ModelOptions) error {

	sd.threads = int(opts.Threads)

	modelFile := C.CString(opts.ModelFile)
	defer C.free(unsafe.Pointer(modelFile))

	var options **C.char
	// prepare the options array to pass to C

	size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
	length := C.size_t(len(opts.Options))
	options = (**C.char)(C.malloc(length * size))
	view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0:len(opts.Options):len(opts.Options)]

	var diffusionModel int

	var oo []string
	for _, op := range opts.Options {
		if op == "diffusion_model" {
			diffusionModel = 1
			continue
		}

		// If it's an option path, we resolve absolute path from the model path
		if strings.Contains(op, ":") && strings.Contains(op, "path") {
			data := strings.Split(op, ":")
			data[1] = filepath.Join(opts.ModelPath, data[1])
			if err := utils.VerifyPath(data[1], opts.ModelPath); err == nil {
				oo = append(oo, strings.Join(data, ":"))
			}
		} else {
			oo = append(oo, op)
		}
	}

	fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)

	for i, x := range oo {
		view[i] = C.CString(x)
	}

	sd.cfgScale = opts.CFGScale

	ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
	if ret != 0 {
		return fmt.Errorf("could not load model")
	}

	return nil
}

func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
	t := C.CString(opts.PositivePrompt)
	defer C.free(unsafe.Pointer(t))

	dst := C.CString(opts.Dst)
	defer C.free(unsafe.Pointer(dst))

	negative := C.CString(opts.NegativePrompt)
	defer C.free(unsafe.Pointer(negative))

	ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
	if ret != 0 {
		return fmt.Errorf("inference failed")
	}

	return nil
}
(deleted file)
@@ -1,8 +0,0 @@
#ifdef __cplusplus
extern "C" {
#endif
int load_model(char *model, char* options[], int threads, int diffusionModel);
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
#ifdef __cplusplus
}
#endif
@@ -58,6 +58,9 @@ func (llm *LLM) Load(opts *pb.ModelOptions) error {
 	if opts.Embeddings {
 		llamaOpts = append(llamaOpts, llama.EnableEmbeddings)
 	}
+	if opts.Reranking {
+		llamaOpts = append(llamaOpts, llama.EnableReranking)
+	}
 	if opts.NGPULayers != 0 {
 		llamaOpts = append(llamaOpts, llama.SetGPULayers(int(opts.NGPULayers)))
 	}
backend/go/local-store/Makefile (new file)
@@ -0,0 +1,9 @@
GOCMD=go

local-store:
	CGO_ENABLED=0 $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o local-store ./

package:
	bash package.sh

build: local-store package
backend/go/local-store/package.sh (new executable file)
@@ -0,0 +1,12 @@
#!/bin/bash

# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile

set -e

CURDIR=$(dirname "$(realpath $0)")

mkdir -p $CURDIR/package
cp -avrf $CURDIR/local-store $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/
backend/go/local-store/run.sh (new executable file)
@@ -0,0 +1,6 @@
#!/bin/bash
set -ex

CURDIR=$(dirname "$(realpath $0)")

exec $CURDIR/local-store "$@"
@@ -4,6 +4,7 @@ package main
 // It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
 import (
 	"container/heap"
+	"errors"
 	"fmt"
 	"math"
 	"slices"
@@ -99,6 +100,9 @@ func sortIntoKeySlicese(keys []*pb.StoresKey) [][]float32 {
 }

 func (s *Store) Load(opts *pb.ModelOptions) error {
+	if opts.Model != "" {
+		return errors.New("not implemented")
+	}
 	return nil
 }
@@ -315,7 +319,7 @@ func isNormalized(k []float32) bool {

 	for _, v := range k {
 		v64 := float64(v)
-		sum += v64*v64
+		sum += v64 * v64
 	}

 	s := math.Sqrt(sum)
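A sketch of the surrounding function this hunk touches (an assumption based on
the visible context, not the full diff): isNormalized appears to test whether a
vector has unit L2 norm.

	// sum of squares, then compare sqrt(sum) against 1 within a tolerance
	sum := 0.0
	for _, v := range k {
		v64 := float64(v)
		sum += v64 * v64
	}
	s := math.Sqrt(sum) // s ≈ 1 for a normalized vector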
backend/go/piper/Makefile (new file)
@@ -0,0 +1,37 @@
# go-piper version
PIPER_REPO?=https://github.com/mudler/go-piper
PIPER_VERSION?=e10ca041a885d4a8f3871d52924b47792d5e5aa0

CURRENT_DIR=$(abspath ./)
GOCMD=go

PIPER_CGO_CXXFLAGS+=-I$(CURRENT_DIR)/sources/go-piper/piper/src/cpp -I$(CURRENT_DIR)/sources/go-piper/piper/build/fi/include -I$(CURRENT_DIR)/sources/go-piper/piper/build/pi/include -I$(CURRENT_DIR)/sources/go-piper/piper/build/si/include
PIPER_CGO_LDFLAGS+=-L$(CURRENT_DIR)/sources/go-piper/piper/build/fi/lib -L$(CURRENT_DIR)/sources/go-piper/piper/build/pi/lib -L$(CURRENT_DIR)/sources/go-piper/piper/build/si/lib -lfmt -lspdlog -lucd

## go-piper
sources/go-piper:
	mkdir -p sources/go-piper
	cd sources/go-piper && \
	git init && \
	git remote add origin $(PIPER_REPO) && \
	git fetch origin && \
	git checkout $(PIPER_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

sources/go-piper/libpiper_binding.a: sources/go-piper
	$(MAKE) -C sources/go-piper libpiper_binding.a example/main piper.o

espeak-ng-data: sources/go-piper sources/go-piper/libpiper_binding.a
	mkdir -p espeak-ng-data
	@cp -rf sources/go-piper/piper-phonemize/pi/share/espeak-ng-data/. espeak-ng-data

piper: sources/go-piper sources/go-piper/libpiper_binding.a espeak-ng-data
	$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(CURRENT_DIR)/sources/go-piper
	CGO_CXXFLAGS="$(PIPER_CGO_CXXFLAGS)" CGO_LDFLAGS="$(PIPER_CGO_LDFLAGS)" LIBRARY_PATH=$(CURRENT_DIR)/sources/go-piper \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o piper ./

package:
	bash package.sh

build: piper package
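# Usage note (an assumption, not part of the diff): `make build` here fetches
# go-piper at the pinned commit, builds libpiper_binding.a, stages the
# espeak-ng-data assets, compiles the piper binary, then runs package.sh.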
backend/go/piper/package.sh (new executable file)
@@ -0,0 +1,54 @@
#!/bin/bash

# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile

set -e

CURDIR=$(dirname "$(realpath $0)")

# Create lib directory
mkdir -p $CURDIR/package/lib

cp -avrf $CURDIR/piper $CURDIR/package/
cp -avrf $CURDIR/espeak-ng-data $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/
cp -rfLv $CURDIR/sources/go-piper/piper-phonemize/pi/lib/* $CURDIR/package/lib/

# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
    # x86_64 architecture
    echo "Detected x86_64 architecture, copying x86_64 libraries..."
    cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/x86_64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
    cp -arfLv /lib/x86_64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
    cp -arfLv /lib/x86_64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
    # ARM64 architecture
    echo "Detected ARM64 architecture, copying ARM64 libraries..."
    cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
    cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
    cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
    cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
    cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
else
    echo "Error: Could not detect architecture"
    exit 1
fi

echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/
Some files were not shown because too many files have changed in this diff.