Compare commits

...

211 Commits

Author SHA1 Message Date
Ettore Di Giacinto
a00bbfe3eb chore(model): add silero-vad model config
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-26 14:28:41 +01:00
Ettore Di Giacinto
2b62260b6d feat(models): use rwkv from llama.cpp (#4264)
feat(rwkv): use rwkv from llama.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-26 14:22:55 +01:00
Ettore Di Giacinto
03800ccceb Revert "chore(deps): Bump faster-whisper from 0.9.0 to 1.1.0 in /backend/python/openvoice" (#4268)
Revert "chore(deps): Bump faster-whisper from 0.9.0 to 1.1.0 in /backend/python/openvoice"

This reverts commit 6c8e870812.
2024-11-26 14:22:10 +01:00
Ettore Di Giacinto
f1b86d6e7f Revert "chore(deps): Bump whisper-timestamped from 1.14.2 to 1.15.8 in /backend/python/openvoice" (#4267)
Revert "chore(deps): Bump whisper-timestamped from 1.14.2 to 1.15.8 in /backe…"

This reverts commit 0f8f249465.
2024-11-26 14:22:03 +01:00
Ettore Di Giacinto
404ca3cc23 chore(deps): bump llama.cpp to 47f931c8f9a26c072d71224bc8013cc66ea9e445 (#4263)
chore(deps): bump llama.cpp to '47f931c8f9a26c072d71224bc8013cc66ea9e445'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-26 11:12:57 +01:00
Ettore Di Giacinto
7492179c67 chore(model): add llama-3.1_openscholar-8b to the gallery (#4262)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-26 09:50:24 +01:00
dependabot[bot]
eeb22317b5 chore(deps): Bump dcarbone/install-yq-action from 1.3.0 to 1.3.1 (#4253)
Bumps [dcarbone/install-yq-action](https://github.com/dcarbone/install-yq-action) from 1.3.0 to 1.3.1.
- [Release notes](https://github.com/dcarbone/install-yq-action/releases)
- [Commits](https://github.com/dcarbone/install-yq-action/compare/v1.3.0...v1.3.1)

---
updated-dependencies:
- dependency-name: dcarbone/install-yq-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-26 09:49:29 +01:00
LocalAI [bot]
9b46dcf006 chore(model-gallery): ⬆️ update checksum (#4261)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-26 09:49:05 +01:00
dependabot[bot]
6c8e870812 chore(deps): Bump faster-whisper from 0.9.0 to 1.1.0 in /backend/python/openvoice (#4249)
chore(deps): Bump faster-whisper in /backend/python/openvoice

Bumps [faster-whisper](https://github.com/SYSTRAN/faster-whisper) from 0.9.0 to 1.1.0.
- [Release notes](https://github.com/SYSTRAN/faster-whisper/releases)
- [Commits](https://github.com/SYSTRAN/faster-whisper/compare/v0.9.0...v1.1.0)

---
updated-dependencies:
- dependency-name: faster-whisper
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-26 00:54:12 +00:00
dependabot[bot]
0f8f249465 chore(deps): Bump whisper-timestamped from 1.14.2 to 1.15.8 in /backend/python/openvoice (#4248)
chore(deps): Bump whisper-timestamped in /backend/python/openvoice

Bumps [whisper-timestamped](https://github.com/linto-ai/whisper-timestamped) from 1.14.2 to 1.15.8.
- [Release notes](https://github.com/linto-ai/whisper-timestamped/releases)
- [Commits](https://github.com/linto-ai/whisper-timestamped/compare/v1.14.2...v1.15.8)

---
updated-dependencies:
- dependency-name: whisper-timestamped
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-25 21:28:44 +00:00
Ettore Di Giacinto
720ffc1d9d chore(model): add steyrcannon-0.2-qwen2.5-72b to the gallery (#4244)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-25 09:25:55 +01:00
Ettore Di Giacinto
5c4e4c1cbc chore(model): add tulu-3.1-8b-supernova-i1 to the gallery (#4243)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-25 09:25:44 +01:00
Ettore Di Giacinto
32ca4a51e5 chore(model): add qwen2.5-coder-32b-instruct-uncensored-i1 to the gallery (#4241)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-25 09:25:33 +01:00
Ettore Di Giacinto
dbe98229e8 chore(model): add dark-chivalry_v1.0-i1 to the gallery (#4242)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-25 09:22:11 +01:00
LocalAI [bot]
1de20331ca chore: ⬆️ Update ggerganov/llama.cpp to cce5a9007572c6e9fa522296b77571d2e5071357 (#4238)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-24 22:03:48 +00:00
Ettore Di Giacinto
7d2f213dc8 chore(model): add l3.1-aspire-heart-matrix-8b to the gallery (#4237)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-24 10:46:40 +01:00
Ettore Di Giacinto
76c8d0b868 chore(model): add qwen2.5-3b-smart-i1 to the gallery (#4236)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-24 10:46:29 +01:00
Ettore Di Giacinto
aae7e5fe99 chore(model): add llama-sentient-3.2-3b-instruct to the gallery (#4235)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-24 10:46:19 +01:00
Adam Monsen
9cb30bedeb integrations: add Nextcloud (#4233)
Signed-off-by: Adam Monsen <haircut@gmail.com>
2024-11-24 10:33:18 +01:00
LocalAI [bot]
f1a72f3a16 chore: ⬆️ Update ggerganov/llama.cpp to 55ed008b2de01592659b9eba068ea01bb2f72160 (#4232)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-23 22:14:37 +00:00
Ettore Di Giacinto
a04cf9543d chore(model): add onellm-doey-v1-llama-3.2-3b to the gallery (#4230)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-23 11:09:24 +01:00
Ettore Di Giacinto
7a973c8c16 chore(model): add marco-o1 to the gallery (#4229)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-23 11:03:43 +01:00
Ettore Di Giacinto
66b06f43af chore(model): add llama-3.1-tulu-3-8b-dpo to the gallery (#4228)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-23 11:03:26 +01:00
LocalAI [bot]
74134ef99a chore: ⬆️ Update ggerganov/llama.cpp to 6dfcfef0787e9902df29f510b63621f60a09a50b (#4227)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-22 21:41:52 +00:00
Ettore Di Giacinto
f5fdef72e3 chore(model): add llama-3.1-8b-instruct-ortho-v3 to the gallery (#4226)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-22 11:33:20 +01:00
Ettore Di Giacinto
cfc45dff37 chore(model): add copus-2x8b-i1 to the gallery (#4225)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-22 11:33:04 +01:00
JackBekket
30f641fe12 add new community integrations (#4224)
2024-11-22 11:32:50 +01:00
Ettore Di Giacinto
76f5ba1412 chore(model): add llama-doctor-3.2-3b-instruct to the gallery (#4223)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-22 11:32:10 +01:00
Ettore Di Giacinto
b601535cdd models(gallery): add llama-mesh (#4222)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-22 11:25:28 +01:00
GPTLocalhost (Word Add-in)
c9c58a24a8 chore(docs): integrating LocalAI with Microsoft Word (#4218)
Integrating LocalAI with Microsoft Word

Signed-off-by: GPTLocalhost (Word Add-in) <72584872+GPTLocalhost@users.noreply.github.com>
2024-11-22 09:57:39 +01:00
LocalAI [bot]
4e3df95737 chore: ⬆️ Update ggerganov/llama.cpp to a5e47592b6171ae21f3eaa1aba6fb2b707875063 (#4221)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-21 22:34:56 +00:00
Ettore Di Giacinto
f028ee8a26 fix(p2p): parse correctly ExtraLLamaCPPArgs (#4220)
Previously parsing was fragile when args weren't defined, and we would
clash when parsing the extra args.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-21 15:17:48 +01:00
Ettore Di Giacinto
47dc4337ba fix(p2p): parse maddr correctly (#4219)
Previously, when no value was specified, it would pass a slice
containing a single empty element.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-21 14:06:49 +01:00
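Both p2p fixes above touch the same Go pitfall: `strings.Split("", sep)` returns a one-element slice containing the empty string, not an empty slice. A minimal sketch of the pitfall and a guard — illustrative only, not the actual LocalAI code:

```go
package main

import (
	"fmt"
	"strings"
)

// splitNonEmpty splits s on sep, but returns nil for an empty input
// instead of the one-element slice [""] that strings.Split yields.
func splitNonEmpty(s, sep string) []string {
	if s == "" {
		return nil
	}
	var out []string
	for _, p := range strings.Split(s, sep) {
		if p != "" {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	fmt.Println(len(strings.Split("", ","))) // 1 -- a slice of one empty element (the bug)
	fmt.Println(len(splitNonEmpty("", ","))) // 0 -- the guarded behavior
}
```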
Ettore Di Giacinto
fa6fcdf53e Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-21 11:05:00 +01:00
Ettore Di Giacinto
163ca74590 Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-21 10:04:45 +01:00
Ettore Di Giacinto
961a993b88 Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-21 09:56:05 +01:00
Ettore Di Giacinto
46847f3bd4 Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-21 09:54:41 +01:00
rmmonster
f03bbf3188 fix: #4215 404 in documentation due to migrated configuration examples (#4216)
update link to the examples, which have moved to their own repository

Signed-off-by: Philipp Seelig <philipp@daxbau.net>
Co-authored-by: Philipp Seelig <philipp@daxbau.net>
Co-authored-by: Dave <dave@gray101.com>
2024-11-21 09:47:11 +01:00
LocalAI [bot]
73ab921391 chore: ⬆️ Update ggerganov/llama.cpp to 9abe9eeae98b11fa93b82632b264126a010225ff (#4212)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-21 00:24:16 +00:00
LocalAI [bot]
eaf0e3022a feat(swagger): update swagger (#4211)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-20 23:10:51 +01:00
Ettore Di Giacinto
7adbc16bae chore(model): add silero-vad to the gallery (#4210)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-20 19:11:29 +01:00
Ettore Di Giacinto
76d813ed1c chore(go.mod): tidy (#4209)
chore(go.mod): tidy

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-20 18:13:42 +01:00
Ettore Di Giacinto
4f1ab2366d chore(refactor): imply modelpath (#4208)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-20 18:06:35 +01:00
Ettore Di Giacinto
51e0db367a chore(go.mod): add valyala/fasttemplate (#4207)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-20 16:54:25 +01:00
Ettore Di Giacinto
c20e145aa5 fix(go.mod): add urfave/cli v2 (#4206)
chore(go.mod): add urfave/cli v2

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-20 15:46:42 +01:00
Ettore Di Giacinto
b1ea9318e6 feat(silero): add Silero-vad backend (#4204)
* feat(vad): add silero-vad backend (WIP)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat(vad): add API endpoint

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(vad): correctly place the onnxruntime libs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(vad): hook silero-vad to binary and container builds

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat(gRPC): register VAD Server

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(Makefile): consume ONNX_OS consistently

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(Makefile): handle macOS

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-20 14:48:40 +01:00
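The PR above adds a VAD API endpoint backed by silero-vad. A hypothetical client sketch, assuming a `/vad` route taking a JSON body with a model name and raw PCM samples — the real request schema is whatever the PR defines:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// vadRequest is a hypothetical request shape; the actual schema is
// defined by the PR and may differ.
type vadRequest struct {
	Model string    `json:"model"`
	Audio []float32 `json:"audio"` // raw PCM samples, e.g. 16 kHz mono
}

func main() {
	body, _ := json.Marshal(vadRequest{
		Model: "silero-vad",
		Audio: make([]float32, 16000), // one second of silence
	})
	resp, err := http.Post("http://localhost:8080/vad", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect detected speech segments on success
}
```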
mintyleaf
9892d7d584 feat(p2p): add support for configuration of edgevpn listen_maddrs, dht_announce_maddrs and bootstrap_peers (#4200)
* add support for edgevpn listen_maddrs, dht_announce_maddrs, dht_bootstrap_peers

* update docs for libp2p loglevel
2024-11-20 14:18:52 +01:00
LocalAI [bot]
96377feff6 chore: ⬆️ Update ggerganov/llama.cpp to 9fe0fb062630728e3c21b5839e3bce87bff2440a (#4203)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-20 09:19:19 +01:00
LocalAI [bot]
eeeedaf5c6 chore: ⬆️ Update ggerganov/whisper.cpp to 6266a9f9e56a5b925e9892acf650f3eb1245814d (#4202)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-20 00:15:59 +00:00
mintyleaf
de148cb2ad feat: add WebUI API token authorization (#4197)
* return 401 instead of 403, provide a WWW-Authenticate header, redirect to the login page, add cookie token support

* set cookies completely through js in auth page
2024-11-19 18:43:02 +01:00
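A minimal sketch of the behavior described above — the token accepted from either the Authorization header or a cookie, answering 401 (not 403) with a `WWW-Authenticate` header on failure. It uses net/http and an assumed cookie name for brevity; this is not LocalAI's actual middleware:

```go
package main

import (
	"net/http"
	"strings"
)

// requireToken accepts the API token from the Authorization header
// or from a cookie ("token" is an assumed name), and answers 401
// with a WWW-Authenticate header when it is missing or wrong.
func requireToken(token string, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		got := strings.TrimPrefix(r.Header.Get("Authorization"), "Bearer ")
		if c, err := r.Cookie("token"); got == "" && err == nil {
			got = c.Value
		}
		if got != token {
			w.Header().Set("WWW-Authenticate", `Bearer realm="webui"`)
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("ok")) })
	http.ListenAndServe(":8081", requireToken("secret", mux))
}
```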
Ettore Di Giacinto
8a4df3af99 chore(deps): bump sycl intel image (#4201)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-19 18:42:48 +01:00
dependabot[bot]
cfb0ac3992 chore(deps): Bump appleboy/ssh-action from 1.1.0 to 1.2.0 (#4183)
Bumps [appleboy/ssh-action](https://github.com/appleboy/ssh-action) from 1.1.0 to 1.2.0.
- [Release notes](https://github.com/appleboy/ssh-action/releases)
- [Changelog](https://github.com/appleboy/ssh-action/blob/master/.goreleaser.yaml)
- [Commits](https://github.com/appleboy/ssh-action/compare/v1.1.0...v1.2.0)

---
updated-dependencies:
- dependency-name: appleboy/ssh-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-19 08:43:30 +01:00
dependabot[bot]
57de92e727 chore(deps): Bump dcarbone/install-yq-action from 1.2.0 to 1.3.0 (#4182)
Bumps [dcarbone/install-yq-action](https://github.com/dcarbone/install-yq-action) from 1.2.0 to 1.3.0.
- [Release notes](https://github.com/dcarbone/install-yq-action/releases)
- [Commits](https://github.com/dcarbone/install-yq-action/compare/v1.2.0...v1.3.0)

---
updated-dependencies:
- dependency-name: dcarbone/install-yq-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-19 08:43:13 +01:00
LocalAI [bot]
ccf64cd7e2 chore: ⬆️ Update ggerganov/whisper.cpp to d24f981fb2fbf73ec7d72888c3129d1ed3f91916 (#4195)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-19 08:42:43 +01:00
LocalAI [bot]
47c4248703 chore: ⬆️ Update ggerganov/llama.cpp to d3481e631661b5e9517f78908cdd58cee63c4903 (#4196)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-19 08:42:09 +01:00
dependabot[bot]
faf203eeb3 chore(deps): Bump sentence-transformers from 3.3.0 to 3.3.1 in /backend/python/sentencetransformers (#4178)
chore(deps): Bump sentence-transformers

Bumps [sentence-transformers](https://github.com/UKPLab/sentence-transformers) from 3.3.0 to 3.3.1.
- [Release notes](https://github.com/UKPLab/sentence-transformers/releases)
- [Commits](https://github.com/UKPLab/sentence-transformers/compare/v3.3.0...v3.3.1)

---
updated-dependencies:
- dependency-name: sentence-transformers
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-18 21:03:37 +00:00
Ettore Di Giacinto
534cdf1306 chore(model): add miniclaus-qw1.5b-unamgs to the gallery (#4177)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-18 14:41:52 +01:00
Ettore Di Giacinto
569171ae97 chore(model): add evathene-v1.0 to the gallery (#4176)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-18 14:36:37 +01:00
Ettore Di Giacinto
b10c4ad90f chore(model): add l3.1-nemotron-sunfall-v0.7.0-i1 to the gallery (#4175)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-18 14:33:54 +01:00
Ettore Di Giacinto
a7db97e033 chore(Makefile): default to non-native builds for llama.cpp (#4173)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-18 13:59:06 +01:00
Ettore Di Giacinto
e0acc149fe chore(model): add llama3.2-3b-shiningvaliant2-i1 to the gallery (#4174)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-18 11:57:37 +01:00
Ettore Di Giacinto
61e14ad10b chore(model): add qwen2.5-7b-nerd-uncensored-v1.7 to the gallery (#4171)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-17 13:22:25 +01:00
LocalAI [bot]
a028d97888 chore: ⬆️ Update ggerganov/llama.cpp to db4cfd5dbc31c90f0d5c413a2e182d068b8ee308 (#4169)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-17 10:24:51 +01:00
Ettore Di Giacinto
e898e0bdc2 fix(hipblas): disable avx flags when accelerated bins are used (#4167)
chore(hipblas): disable avx flags when accelerated bins are used

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-17 10:23:59 +01:00
LocalAI [bot]
8b0b326875 chore: ⬆️ Update ggerganov/whisper.cpp to 01d3bd7d5ccd1956a7ddf1b57ee92d69f35aad93 (#4163)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-16 21:55:32 +00:00
Ettore Di Giacinto
57e793482a chore(deps): bump grpcio to 1.68.0 (#4166)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-16 10:33:47 +01:00
LocalAI [bot]
9b1d53f109 chore(model-gallery): ⬆️ update checksum (#4165)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-16 09:10:59 +01:00
LocalAI [bot]
f6adcd49fb chore: ⬆️ Update ggerganov/llama.cpp to 883d206fbd2c5b2b9b589a9328503b9005e146c9 (#4164)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-15 21:41:43 +00:00
Ettore Di Giacinto
65bcc01a34 chore(model): add athene-v2-chat to the gallery (#4162)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-15 16:21:43 +01:00
Ettore Di Giacinto
3200090901 chore(model): add athene-v2-agent to the gallery (#4161)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-15 15:45:24 +01:00
Ettore Di Giacinto
6516c093cb chore(model): add magnum-v2-4b-i1 to the gallery (#4160)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-15 14:57:45 +01:00
Ettore Di Giacinto
f69afb457c chore(model): add l3.1-ms-astoria-70b-v2 to the gallery (#4159)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-15 14:48:29 +01:00
Ettore Di Giacinto
c53c0b068b models(gallery): add q2.5-ms-mistoria-72b-i1 (#4158)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-15 14:42:19 +01:00
Ettore Di Giacinto
939fbe59cc chore(deps): bump llama-cpp to ae8de6d50a09d49545e0afab2e50cc4acfb280e2 (#4157)
* chore(deps): bump llama-cpp to ae8de6d50a09d49545e0afab2e50cc4acfb280e2

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(metal): metal file has moved

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-15 12:51:43 +01:00
LocalAI [bot]
62d0d004fa feat(swagger): update swagger (#4155)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-14 21:36:53 +00:00
Ettore Di Giacinto
de2b5748c3 chore(model): add l3.1-8b-slush-i1 to the gallery (#4152)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 15:38:13 +01:00
Ettore Di Giacinto
065215341f chore(model): add magnum-12b-v2.5-kto-i1 to the gallery (#4151)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 15:38:00 +01:00
Ettore Di Giacinto
1770b92fb6 chore(api): return values from schema (#4153)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 14:12:29 +01:00
Ettore Di Giacinto
a73c660fee chore(linguist): add *.hpp files to linguist-vendored (#4154)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 14:12:16 +01:00
Ettore Di Giacinto
b7d757186c chore(model): add llama-3.1-8b-arliai-rpmax-v1.3 to the gallery (#4150)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 11:34:22 +01:00
Ettore Di Giacinto
1ef379854e chore(model): add llama3.2-3b-enigma to the gallery (#4149)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 11:30:38 +01:00
Ettore Di Giacinto
216838b5da chore(model): add qwen2.5-32b-arliai-rpmax-v1.3 to the gallery (#4148)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 11:14:36 +01:00
Ettore Di Giacinto
6ce0c0e4df chore(model): add llama3.1-8b-cobalt to the gallery (#4147)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 11:07:47 +01:00
Ettore Di Giacinto
8ab7517294 models(gallery): add llama3.1-8b-enigma (#4146)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 11:04:36 +01:00
Ettore Di Giacinto
8a89aafc8c models(gallery): add celestial-harmony-14b-v1.0-experimental-1016-i1 (#4145)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 10:59:54 +01:00
Ettore Di Giacinto
c222b2b7c0 models(gallery): add tess-3-llama-3.1-70b (#4143)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-14 09:41:35 +01:00
LocalAI [bot]
5b166df96a chore: ⬆️ Update ggerganov/llama.cpp to fb4a0ec0833c71cff5a1a367ba375447ce6106eb (#4140)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-13 22:31:57 +01:00
LocalAI [bot]
489cb90322 chore: ⬆️ Update ggerganov/whisper.cpp to f19463ece2d43fd0b605dc513d8800eeb4e2315e (#4139)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-13 22:31:35 +01:00
Ettore Di Giacinto
c1d76290dc models(gallery): add mistral-nemo-prism-12b (#4141)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-13 22:09:48 +01:00
Ettore Di Giacinto
668ec2fadc models(gallery): add eva-qwen2.5-72b-v0.1-i1 (#4136)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-13 09:45:02 +01:00
Ettore Di Giacinto
ee4f1210bb models(gallery): add rombos-coder-v2.5-qwen-14b (#4135)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-13 09:42:26 +01:00
Ettore Di Giacinto
aebaf71be6 models(gallery): add rombos-coder-v2.5-qwen-32b (#4134)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-13 09:37:08 +01:00
Ettore Di Giacinto
1db504353c models(gallery): add rombos-coder-v2.5-qwen-7b (#4133)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-13 09:33:32 +01:00
Ettore Di Giacinto
b36ced8681 models(gallery): add qwen2.5-coder-7b-instruct-abliterated-i1 (#4132)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 12:33:39 +01:00
Ettore Di Giacinto
5de277cc78 models(gallery): add qwen2.5-coder-7b-3x-instruct-ties-v1.2-i1 (#4131)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 12:26:43 +01:00
Ettore Di Giacinto
daf1b25476 models(gallery): add qwen2.5-coder-7b-instruct (#4130)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 12:19:46 +01:00
Ettore Di Giacinto
e76bdaf61b models(gallery): add qwen2.5-coder-1.5b-instruct (#4129)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 12:14:05 +01:00
Ettore Di Giacinto
f3aeec6a4d models(gallery): add qwen2.5-coder-14b-instruct (#4128)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 12:07:27 +01:00
Ettore Di Giacinto
4e2a5719e7 models(gallery): add qwen2.5-coder-32b-instruct (#4127)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 12:03:14 +01:00
Ettore Di Giacinto
fe7ffdbc63 models(gallery): add qwen2.5-coder-3b-instruct (#4126)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 11:55:05 +01:00
Ettore Di Giacinto
8079ffee25 models(gallery): add qwen2.5-coder-14b (#4125)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-12 11:44:21 +01:00
Ikko Eltociear Ashimine
9688f516e0 chore: update jobresult_test.go (#4124)
recieve -> receive

Signed-off-by: Ikko Eltociear Ashimine <eltociear@gmail.com>
2024-11-12 08:52:18 +01:00
LocalAI [bot]
7903e1f6fa chore: ⬆️ Update ggerganov/llama.cpp to 54ef9cfc726a799e6f454ac22c4815d037716eda (#4122)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-12 00:53:49 +00:00
dependabot[bot]
1ec64bf683 chore(deps): Bump sentence-transformers from 3.2.0 to 3.3.0 in /backend/python/sentencetransformers (#4120)
chore(deps): Bump sentence-transformers

Bumps [sentence-transformers](https://github.com/UKPLab/sentence-transformers) from 3.2.0 to 3.3.0.
- [Release notes](https://github.com/UKPLab/sentence-transformers/releases)
- [Commits](https://github.com/UKPLab/sentence-transformers/compare/v3.2.0...v3.3.0)

---
updated-dependencies:
- dependency-name: sentence-transformers
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-11 22:54:57 +01:00
dependabot[bot]
2daf638ef8 chore(deps): Bump dcarbone/install-yq-action from 1.1.1 to 1.2.0 (#4114)
Bumps [dcarbone/install-yq-action](https://github.com/dcarbone/install-yq-action) from 1.1.1 to 1.2.0.
- [Release notes](https://github.com/dcarbone/install-yq-action/releases)
- [Commits](https://github.com/dcarbone/install-yq-action/compare/v1.1.1...v1.2.0)

---
updated-dependencies:
- dependency-name: dcarbone/install-yq-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-11 22:54:32 +01:00
Ettore Di Giacinto
bc25890a65 Revert "chore(deps): Bump setuptools from 69.5.1 to 75.4.0 in /backend/python/transformers" (#4123)
Revert "chore(deps): Bump setuptools from 69.5.1 to 75.4.0 in /backend/python…"

This reverts commit 066fcce57b.
2024-11-11 22:53:38 +01:00
dependabot[bot]
066fcce57b chore(deps): Bump setuptools from 69.5.1 to 75.4.0 in /backend/python/transformers (#4117)
chore(deps): Bump setuptools in /backend/python/transformers

Bumps [setuptools](https://github.com/pypa/setuptools) from 69.5.1 to 75.4.0.
- [Release notes](https://github.com/pypa/setuptools/releases)
- [Changelog](https://github.com/pypa/setuptools/blob/main/NEWS.rst)
- [Commits](https://github.com/pypa/setuptools/compare/v69.5.1...v75.4.0)

---
updated-dependencies:
- dependency-name: setuptools
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-11 21:42:24 +00:00
Ettore Di Giacinto
94fe07d073 models(gallery): add mn-tiramisu-12b (#4110)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-11 15:48:09 +01:00
Ettore Di Giacinto
8252a66034 models(gallery): add calme-3 llamaloi series (#4109)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-11 15:28:01 +01:00
Ettore Di Giacinto
5ab0ae9de5 models(gallery): add calme-3 qwenloi series (#4108)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-11 15:21:09 +01:00
Ettore Di Giacinto
7e2ef630aa models(gallery): add calme-3 qwen2.5 series (#4107)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-11 15:10:05 +01:00
LocalAI [bot]
e7d3efec14 docs: ⬆️ update docs version mudler/LocalAI (#4105)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-11 08:55:54 +01:00
LocalAI [bot]
4f5ec946ac chore: ⬆️ Update ggerganov/llama.cpp to 4b3a9212b602be3d4e2e3ca26efd796cef13c55e (#4106)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-10 21:56:55 +00:00
Ettore Di Giacinto
9099d0c77e models(gallery): add tq2.5-14b-sugarquill-v1 (#4104)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-10 11:50:38 +01:00
Ettore Di Giacinto
b69614c2b3 models(gallery): add tissint-14b-128k-rp (#4103)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-10 10:38:19 +01:00
Ettore Di Giacinto
068b90a6dc models(gallery): add opencoder-1.5b instruct and base (#4102)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-10 10:32:12 +01:00
Ettore Di Giacinto
0586fe2d9c models(gallery): add opencoder-8b instruct and base (#4101)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-10 10:28:47 +01:00
LocalAI [bot]
f1e03bf474 chore: ⬆️ Update ggerganov/llama.cpp to 6423c65aa8be1b98f990cf207422505ac5a441a1 (#4100)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-09 22:13:13 +00:00
Ettore Di Giacinto
7f0093b2c9 models(gallery): add eva-qwen2.5-14b-v0.2 (#4099)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-09 09:01:15 +01:00
Ettore Di Giacinto
e8431d62a2 models(gallery): add llenn-v0.75-qwen2.5-72b-i1 (#4098)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-09 08:58:09 +01:00
LocalAI [bot]
adafd7cf23 chore: ⬆️ Update ggerganov/llama.cpp to ec450d3bbf9fdb3cd06b27c00c684fd1861cb0cf (#4097)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-08 23:00:05 +00:00
Ettore Di Giacinto
6daef00d30 chore(refactor): drop unnecessary code in loader (#4096)
* chore: simplify passing options to ModelOptions

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(refactor): do not expose internal backend Loader

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-08 21:54:25 +01:00
Ettore Di Giacinto
a0cdd19038 models(gallery): add tess-r1-limerick-llama-3.1-70b (#4095)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-08 11:54:40 +01:00
Ettore Di Giacinto
d454118887 fix(container-images): install uv as system package (#4094)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-08 11:47:43 +01:00
LocalAI [bot]
356f23bacb chore: ⬆️ Update ggerganov/whisper.cpp to 31aea563a83803c710691fed3e8d700e06ae6788 (#4092)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-08 08:36:08 +01:00
LocalAI [bot]
196c249367 chore: ⬆️ Update ggerganov/llama.cpp to 97404c4a0374cac45c8c34a32d13819de1dd023d (#4093)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-07 22:55:56 +00:00
Ettore Di Giacinto
e2a8dd64db fix(tts): correctly pass backend config when generating model options (#4091)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-07 18:30:22 +01:00
Ettore Di Giacinto
20a5b20b59 chore(p2p): enhance logging (#4090)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-07 18:09:33 +01:00
Ettore Di Giacinto
06d0d00231 models(gallery): add valor-7b-v0.1 (#4089)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-07 10:05:50 +01:00
Ettore Di Giacinto
62c7f745ca models(gallery): add q25-1.5b-veolu (#4088)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-07 10:05:20 +01:00
LocalAI [bot]
551faa8ddb chore: ⬆️ Update ggerganov/llama.cpp to 5c333e014059122245c318e7ed4ec27d1085573c (#4087)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-06 21:48:57 +00:00
Ettore Di Giacinto
2c041a2077 feat(ui): move model detailed info to a modal (#4086)
* feat(ui): move model detailed info to a modal

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: add static asset

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-06 18:25:59 +01:00
Ettore Di Giacinto
c4af769d4f chore: hide raw safetensors files (#4085)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-06 12:04:39 +01:00
Ettore Di Giacinto
b425a870b0 fix(diffusers): correctly parse height and width request without parametrization (#4082)
* fix(diffusers): allow specifying width and height without enable-parameters

Let's simplify usage by not gating width and height behind parameters

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: use sane defaults

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-06 08:53:02 +01:00
LocalAI [bot]
b59e16742e chore: ⬆️ Update ggerganov/llama.cpp to b8deef0ec0af5febac1d2cfd9119ff330ed0b762 (#4083)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-05 21:40:48 +00:00
Ettore Di Giacinto
947224b952 feat(diffusers): allow multiple lora adapters (#4081)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-05 15:14:33 +01:00
LocalAI [bot]
20cd8814c1 chore(model-gallery): ⬆️ update checksum (#4080)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-05 08:38:34 +01:00
LocalAI [bot]
ce8045f521 chore: ⬆️ Update ggerganov/llama.cpp to d5a409e57fe8bd24fef597ab8a31110d390a6392 (#4079)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-11-05 05:01:26 +00:00
Ettore Di Giacinto
1bf5a11437 models(gallery): add g2-9b-sugarquill-v0 (#4073)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-04 22:30:17 +01:00
Ettore Di Giacinto
2daa5e6be0 models(gallery): add cybertron-v4-qw7b-mgs (#4063)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-04 22:30:07 +01:00
Ettore Di Giacinto
b91aa288b5 models(gallery): add g2-9b-aletheia-v1 (#4056)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-04 19:59:14 +01:00
Ettore Di Giacinto
43187d1aba models(gallery): add llama-3.1-whiterabbitneo-2-8b (#4043)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-04 11:28:16 +01:00
Ettore Di Giacinto
97b730e238 models(gallery): add whiterabbitneo-2.5-qwen-2.5-coder-7b (#4042)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-04 11:23:17 +01:00
LocalAI [bot]
d11ed5287b chore: ⬆️ Update ggerganov/llama.cpp to 9f409893519b4a6def46ef80cd6f5d05ac0fb157 (#4041)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-04 09:30:04 +01:00
LocalAI [bot]
81ac490202 chore: ⬆️ Update mudler/go-piper to e10ca041a885d4a8f3871d52924b47792d5e5aa0 (#3949)
⬆️ Update mudler/go-piper

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-03 21:39:43 +00:00
LocalAI [bot]
e53dd4a57b chore: ⬆️ Update ggerganov/llama.cpp to 9830b6923b61f1e652a35afeac77aa5f886dad09 (#4040)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-03 13:01:56 +00:00
Ettore Di Giacinto
d274df2fe2 models(gallery): add control-8b-v1.1 (#4039)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-03 10:00:20 +01:00
Arnaud A
0b3a55b9fe docs: Update documentation for text-to-audio feature regarding response_format (#4038)
2024-11-03 02:15:54 +00:00
LocalAI [bot]
abd5eea66d chore: ⬆️ Update ggerganov/llama.cpp to 42cadc74bda60afafb45b71b1a39d150ede0ed4d (#4037)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-02 22:33:55 +00:00
Arnaud A
65c3df392c feat(tts): Implement naive response_format for tts endpoint (#4035)
Signed-off-by: n-Arno <arnaud.alcabas@gmail.com>
2024-11-02 19:13:35 +00:00
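LocalAI's TTS endpoint follows the OpenAI-style API, so the new `response_format` field can be exercised with a plain HTTP client. A sketch assuming a `/v1/audio/speech` route and `wav` as an accepted format (both assumptions; the PR defines the real set):

```go
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"net/http"
	"os"
)

func main() {
	// response_format is the field this PR wires up; "wav" is an
	// assumed value -- the accepted formats are defined by the PR.
	body, _ := json.Marshal(map[string]string{
		"model":           "tts-model", // hypothetical model name
		"input":           "Hello from LocalAI",
		"response_format": "wav",
	})
	resp, err := http.Post("http://localhost:8080/v1/audio/speech", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := os.Create("out.wav")
	defer out.Close()
	io.Copy(out, resp.Body) // write the synthesized audio to disk
}
```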
Ettore Di Giacinto
57908df956 chore(docs): add top-header partial (#4034)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-02 12:07:40 +01:00
Ettore Di Giacinto
26e522a558 models(gallery): add smollm2-1.7b-instruct (#4033)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-02 11:01:39 +01:00
Ettore Di Giacinto
817685e4c1 models(gallery): add starcannon-unleashed-12b-v1.0 (#4032)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-02 10:44:51 +01:00
LocalAI [bot]
bcad3f3018 chore: ⬆️ Update ggerganov/llama.cpp to 418f5eef262cea07c2af4f45ee6a88d882221fcb (#4030)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-02 09:06:06 +01:00
LocalAI [bot]
303370ad87 chore: ⬆️ Update ggerganov/whisper.cpp to 0377596b77a3602e36430320cbe45f8c305ef04a (#4031)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-01 22:17:04 +00:00
Ettore Di Giacinto
a9fb7174ba models(gallery): add llama3.1-bestmix-chem-einstein-8b (#4028)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-11-01 17:36:31 +01:00
LocalAI [bot]
6d6f50340f chore: ⬆️ Update ggerganov/whisper.cpp to aa037a60f32018f32e54be3531ec6cc7802899eb (#4026)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-11-01 11:22:22 +01:00
LocalAI [bot]
6a136b2a4b chore: ⬆️ Update ggerganov/llama.cpp to ab3d71f97f5b2915a229099777af00d3eada1d24 (#4025)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-31 21:40:33 +00:00
Ettore Di Giacinto
8f7045cfa6 chore(tests): bump timeouts (#4024)
To avoid flaky runs

Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-31 15:40:43 +01:00
Ettore Di Giacinto
61c964dce7 fix(grpc): pass by modelpath (#4023)
Instead of trying to derive it from the model file: in backends that
specify an HF URL, that logic is fragile.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-31 12:12:22 +01:00
Ettore Di Giacinto
48d621c64e models(gallery): add spiral-da-hyah-qwen2.5-72b-i1 (#4022)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-31 10:28:26 +01:00
LocalAI [bot]
661dbbf2b4 chore: ⬆️ Update ggerganov/whisper.cpp to 19dca2bb1464326587cbeb7af00f93c4a59b01fd (#4020)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-31 09:56:06 +01:00
LocalAI [bot]
254f644c5f chore: ⬆️ Update ggerganov/llama.cpp to 61408e7fad082dc44a11c8a9f1398da4837aad44 (#4021)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-31 09:55:42 +01:00
Ettore Di Giacinto
88edb1e2af chore(tests): expand timeout (#4019)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-30 15:34:44 +01:00
Ettore Di Giacinto
640a3f1bfe chore(embedded): modify phi-2 configuration URL
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-30 10:58:03 +01:00
Ettore Di Giacinto
b1243453f4 chore(tests): fix examples url
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-30 10:57:21 +01:00
Ettore Di Giacinto
dfc651f643 chore(readme): update examples link
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-30 09:12:45 +01:00
Ettore Di Giacinto
d4978383ff chore: create examples/README to redirect to the new repository
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-30 09:11:32 +01:00
Dave
cde0139363 chore: drop examples folder now that LocalAI-examples has been created (#4017)
Signed-off-by: Dave Lee <dave@gray101.com>
2024-10-30 09:10:33 +01:00
Ettore Di Giacinto
3d4bb757d2 chore(deps): bump llama-cpp to 8f275a7c4593aa34147595a90282cf950a853690 (#4016)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-30 08:31:13 +01:00
LocalAI [bot]
a4e749c22f chore: ⬆️ Update ggerganov/whisper.cpp to 55e422109b3504d1a824935cc2681ada7ee9fd38 (#4015)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-29 22:01:46 +00:00
LocalAI [bot]
25a9685e2f chore: ⬆️ Update ggerganov/whisper.cpp to d4bc413505b2fba98dffbb9a176ddd1b165941d0 (#4005)
⬆️ Update ggerganov/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-29 15:07:43 +01:00
LocalAI [bot]
94d417c2b7 chore: ⬆️ Update ggerganov/llama.cpp to 61715d5cc83a28181df6a641846e4f6a740f3c74 (#4006)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-29 15:06:57 +01:00
Ettore Di Giacinto
b897d47e0f chore(deps): bump grpcio to 1.67.1 (#4009)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-29 15:04:21 +01:00
dependabot[bot]
3422d21346 chore(deps): Bump openai from 1.52.0 to 1.52.2 in /examples/functions (#4000)
Bumps [openai](https://github.com/openai/openai-python) from 1.52.0 to 1.52.2.
- [Release notes](https://github.com/openai/openai-python/releases)
- [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md)
- [Commits](https://github.com/openai/openai-python/compare/v1.52.0...v1.52.2)

---
updated-dependencies:
- dependency-name: openai
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-29 09:30:03 +01:00
dependabot[bot]
a7917a2150 chore(deps): Bump frozenlist from 1.4.1 to 1.5.0 in /examples/langchain/langchainpy-localai-example (#3992)
chore(deps): Bump frozenlist

Bumps [frozenlist](https://github.com/aio-libs/frozenlist) from 1.4.1 to 1.5.0.
- [Release notes](https://github.com/aio-libs/frozenlist/releases)
- [Changelog](https://github.com/aio-libs/frozenlist/blob/master/CHANGES.rst)
- [Commits](https://github.com/aio-libs/frozenlist/compare/v1.4.1...v1.5.0)

---
updated-dependencies:
- dependency-name: frozenlist
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-29 09:29:20 +01:00
dependabot[bot]
7b23b894b4 chore(deps): Bump tqdm from 4.66.5 to 4.66.6 in /examples/langchain/langchainpy-localai-example (#3991)
chore(deps): Bump tqdm

Bumps [tqdm](https://github.com/tqdm/tqdm) from 4.66.5 to 4.66.6.
- [Release notes](https://github.com/tqdm/tqdm/releases)
- [Commits](https://github.com/tqdm/tqdm/compare/v4.66.5...v4.66.6)

---
updated-dependencies:
- dependency-name: tqdm
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-29 09:28:10 +01:00
dependabot[bot]
15c083f731 chore(deps): Bump llama-index from 0.11.19 to 0.11.20 in /examples/chainlit (#3990)
chore(deps): Bump llama-index in /examples/chainlit

Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.19 to 0.11.20.
- [Release notes](https://github.com/run-llama/llama_index/releases)
- [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md)
- [Commits](https://github.com/run-llama/llama_index/compare/v0.11.19...v0.11.20)

---
updated-dependencies:
- dependency-name: llama-index
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-29 09:27:44 +01:00
dependabot[bot]
293eaad69d chore(deps): Bump openai from 1.52.0 to 1.52.2 in /examples/langchain-chroma (#3989)
chore(deps): Bump openai in /examples/langchain-chroma

Bumps [openai](https://github.com/openai/openai-python) from 1.52.0 to 1.52.2.
- [Release notes](https://github.com/openai/openai-python/releases)
- [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md)
- [Commits](https://github.com/openai/openai-python/compare/v1.52.0...v1.52.2)

---
updated-dependencies:
- dependency-name: openai
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-29 09:26:45 +01:00
dependabot[bot]
605126db8a chore(deps): Bump llama-index from 0.11.19 to 0.11.20 in /examples/langchain-chroma (#3988)
chore(deps): Bump llama-index in /examples/langchain-chroma

Bumps [llama-index](https://github.com/run-llama/llama_index) from 0.11.19 to 0.11.20.
- [Release notes](https://github.com/run-llama/llama_index/releases)
- [Changelog](https://github.com/run-llama/llama_index/blob/main/CHANGELOG.md)
- [Commits](https://github.com/run-llama/llama_index/compare/v0.11.19...v0.11.20)

---
updated-dependencies:
- dependency-name: llama-index
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-29 09:26:12 +01:00
dependabot[bot]
3980beabd7 chore(deps): Bump docs/themes/hugo-theme-relearn from 06e70da to 28fce6b (#3986)
chore(deps): Bump docs/themes/hugo-theme-relearn

Bumps [docs/themes/hugo-theme-relearn](https://github.com/McShelby/hugo-theme-relearn) from `06e70da` to `28fce6b`.
- [Release notes](https://github.com/McShelby/hugo-theme-relearn/releases)
- [Commits](06e70da8a6...28fce6b04c)

---
updated-dependencies:
- dependency-name: docs/themes/hugo-theme-relearn
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-29 09:25:42 +01:00
Ettore Di Giacinto
11d3ce9edb Revert "chore(deps): Bump torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118 in /backend/python/diffusers" (#4008)
Revert "chore(deps): Bump torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118 in …"

This reverts commit 14cb620cd8.
2024-10-29 09:25:17 +01:00
dependabot[bot]
14cb620cd8 chore(deps): Bump torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118 in /backend/python/diffusers (#3997)
chore(deps): Bump torchvision in /backend/python/diffusers

Bumps torchvision from 0.18.1+rocm6.0 to 0.20.0+cu118.

---
updated-dependencies:
- dependency-name: torchvision
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-28 23:33:35 +00:00
Ettore Di Giacinto
841dfefd62 models(gallery): add moe-girl-800ma-3bt (#3995)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 19:41:34 +01:00
Ettore Di Giacinto
d1cb2467fd models(gallery): add granite-3.0-1b-a400m-instruct (#3994)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 19:33:52 +01:00
dependabot[bot]
a8e10f03e9 chore(deps): Bump openai from 1.51.2 to 1.52.2 in /examples/langchain/langchainpy-localai-example (#3993)
chore(deps): Bump openai

Bumps [openai](https://github.com/openai/openai-python) from 1.51.2 to 1.52.2.
- [Release notes](https://github.com/openai/openai-python/releases)
- [Changelog](https://github.com/openai/openai-python/blob/main/CHANGELOG.md)
- [Commits](https://github.com/openai/openai-python/compare/v1.51.2...v1.52.2)

---
updated-dependencies:
- dependency-name: openai
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-28 19:33:05 +01:00
Ettore Di Giacinto
94010a0a44 models(gallery): add meraj-mini (#3987)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 19:12:59 +01:00
Ettore Di Giacinto
75bc933dc4 models(gallery): add l3-nymeria-maid-8b (#3985)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 19:00:55 +01:00
Ettore Di Giacinto
8de0f21f7c models(gallery): add llama-3-whiterabbitneo-8b-v2.0 (#3984)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 16:35:24 +01:00
Ettore Di Giacinto
66b03b54cb models(gallery): add magnum-v4-9b (#3983)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 16:24:14 +01:00
Ettore Di Giacinto
9ea8159683 models(gallery): add delirium-v1 (#3981)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 10:09:53 +01:00
Ettore Di Giacinto
c33083aeca models(gallery): add quill-v1 (#3980)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-28 09:59:21 +01:00
LocalAI [bot]
eb34f838f8 chore: ⬆️ Update ggerganov/llama.cpp to 8841ce3f439de6e770f70319b7e08b6613197ea7 (#3979)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-27 21:43:51 +00:00
Ettore Di Giacinto
8327e85e34 models(gallery): add llama-3.1-hawkish-8b (#3978)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-27 09:08:34 +01:00
Ettore Di Giacinto
a8c08d83d0 models(gallery): add l3.1-70blivion-v0.1-rc1-70b-i1 (#3977)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-27 09:06:27 +01:00
LocalAI [bot]
e314cdcdde chore: ⬆️ Update ggerganov/llama.cpp to cc2983d3753c94a630ca7257723914d4c4f6122b (#3976)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-26 21:40:42 +00:00
Ettore Di Giacinto
4528e969c9 models(gallery): add thebeagle-v2beta-32b-mgs (#3975)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-26 14:56:41 +02:00
Ettore Di Giacinto
175ae751ba models(gallery): add llama-3.2-3b-instruct-uncensored (#3974)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-26 14:56:02 +02:00
Ettore Di Giacinto
43bfdc9561 models(gallery): add darkest-muse-v1 (#3973)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-26 14:52:55 +02:00
Ettore Di Giacinto
546dce68a6 chore: change url to github repository (#3972)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-26 14:50:18 +02:00
Ettore Di Giacinto
82db2fa425 models(gallery): add llama-3.2-sun-2.5b-chat (#3971)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-26 09:09:22 +02:00
Ettore Di Giacinto
a27af2d7ad models(gallery): add llama3.1-darkstorm-aspire-8b (#3970)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-26 09:05:18 +02:00
Ettore Di Giacinto
9f43f37150 models(gallery): add l3.1-moe-2x8b-v0.2 (#3969)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-26 09:02:27 +02:00
Ettore Di Giacinto
3ad920b50a fix(parler-tts): pin protobuf (#3963)
* fix(parler-tts): pin protobuf

Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* debug

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Re-apply workaround

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-25 23:50:12 +02:00
LocalAI [bot]
dbe7ac484c chore: ⬆️ Update ggerganov/llama.cpp to 668750357e66bfa3d1504b65699f5a0dfe3cb7cb (#3965)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-25 21:42:18 +00:00
Ettore Di Giacinto
d9905ba050 fix(ci): drop grpcio-tools pin to apple CI test run (#3964)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-25 12:59:37 +02:00
Ettore Di Giacinto
dd2e243997 chore(python): update backend sample to consume grpcio from venv (#3961)
Backends may also depend on grpcio and require versions different from
the ones installed on the system.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-25 12:32:48 +02:00
Ettore Di Giacinto
fd905b483b fix(gallery): overrides for parler-tts in the gallery (#3962)
chore(parler-tts): fix overrides in the gallery

Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-10-25 12:32:37 +02:00
Ettore Di Giacinto
9c5cd9b38b fix(parler-tts): pin grpcio-tools (#3960)
Seems we require a specific version to build the backend files.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-10-25 12:25:29 +02:00
Sertaç Özercan
07ce0a3c17 feat: add flux single file support (#3959)
feat: flux pipeline single file

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>
2024-10-25 10:12:43 +02:00
LocalAI [bot]
5be2d22117 chore: ⬆️ Update ggerganov/llama.cpp to 958367bf530d943a902afa1ce1c342476098576b (#3956)
⬆️ Update ggerganov/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-10-24 22:45:26 +02:00
269 changed files with 3676 additions and 9964 deletions


@@ -0,0 +1,11 @@
meta {
  name: model delete
  type: http
  seq: 7
}

post {
  url: {{PROTOCOL}}{{HOST}}:{{PORT}}/models/galleries
  body: none
  auth: none
}

Binary file not shown.

@@ -0,0 +1,16 @@
meta {
  name: transcribe
  type: http
  seq: 1
}

post {
  url: {{PROTOCOL}}{{HOST}}:{{PORT}}/v1/audio/transcriptions
  body: multipartForm
  auth: none
}

body:multipart-form {
  file: @file(transcription/gb1.ogg)
  model: whisper-1
}
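The request above uploads a multipart form with a `file` part and a `model` field. An equivalent Go client — a sketch assuming LocalAI is listening on localhost:8080:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	// Build the same multipart form as the request above:
	// a "file" part with the audio and a "model" field.
	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	fw, _ := mw.CreateFormFile("file", "gb1.ogg")
	f, err := os.Open("transcription/gb1.ogg")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	io.Copy(fw, f)
	mw.WriteField("model", "whisper-1")
	mw.Close()

	req, _ := http.NewRequest("POST", "http://localhost:8080/v1/audio/transcriptions", &buf)
	req.Header.Set("Content-Type", mw.FormDataContentType())
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON with the transcribed text
}
```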

1
.gitattributes vendored

@@ -1 +1,2 @@
*.sh text eol=lf
backend/cpp/llama/*.hpp linguist-vendored


@@ -23,7 +23,7 @@ jobs:
sudo pip install --upgrade pip
pip install huggingface_hub
- name: 'Setup yq'
uses: dcarbone/install-yq-action@v1.1.1
uses: dcarbone/install-yq-action@v1.3.1
with:
version: 'v4.44.2'
download-compressed: true


@@ -33,7 +33,7 @@ jobs:
run: |
CGO_ENABLED=0 make build-api
- name: rm
uses: appleboy/ssh-action@v1.1.0
uses: appleboy/ssh-action@v1.2.0
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}
@@ -53,7 +53,7 @@ jobs:
rm: true
target: ./local-ai
- name: restarting
uses: appleboy/ssh-action@v1.1.0
uses: appleboy/ssh-action@v1.2.0
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}


@@ -15,7 +15,7 @@ jobs:
strategy:
matrix:
include:
- base-image: intel/oneapi-basekit:2024.2.0-devel-ubuntu22.04
- base-image: intel/oneapi-basekit:2025.0.0-0-devel-ubuntu22.04
runs-on: 'ubuntu-latest'
platforms: 'linux/amd64'
runs-on: ${{matrix.runs-on}}


@@ -123,6 +123,13 @@ jobs:
run: |
make --jobs=5 --output-sync=target -C backend/python/parler-tts
make --jobs=5 --output-sync=target -C backend/python/parler-tts test
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.19
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
tests-openvoice:
runs-on: ubuntu-latest


@@ -224,7 +224,7 @@ jobs:
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
pip install --user --no-cache-dir grpcio-tools==1.64.1
pip install --user --no-cache-dir grpcio-tools
- name: Test
run: |
export C_INCLUDE_PATH=/usr/local/include

.gitignore

@@ -12,7 +12,6 @@ prepare-sources
go-ggml-transformers
go-gpt2
go-rwkv
whisper.cpp
/bloomz
go-bert


@@ -85,7 +85,8 @@ WORKDIR /build
# The requirements-extras target is for any builds with IMAGE_TYPE=extras. It should not be placed in this target unless every IMAGE_TYPE=extras build will use it
FROM requirements-core AS requirements-extras
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
# Install uv as a system package
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
ENV PATH="/root/.cargo/bin:${PATH}"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y


@@ -8,15 +8,11 @@ DETECT_LIBS?=true
# llama.cpp versions
GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
CPPLLAMA_VERSION?=0a1c750c80147687df267114c81956757cc14382
# go-rwkv version
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6
CPPLLAMA_VERSION?=47f931c8f9a26c072d71224bc8013cc66ea9e445
# whisper.cpp version
WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
WHISPER_CPP_VERSION?=0fbaac9c891055796456df7b9122a70c220f9ca1
WHISPER_CPP_VERSION?=6266a9f9e56a5b925e9892acf650f3eb1245814d
# bert.cpp version
BERT_REPO?=https://github.com/go-skynet/go-bert.cpp
@@ -24,7 +20,7 @@ BERT_VERSION?=710044b124545415f555e4260d16b146c725a6e4
# go-piper version
PIPER_REPO?=https://github.com/mudler/go-piper
PIPER_VERSION?=9d0100873a7dbb0824dfea40e8cec70a1b110759
PIPER_VERSION?=e10ca041a885d4a8f3871d52924b47792d5e5aa0
# stablediffusion version
STABLEDIFFUSION_REPO?=https://github.com/mudler/go-stable-diffusion
@@ -34,6 +30,10 @@ STABLEDIFFUSION_VERSION?=4a3cd6aeae6f66ee57eae9a0075f8c58c3a6a38f
TINYDREAM_REPO?=https://github.com/M0Rf30/go-tiny-dream
TINYDREAM_VERSION?=c04fa463ace9d9a6464313aa5f9cd0f953b6c057
ONNX_VERSION?=1.20.0
ONNX_ARCH?=x64
ONNX_OS?=linux
export BUILD_TYPE?=
export STABLE_BUILD_TYPE?=$(BUILD_TYPE)
export CMAKE_ARGS?=
@@ -45,6 +45,7 @@ CGO_LDFLAGS_WHISPER+=-lggml
CUDA_LIBPATH?=/usr/local/cuda/lib64/
GO_TAGS?=
BUILD_ID?=
NATIVE?=false
TEST_DIR=/tmp/test
@@ -83,7 +84,25 @@ ifndef UNAME_S
UNAME_S := $(shell uname -s)
endif
# If NATIVE is false, we add -DGGML_NATIVE=OFF to CMAKE_ARGS
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif
# Detect if we are running on arm64
ifneq (,$(findstring aarch64,$(shell uname -m)))
ONNX_ARCH=aarch64
endif
ifeq ($(OS),Darwin)
ONNX_OS=osx
ifneq (,$(findstring aarch64,$(shell uname -m)))
ONNX_ARCH=arm64
else ifneq (,$(findstring arm64,$(shell uname -m)))
ONNX_ARCH=arm64
else
ONNX_ARCH=x86_64
endif
ifeq ($(OSX_SIGNING_IDENTITY),)
OSX_SIGNING_IDENTITY := $(shell security find-identity -v -p codesigning | grep '"' | head -n 1 | sed -E 's/.*"(.*)"/\1/')
@@ -138,10 +157,10 @@ ifeq ($(BUILD_TYPE),hipblas)
export CC=$(ROCM_HOME)/llvm/bin/clang
# llama-ggml has no hipblas support, so override it here.
export STABLE_BUILD_TYPE=
export GGML_HIPBLAS=1
export GGML_HIP=1
GPU_TARGETS ?= gfx900,gfx906,gfx908,gfx940,gfx941,gfx942,gfx90a,gfx1030,gfx1031,gfx1100,gfx1101
AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIPBLAS=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib
endif
@@ -186,9 +205,9 @@ ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-fallback
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-ggml
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-grpc
ALL_GRPC_BACKENDS+=backend-assets/util/llama-cpp-rpc-server
ALL_GRPC_BACKENDS+=backend-assets/grpc/rwkv
ALL_GRPC_BACKENDS+=backend-assets/grpc/whisper
ALL_GRPC_BACKENDS+=backend-assets/grpc/local-store
ALL_GRPC_BACKENDS+=backend-assets/grpc/silero-vad
ALL_GRPC_BACKENDS+=$(OPTIONAL_GRPC)
# Use filter-out to remove the specified backends
ALL_GRPC_BACKENDS := $(filter-out $(SKIP_GRPC_BACKEND),$(ALL_GRPC_BACKENDS))
@@ -248,20 +267,6 @@ sources/go-piper:
sources/go-piper/libpiper_binding.a: sources/go-piper
$(MAKE) -C sources/go-piper libpiper_binding.a example/main piper.o
## RWKV
sources/go-rwkv.cpp:
mkdir -p sources/go-rwkv.cpp
cd sources/go-rwkv.cpp && \
git init && \
git remote add origin $(RWKV_REPO) && \
git fetch origin && \
git checkout $(RWKV_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
sources/go-rwkv.cpp/librwkv.a: sources/go-rwkv.cpp
cd sources/go-rwkv.cpp && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a ..
## stable diffusion
sources/go-stable-diffusion:
mkdir -p sources/go-stable-diffusion
@@ -275,6 +280,20 @@ sources/go-stable-diffusion:
sources/go-stable-diffusion/libstablediffusion.a: sources/go-stable-diffusion
CPATH="$(CPATH):/usr/include/opencv4" $(MAKE) -C sources/go-stable-diffusion libstablediffusion.a
sources/onnxruntime:
mkdir -p sources/onnxruntime
curl -L https://github.com/microsoft/onnxruntime/releases/download/v$(ONNX_VERSION)/onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz -o sources/onnxruntime/onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz
cd sources/onnxruntime && tar -xvf onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz && rm onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz
cd sources/onnxruntime && mv onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION)/* ./
backend-assets/lib/libonnxruntime.so.1: backend-assets/lib sources/onnxruntime
cp -rfv sources/onnxruntime/lib/* backend-assets/lib/
ifeq ($(OS),Darwin)
mv backend-assets/lib/libonnxruntime.$(ONNX_VERSION).dylib backend-assets/lib/libonnxruntime.dylib
else
mv backend-assets/lib/libonnxruntime.so.$(ONNX_VERSION) backend-assets/lib/libonnxruntime.so.1
endif
## tiny-dream
sources/go-tiny-dream:
mkdir -p sources/go-tiny-dream
@@ -301,10 +320,9 @@ sources/whisper.cpp:
sources/whisper.cpp/libwhisper.a: sources/whisper.cpp
cd sources/whisper.cpp && $(MAKE) libwhisper.a libggml.a
get-sources: sources/go-llama.cpp sources/go-piper sources/go-rwkv.cpp sources/whisper.cpp sources/go-bert.cpp sources/go-stable-diffusion sources/go-tiny-dream backend/cpp/llama/llama.cpp
get-sources: sources/go-llama.cpp sources/go-piper sources/whisper.cpp sources/go-bert.cpp sources/go-stable-diffusion sources/go-tiny-dream backend/cpp/llama/llama.cpp
replace:
$(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(CURDIR)/sources/go-rwkv.cpp
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
$(GOCMD) mod edit -replace github.com/go-skynet/go-bert.cpp=$(CURDIR)/sources/go-bert.cpp
@@ -314,7 +332,6 @@ replace:
$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(CURDIR)/sources/go-llama.cpp
dropreplace:
$(GOCMD) mod edit -dropreplace github.com/donomii/go-rwkv.cpp
$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp
$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp/bindings/go
$(GOCMD) mod edit -dropreplace github.com/go-skynet/go-bert.cpp
@@ -330,7 +347,6 @@ prepare-sources: get-sources replace
rebuild: ## Rebuilds the project
$(GOCMD) clean -cache
$(MAKE) -C sources/go-llama.cpp clean
$(MAKE) -C sources/go-rwkv.cpp clean
$(MAKE) -C sources/whisper.cpp clean
$(MAKE) -C sources/go-stable-diffusion clean
$(MAKE) -C sources/go-bert.cpp clean
@@ -439,8 +455,6 @@ test-models/testmodel.ggml:
wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
wget -q https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
wget -q https://huggingface.co/mudler/rwkv-4-raven-1.5B-ggml/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%2525-Other1%2525-20230425-ctx4096_Q4_0.bin -O test-models/rwkv
wget -q https://raw.githubusercontent.com/saharNooby/rwkv.cpp/5eb8f09c146ea8124633ab041d9ea0b1f1db4459/rwkv/20B_tokenizer.json -O test-models/rwkv.tokenizer.json
cp tests/models_fixtures/* test-models
prepare-test: grpcs
@@ -761,7 +775,7 @@ backend-assets/grpc/llama-cpp-fallback: backend-assets/grpc backend/cpp/llama/ll
cp -rfv backend/cpp/llama-fallback/grpc-server backend-assets/grpc/llama-cpp-fallback
# TODO: every binary should have its own folder instead, so can have different metal implementations
ifeq ($(BUILD_TYPE),metal)
cp backend/cpp/llama-fallback/llama.cpp/build/bin/default.metallib backend-assets/grpc/
cp backend/cpp/llama-fallback/llama.cpp/build/bin/ggml-metal.metal backend-assets/grpc/
endif
backend-assets/grpc/llama-cpp-cuda: backend-assets/grpc backend/cpp/llama/llama.cpp
@@ -775,7 +789,7 @@ backend-assets/grpc/llama-cpp-hipblas: backend-assets/grpc backend/cpp/llama/lla
cp -rf backend/cpp/llama backend/cpp/llama-hipblas
$(MAKE) -C backend/cpp/llama-hipblas purge
$(info ${GREEN}I llama-cpp build info:hipblas${RESET})
BUILD_TYPE="hipblas" $(MAKE) VARIANT="llama-hipblas" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" BUILD_TYPE="hipblas" $(MAKE) VARIANT="llama-hipblas" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-hipblas/grpc-server backend-assets/grpc/llama-cpp-hipblas
backend-assets/grpc/llama-cpp-sycl_f16: backend-assets/grpc backend/cpp/llama/llama.cpp
@@ -817,13 +831,6 @@ ifneq ($(UPX),)
$(UPX) backend-assets/grpc/piper
endif
backend-assets/grpc/rwkv: sources/go-rwkv.cpp sources/go-rwkv.cpp/librwkv.a backend-assets/grpc
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-rwkv.cpp LIBRARY_PATH=$(CURDIR)/sources/go-rwkv.cpp \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/rwkv ./backend/go/llm/rwkv
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/rwkv
endif
backend-assets/grpc/stablediffusion: sources/go-stable-diffusion sources/go-stable-diffusion/libstablediffusion.a backend-assets/grpc
CGO_LDFLAGS="$(CGO_LDFLAGS)" CPATH="$(CPATH):$(CURDIR)/sources/go-stable-diffusion/:/usr/include/opencv4" LIBRARY_PATH=$(CURDIR)/sources/go-stable-diffusion/ \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/stablediffusion ./backend/go/image/stablediffusion
@@ -831,6 +838,13 @@ ifneq ($(UPX),)
$(UPX) backend-assets/grpc/stablediffusion
endif
backend-assets/grpc/silero-vad: backend-assets/grpc backend-assets/lib/libonnxruntime.so.1
CGO_LDFLAGS="$(CGO_LDFLAGS)" CPATH="$(CPATH):$(CURDIR)/sources/onnxruntime/include/" LIBRARY_PATH=$(CURDIR)/backend-assets/lib \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/silero-vad ./backend/go/vad/silero
ifneq ($(UPX),)
$(UPX) backend-assets/grpc/silero-vad
endif
backend-assets/grpc/tinydream: sources/go-tiny-dream sources/go-tiny-dream/libtinydream.a backend-assets/grpc
CGO_LDFLAGS="$(CGO_LDFLAGS)" LIBRARY_PATH=$(CURDIR)/go-tiny-dream \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/tinydream ./backend/go/image/tinydream
@@ -891,7 +905,7 @@ docker-aio-all:
docker-image-intel:
docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.2.0-devel-ubuntu22.04 \
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.0.0-0-devel-ubuntu22.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="none" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
@@ -899,7 +913,7 @@ docker-image-intel:
docker-image-intel-xpu:
docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.2.0-devel-ubuntu22.04 \
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.0.0-0-devel-ubuntu22.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="none" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \


@@ -38,9 +38,13 @@
</a>
</p>
<p align="center">
<a href="https://trendshift.io/repositories/1484" target="_blank"><img src="https://trendshift.io/api/badge/repositories/1484" alt="go-skynet%2FLocalAI | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
>
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/go-skynet/LocalAI/tree/master/examples/)
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples)
[![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai)
@@ -56,14 +60,17 @@ curl https://localai.io/install.sh | sh
Or run with docker:
```bash
# CPU only image:
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-cpu
# Nvidia GPU:
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
# CPU and GPU image (bigger size):
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
# AIO images (it will pre-download a set of models ready for use, see https://localai.io/basics/container/)
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
# Alternative images:
# - if you have an Nvidia GPU:
# docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
# - without preconfigured models
# docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
# - without preconfigured models for Nvidia GPUs
# docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
```
To load models:
@@ -85,6 +92,8 @@ local-ai run oci://localai/phi-2:latest
## 📰 Latest project news
- Nov 2024: Voice activity detection models (**VAD**) added to the API: https://github.com/mudler/LocalAI/pull/4204
- Oct 2024: examples moved to [LocalAI-examples](https://github.com/mudler/LocalAI-examples)
- Aug 2024: 🆕 FLUX-1, [P2P Explorer](https://explorer.localai.io)
- July 2024: 🔥🔥 🆕 P2P Dashboard, LocalAI Federated mode and AI Swarms: https://github.com/mudler/LocalAI/pull/2723
- June 2024: 🆕 You can browse now the model gallery without LocalAI! Check out https://models.localai.io
@@ -155,6 +164,9 @@ Other:
- Slack bot https://github.com/mudler/LocalAGI/tree/main/examples/slack
- Shell-Pilot (interact with LLMs using LocalAI models via pure shell scripts on your Linux or macOS system) https://github.com/reid41/shell-pilot
- Telegram bot https://github.com/mudler/LocalAI/tree/master/examples/telegram-bot
- Another Telegram Bot https://github.com/JackBekket/Hellper
- Auto-documentation https://github.com/JackBekket/Reflexia
- GitHub bot which answers issues, with code and documentation as context https://github.com/JackBekket/GitHelper
- Github Actions: https://github.com/marketplace/actions/start-localai
- Examples: https://github.com/mudler/LocalAI/tree/master/examples/
@@ -229,7 +241,6 @@ LocalAI couldn't have been built without the help of great software already avai
- https://github.com/antimatter15/alpaca.cpp
- https://github.com/EdVince/Stable-Diffusion-NCNN
- https://github.com/ggerganov/whisper.cpp
- https://github.com/saharNooby/rwkv.cpp
- https://github.com/rhasspy/piper
## 🤗 Contributors


@@ -28,6 +28,8 @@ service Backend {
rpc Rerank(RerankRequest) returns (RerankResult) {}
rpc GetMetrics(MetricsRequest) returns (MetricsResponse);
rpc VAD(VADRequest) returns (VADResponse) {}
}
// Define the empty request
@@ -233,6 +235,11 @@ message ModelOptions {
bool FlashAttention = 56;
bool NoKVOffload = 57;
string ModelPath = 59;
repeated string LoraAdapters = 60;
repeated float LoraScales = 61;
}
message Result {
@@ -288,6 +295,19 @@ message TTSRequest {
optional string language = 5;
}
message VADRequest {
repeated float audio = 1;
}
message VADSegment {
float start = 1;
float end = 2;
}
message VADResponse {
repeated VADSegment segments = 1;
}
message SoundGenerationRequest {
string text = 1;
string model = 2;
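A minimal client sketch for the new VAD RPC defined above, assuming the generated stubs under `pkg/grpc/proto` (the import path used elsewhere in this changeset) and a hypothetical backend address; only the RPC and message shapes come from the proto.

```go
package main

import (
	"context"
	"fmt"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The address is hypothetical; the VAD backend is normally spawned by LocalAI itself.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)
	samples := make([]float32, 16000) // one second of silence at 16 kHz

	resp, err := client.VAD(context.Background(), &pb.VADRequest{Audio: samples})
	if err != nil {
		panic(err)
	}
	for _, s := range resp.Segments {
		fmt.Printf("speech from %.2fs to %.2fs\n", s.Start, s.End)
	}
}
```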


@@ -22,7 +22,7 @@ else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DGGML_HIPBLAS=ON
CMAKE_ARGS+=-DGGML_HIP=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(OS),Darwin)


@@ -203,7 +203,7 @@ struct llama_client_slot
std::string stopping_word;
// sampling
struct common_sampler_params sparams;
struct common_params_sampling sparams;
common_sampler *ctx_sampling = nullptr;
int32_t ga_i = 0; // group-attention state
@@ -662,7 +662,7 @@ struct llama_server_context
bool launch_slot_with_data(llama_client_slot* &slot, json data) {
slot_params default_params;
common_sampler_params default_sparams;
common_params_sampling default_sparams;
slot->params.stream = json_value(data, "stream", false);
slot->params.cache_prompt = json_value(data, "cache_prompt", false);
@@ -670,7 +670,6 @@ struct llama_server_context
slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k);
slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p);
slot->sparams.min_p = json_value(data, "min_p", default_sparams.min_p);
slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z);
slot->sparams.typ_p = json_value(data, "typical_p", default_sparams.typ_p);
slot->sparams.temp = json_value(data, "temperature", default_sparams.temp);
slot->sparams.dynatemp_range = json_value(data, "dynatemp_range", default_sparams.dynatemp_range);
@@ -1206,7 +1205,6 @@ struct llama_server_context
{"top_k", slot.sparams.top_k},
{"top_p", slot.sparams.top_p},
{"min_p", slot.sparams.min_p},
{"tfs_z", slot.sparams.tfs_z},
{"typical_p", slot.sparams.typ_p},
{"repeat_last_n", slot.sparams.penalty_last_n},
{"repeat_penalty", slot.sparams.penalty_repeat},
@@ -2105,7 +2103,6 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama
// slot->params.n_predict = json_value(data, "n_predict", default_params.n_predict);
// slot->sparams.top_k = json_value(data, "top_k", default_sparams.top_k);
// slot->sparams.top_p = json_value(data, "top_p", default_sparams.top_p);
// slot->sparams.tfs_z = json_value(data, "tfs_z", default_sparams.tfs_z);
// slot->sparams.typical_p = json_value(data, "typical_p", default_sparams.typical_p);
// slot->sparams.temp = json_value(data, "temperature", default_sparams.temp);
// slot->sparams.penalty_last_n = json_value(data, "repeat_last_n", default_sparams.penalty_last_n);
@@ -2129,7 +2126,6 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama
data["n_predict"] = predict->tokens() == 0 ? -1 : predict->tokens();
data["top_k"] = predict->topk();
data["top_p"] = predict->topp();
data["tfs_z"] = predict->tailfreesamplingz();
data["typical_p"] = predict->typicalp();
data["temperature"] = predict->temperature();
data["repeat_last_n"] = predict->repeat();
@@ -2176,7 +2172,6 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, llama
// llama.params.n_predict = predict->tokens() == 0 ? -1 : predict->tokens();
// llama.params.sparams.top_k = predict->topk();
// llama.params.sparams.top_p = predict->topp();
// llama.params.sparams.tfs_z = predict->tailfreesamplingz();
// llama.params.sparams.typical_p = predict->typicalp();
// llama.params.sparams.penalty_last_n = predict->repeat();
// llama.params.sparams.temp = predict->temperature();
@@ -2304,6 +2299,7 @@ static void params_parse(const backend::ModelOptions* request,
params.use_mmap = request->mmap();
params.flash_attn = request->flashattention();
params.no_kv_offload = request->nokvoffload();
params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops)
params.embedding = request->embeddings();


@@ -1,95 +0,0 @@
package main
// This is a wrapper to satisfy the GRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"fmt"
"path/filepath"
"github.com/donomii/go-rwkv.cpp"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)
const tokenizerSuffix = ".tokenizer.json"
type LLM struct {
base.SingleThread
rwkv *rwkv.RwkvState
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
tokenizerFile := opts.Tokenizer
if tokenizerFile == "" {
modelFile := filepath.Base(opts.ModelFile)
tokenizerFile = modelFile + tokenizerSuffix
}
modelPath := filepath.Dir(opts.ModelFile)
tokenizerPath := filepath.Join(modelPath, tokenizerFile)
model := rwkv.LoadFiles(opts.ModelFile, tokenizerPath, uint32(opts.GetThreads()))
if model == nil {
return fmt.Errorf("rwkv could not load model")
}
llm.rwkv = model
return nil
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
stopWord := "\n"
if len(opts.StopPrompts) > 0 {
stopWord = opts.StopPrompts[0]
}
if err := llm.rwkv.ProcessInput(opts.Prompt); err != nil {
return "", err
}
response := llm.rwkv.GenerateResponse(int(opts.Tokens), stopWord, float32(opts.Temperature), float32(opts.TopP), nil)
return response, nil
}
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
go func() {
stopWord := "\n"
if len(opts.StopPrompts) > 0 {
stopWord = opts.StopPrompts[0]
}
if err := llm.rwkv.ProcessInput(opts.Prompt); err != nil {
fmt.Println("Error processing input: ", err)
return
}
llm.rwkv.GenerateResponse(int(opts.Tokens), stopWord, float32(opts.Temperature), float32(opts.TopP), func(s string) bool {
results <- s
return true
})
close(results)
}()
return nil
}
func (llm *LLM) TokenizeString(opts *pb.PredictOptions) (pb.TokenizationResponse, error) {
tokens, err := llm.rwkv.Tokenizer.Encode(opts.Prompt)
if err != nil {
return pb.TokenizationResponse{}, err
}
l := len(tokens)
i32Tokens := make([]int32, l)
for i, t := range tokens {
i32Tokens[i] = int32(t.ID)
}
return pb.TokenizationResponse{
Length: int32(l),
Tokens: i32Tokens,
}, nil
}


@@ -15,7 +15,7 @@ var (
func main() {
flag.Parse()
if err := grpc.StartServer(*addr, &LLM{}); err != nil {
if err := grpc.StartServer(*addr, &VAD{}); err != nil {
panic(err)
}
}


@@ -0,0 +1,54 @@
package main
// This is a wrapper to satisfy the GRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"fmt"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/streamer45/silero-vad-go/speech"
)
type VAD struct {
base.SingleThread
detector *speech.Detector
}
func (vad *VAD) Load(opts *pb.ModelOptions) error {
v, err := speech.NewDetector(speech.DetectorConfig{
ModelPath: opts.ModelFile,
SampleRate: 16000,
//WindowSize: 1024,
Threshold: 0.5,
MinSilenceDurationMs: 0,
SpeechPadMs: 0,
})
if err != nil {
return fmt.Errorf("create silero detector: %w", err)
}
vad.detector = v
return err
}
func (vad *VAD) VAD(req *pb.VADRequest) (pb.VADResponse, error) {
audio := req.Audio
segments, err := vad.detector.Detect(audio)
if err != nil {
return pb.VADResponse{}, fmt.Errorf("detect: %w", err)
}
vadSegments := []*pb.VADSegment{}
for _, s := range segments {
vadSegments = append(vadSegments, &pb.VADSegment{
Start: float32(s.SpeechStartAt),
End: float32(s.SpeechEndAt),
})
}
return pb.VADResponse{
Segments: vadSegments,
}, nil
}
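The detector above is created with `SampleRate: 16000`, so callers are expected to hand it mono float32 samples at 16 kHz. A small sketch of the usual 16-bit PCM conversion on the caller side (an assumption, not part of this changeset):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// pcm16ToFloat32 converts little-endian signed 16-bit PCM into the
// normalized float32 samples the VAD backend consumes.
func pcm16ToFloat32(pcm []byte) []float32 {
	out := make([]float32, len(pcm)/2)
	for i := range out {
		s := int16(binary.LittleEndian.Uint16(pcm[2*i:]))
		out[i] = float32(s) / 32768.0
	}
	return out
}

func main() {
	raw := []byte{0x00, 0x40, 0x00, 0xC0} // two samples: +0.5, -0.5
	fmt.Println(pcm16ToFloat32(raw))      // [0.5 -0.5]
}
```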


@@ -1,6 +1,6 @@
accelerate
auto-gptq==0.7.1
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi
transformers


@@ -1,4 +1,4 @@
bark==0.1.5
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi


@@ -1,8 +1,9 @@
.DEFAULT_GOAL := install
.PHONY: install
install: protogen
install:
bash install.sh
$(MAKE) protogen
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
@@ -12,7 +13,7 @@ protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
bash protogen.sh
.PHONY: clean
clean: protogen-clean


@@ -0,0 +1,6 @@
#!/bin/bash
set -e
source $(dirname $0)/../common/libbackend.sh
python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto


@@ -1,2 +1,3 @@
grpcio==1.67.0
protobuf
grpcio==1.68.0
protobuf
grpcio-tools


@@ -1,4 +1,4 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi
packaging==24.1


@@ -247,11 +247,16 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
use_safetensors=True,
variant=variant)
elif request.PipelineType == "FluxPipeline":
if fromSingleFile:
self.pipe = FluxPipeline.from_single_file(modelFile,
torch_dtype=torchType,
use_safetensors=True)
else:
self.pipe = FluxPipeline.from_pretrained(
request.Model,
torch_dtype=torch.bfloat16)
if request.LowVRAM:
self.pipe.enable_model_cpu_offload()
if request.LowVRAM:
self.pipe.enable_model_cpu_offload()
elif request.PipelineType == "FluxTransformer2DModel":
dtype = torch.bfloat16
# specify from environment or default to "ChuckMcSneed/FLUX.1-dev"
@@ -296,22 +301,34 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
self.pipe.controlnet = self.controlnet
else:
self.controlnet = None
# Assume directory from request.ModelFile.
# Only if request.LoraAdapter it's not an absolute path
if request.LoraAdapter and request.ModelFile != "" and not os.path.isabs(request.LoraAdapter) and request.LoraAdapter:
# get base path of modelFile
modelFileBase = os.path.dirname(request.ModelFile)
if request.LoraAdapter and not os.path.isabs(request.LoraAdapter):
# modify LoraAdapter to be relative to modelFileBase
request.LoraAdapter = os.path.join(modelFileBase, request.LoraAdapter)
request.LoraAdapter = os.path.join(request.ModelPath, request.LoraAdapter)
device = "cpu" if not request.CUDA else "cuda"
self.device = device
if request.LoraAdapter:
# Check if it's a local file and not a directory (we load lora differently for a safetensor file)
if os.path.exists(request.LoraAdapter) and not os.path.isdir(request.LoraAdapter):
# self.load_lora_weights(request.LoraAdapter, 1, device, torchType)
self.pipe.load_lora_weights(request.LoraAdapter)
else:
self.pipe.unet.load_attn_procs(request.LoraAdapter)
if len(request.LoraAdapters) > 0:
i = 0
adapters_name = []
adapters_weights = []
for adapter in request.LoraAdapters:
if not os.path.isabs(adapter):
adapter = os.path.join(request.ModelPath, adapter)
self.pipe.load_lora_weights(adapter, adapter_name=f"adapter_{i}")
adapters_name.append(f"adapter_{i}")
i += 1
for adapters_weight in request.LoraScales:
adapters_weights.append(adapters_weight)
self.pipe.set_adapters(adapters_name, adapter_weights=adapters_weights)
if request.CUDA:
self.pipe.to('cuda')
@@ -392,8 +409,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
# create a dictionary of values for the parameters
options = {
"negative_prompt": request.negative_prompt,
"width": request.width,
"height": request.height,
"num_inference_steps": steps,
}
@@ -411,13 +426,13 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
keys = options.keys()
if request.EnableParameters != "":
keys = request.EnableParameters.split(",")
keys = [key.strip() for key in request.EnableParameters.split(",")]
if request.EnableParameters == "none":
keys = []
# create a dictionary of parameters by using the keys from EnableParameters and the values from defaults
kwargs = {key: options[key] for key in keys}
kwargs = {key: options.get(key) for key in keys if key in options}
# Set seed
if request.seed > 0:
@@ -428,6 +443,12 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if self.PipelineType == "FluxPipeline":
kwargs["max_sequence_length"] = 256
if request.width:
kwargs["width"] = request.width
if request.height:
kwargs["height"] = request.height
if self.PipelineType == "FluxTransformer2DModel":
kwargs["output_type"] = "pil"
kwargs["generator"] = torch.Generator("cpu").manual_seed(0)
@@ -447,6 +468,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
export_to_video(video_frames, request.dst)
return backend_pb2.Result(message="Media generated successfully", success=True)
print(f"Generating image with {kwargs=}", file=sys.stderr)
image = {}
if COMPEL:
conditioning, pooled = self.compel.build_conditioning_tensor(prompt)


@@ -1,5 +1,5 @@
setuptools
grpcio==1.67.0
grpcio==1.68.0
pillow
protobuf
certifi


@@ -1,4 +1,4 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi
wheel


@@ -1,3 +1,3 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi


@@ -2,7 +2,7 @@
intel-extension-for-pytorch
torch
optimum[openvino]
grpcio==1.67.0
grpcio==1.68.0
protobuf
librosa==0.9.1
faster-whisper==0.9.0


@@ -1,4 +1,4 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
librosa
faster-whisper


@@ -12,9 +12,10 @@ export SKIP_CONDA=1
endif
.PHONY: parler-tts
parler-tts: protogen
parler-tts:
@echo "Installing $(CONDA_ENV_PATH)..."
bash install.sh $(CONDA_ENV_PATH)
$(MAKE) protogen
.PHONY: run
run: protogen
@@ -36,7 +37,7 @@ protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
bash protogen.sh
.PHONY: clean
clean: protogen-clean


@@ -11,16 +11,18 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
installRequirements
# https://github.com/descriptinc/audiotools/issues/101
# incompatible protobuf versions.
# PYDIR=python3.10
# pyenv="${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/"
PYDIR=python3.10
pyenv="${MY_DIR}/venv/lib/${PYDIR}/site-packages/google/protobuf/internal/"
# if [ ! -d ${pyenv} ]; then
# echo "(parler-tts/install.sh): Error: ${pyenv} does not exist"
# exit 1
# fi
if [ ! -d ${pyenv} ]; then
echo "(parler-tts/install.sh): Error: ${pyenv} does not exist"
exit 1
fi
# curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${pyenv}/builder.py
curl -L https://raw.githubusercontent.com/protocolbuffers/protobuf/main/python/google/protobuf/internal/builder.py -o ${pyenv}/builder.py


@@ -0,0 +1,6 @@
#!/bin/bash
set -e
source $(dirname $0)/../common/libbackend.sh
python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto


@@ -1,4 +1,4 @@
git+https://github.com/huggingface/parler-tts.git@8e465f1b5fcd223478e07175cb40494d19ffbe17
llvmlite==0.43.0
numba==0.60.0
git+https://github.com/descriptinc/audiotools
grpcio-tools==1.42.0


@@ -1,4 +1,3 @@
grpcio==1.67.0
protobuf
grpcio==1.68.0
certifi
llvmlite==0.43.0
llvmlite==0.43.0


@@ -1,3 +1,3 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi


@@ -2,5 +2,5 @@ torch==2.4.1
accelerate
transformers
bitsandbytes
sentence-transformers==3.2.0
sentence-transformers==3.3.1
transformers


@@ -1,5 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.4.1+cu118
accelerate
sentence-transformers==3.2.0
sentence-transformers==3.3.1
transformers


@@ -1,4 +1,4 @@
torch==2.4.1
accelerate
sentence-transformers==3.2.0
sentence-transformers==3.3.1
transformers


@@ -1,5 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.0
torch==2.4.1+rocm6.0
accelerate
sentence-transformers==3.2.0
sentence-transformers==3.3.1
transformers


@@ -4,5 +4,5 @@ torch
optimum[openvino]
setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406
accelerate
sentence-transformers==3.2.0
sentence-transformers==3.3.1
transformers


@@ -1,4 +1,4 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi
datasets


@@ -1,4 +1,4 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
scipy==1.14.0
certifi


@@ -1,4 +1,4 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi
setuptools==69.5.1 # https://github.com/mudler/LocalAI/issues/2406


@@ -1,3 +1,3 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi


@@ -22,7 +22,7 @@ if [ "x${BUILD_TYPE}" == "x" ] && [ "x${FROM_SOURCE}" == "xtrue" ]; then
git clone https://github.com/vllm-project/vllm
fi
pushd vllm
uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.67.0 protobuf bitsandbytes
uv pip install wheel packaging ninja "setuptools>=49.4.0" numpy typing-extensions pillow setuptools-scm grpcio==1.68.0 protobuf bitsandbytes
uv pip install -v -r requirements-cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu
VLLM_TARGET_DEVICE=cpu python setup.py install
popd


@@ -1,4 +1,4 @@
grpcio==1.67.0
grpcio==1.68.0
protobuf
certifi
setuptools


@@ -11,17 +11,9 @@ import (
func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (func() ([]float32, error), error) {
var inferenceModel interface{}
var err error
opts := ModelOptions(backendConfig, appConfig)
opts := ModelOptions(backendConfig, appConfig, []model.Option{})
if backendConfig.Backend == "" {
inferenceModel, err = loader.GreedyLoader(opts...)
} else {
opts = append(opts, model.WithBackendString(backendConfig.Backend))
inferenceModel, err = loader.BackendLoader(opts...)
}
inferenceModel, err := loader.Load(opts...)
if err != nil {
return nil, err
}


@@ -9,9 +9,8 @@ import (
func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negative_prompt, src, dst string, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (func() error, error) {
opts := ModelOptions(backendConfig, appConfig, []model.Option{})
inferenceModel, err := loader.BackendLoader(
opts := ModelOptions(backendConfig, appConfig)
inferenceModel, err := loader.Load(
opts...,
)
if err != nil {


@@ -16,7 +16,6 @@ import (
"github.com/mudler/LocalAI/core/schema"
"github.com/mudler/LocalAI/core/gallery"
"github.com/mudler/LocalAI/pkg/grpc"
"github.com/mudler/LocalAI/pkg/grpc/proto"
model "github.com/mudler/LocalAI/pkg/model"
"github.com/mudler/LocalAI/pkg/utils"
@@ -35,15 +34,6 @@ type TokenUsage struct {
func ModelInference(ctx context.Context, s string, messages []schema.Message, images, videos, audios []string, loader *model.ModelLoader, c config.BackendConfig, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) {
modelFile := c.Model
var inferenceModel grpc.Backend
var err error
opts := ModelOptions(c, o, []model.Option{})
if c.Backend != "" {
opts = append(opts, model.WithBackendString(c.Backend))
}
// Check if the modelFile exists, if it doesn't try to load it from the gallery
if o.AutoloadGalleries { // experimental
if _, err := os.Stat(modelFile); os.IsNotExist(err) {
@@ -56,12 +46,8 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
}
}
if c.Backend == "" {
inferenceModel, err = loader.GreedyLoader(opts...)
} else {
inferenceModel, err = loader.BackendLoader(opts...)
}
opts := ModelOptions(c, o)
inferenceModel, err := loader.Load(opts...)
if err != nil {
return nil, err
}


@@ -11,7 +11,7 @@ import (
"github.com/rs/zerolog/log"
)
func ModelOptions(c config.BackendConfig, so *config.ApplicationConfig, opts []model.Option) []model.Option {
func ModelOptions(c config.BackendConfig, so *config.ApplicationConfig, opts ...model.Option) []model.Option {
name := c.Name
if name == "" {
name = c.Model
@@ -125,6 +125,8 @@ func grpcModelOpts(c config.BackendConfig) *pb.ModelOptions {
CFGScale: c.Diffusers.CFGScale,
LoraAdapter: c.LoraAdapter,
LoraScale: c.LoraScale,
LoraAdapters: c.LoraAdapters,
LoraScales: c.LoraScales,
F16Memory: f16,
LoraBase: c.LoraBase,
IMG2IMG: c.Diffusers.IMG2IMG,
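The hunks above switch `ModelOptions` from taking a trailing `[]model.Option` slice to a variadic parameter. A self-contained sketch of the same refactor pattern, with illustrative names rather than LocalAI's real types:

```go
package main

import "fmt"

type Option func(*config)

type config struct{ model string }

func WithModel(m string) Option { return func(c *config) { c.model = m } }

// Before: func ModelOptions(extra []Option) []Option
// After (as in the diff): a variadic tail, so callers drop the slice literal
// while `opts...` forwarding keeps working unchanged.
func ModelOptions(extra ...Option) []Option {
	base := []Option{WithModel("default.gguf")}
	return append(base, extra...)
}

func main() {
	c := &config{}
	for _, o := range ModelOptions(WithModel("phi-2.gguf")) {
		o(c)
	}
	fmt.Println(c.model) // phi-2.gguf
}
```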


@@ -11,8 +11,8 @@ import (
func Rerank(modelFile string, request *proto.RerankRequest, loader *model.ModelLoader, appConfig *config.ApplicationConfig, backendConfig config.BackendConfig) (*proto.RerankResult, error) {
opts := ModelOptions(backendConfig, appConfig, []model.Option{model.WithModel(modelFile)})
rerankModel, err := loader.BackendLoader(opts...)
opts := ModelOptions(backendConfig, appConfig, model.WithModel(modelFile))
rerankModel, err := loader.Load(opts...)
if err != nil {
return nil, err
}


@@ -25,9 +25,8 @@ func SoundGeneration(
backendConfig config.BackendConfig,
) (string, *proto.Result, error) {
opts := ModelOptions(backendConfig, appConfig, []model.Option{model.WithModel(modelFile)})
soundGenModel, err := loader.BackendLoader(opts...)
opts := ModelOptions(backendConfig, appConfig, model.WithModel(modelFile))
soundGenModel, err := loader.Load(opts...)
if err != nil {
return "", nil, err
}


@@ -8,16 +8,15 @@ import (
)
func StoreBackend(sl *model.ModelLoader, appConfig *config.ApplicationConfig, storeName string) (grpc.Backend, error) {
if storeName == "" {
storeName = "default"
}
if storeName == "" {
storeName = "default"
}
sc := []model.Option{
model.WithBackendString(model.LocalStoreBackend),
model.WithAssetDir(appConfig.AssetsDestination),
model.WithModel(storeName),
}
sc := []model.Option{
model.WithBackendString(model.LocalStoreBackend),
model.WithAssetDir(appConfig.AssetsDestination),
model.WithModel(storeName),
}
return sl.BackendLoader(sc...)
return sl.Load(sc...)
}


@@ -15,10 +15,8 @@ func TokenMetrics(
appConfig *config.ApplicationConfig,
backendConfig config.BackendConfig) (*proto.MetricsResponse, error) {
opts := ModelOptions(backendConfig, appConfig, []model.Option{
model.WithModel(modelFile),
})
model, err := loader.BackendLoader(opts...)
opts := ModelOptions(backendConfig, appConfig, model.WithModel(modelFile))
model, err := loader.Load(opts...)
if err != nil {
return nil, err
}


@@ -14,15 +14,13 @@ func ModelTokenize(s string, loader *model.ModelLoader, backendConfig config.Bac
var inferenceModel grpc.Backend
var err error
opts := ModelOptions(backendConfig, appConfig, []model.Option{
model.WithModel(modelFile),
})
opts := ModelOptions(backendConfig, appConfig, model.WithModel(modelFile))
if backendConfig.Backend == "" {
inferenceModel, err = loader.GreedyLoader(opts...)
inferenceModel, err = loader.Load(opts...)
} else {
opts = append(opts, model.WithBackendString(backendConfig.Backend))
inferenceModel, err = loader.BackendLoader(opts...)
inferenceModel, err = loader.Load(opts...)
}
if err != nil {
return schema.TokenizeResponse{}, err


@@ -18,9 +18,9 @@ func ModelTranscription(audio, language string, translate bool, ml *model.ModelL
backendConfig.Backend = model.WhisperBackend
}
opts := ModelOptions(backendConfig, appConfig, []model.Option{})
opts := ModelOptions(backendConfig, appConfig)
transcriptionModel, err := ml.BackendLoader(opts...)
transcriptionModel, err := ml.Load(opts...)
if err != nil {
return nil, err
}


@@ -28,11 +28,8 @@ func ModelTTS(
bb = model.PiperBackend
}
opts := ModelOptions(config.BackendConfig{}, appConfig, []model.Option{
model.WithBackendString(bb),
model.WithModel(modelFile),
})
ttsModel, err := loader.BackendLoader(opts...)
opts := ModelOptions(backendConfig, appConfig, model.WithBackendString(bb), model.WithModel(modelFile))
ttsModel, err := loader.Load(opts...)
if err != nil {
return "", nil, err
}


@@ -76,8 +76,14 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
"util",
"llama-cpp-rpc-server",
)
extraArgs := strings.Split(r.ExtraLLamaCPPArgs, " ")
var extraArgs []string
if r.ExtraLLamaCPPArgs != "" {
extraArgs = strings.Split(r.ExtraLLamaCPPArgs, " ")
}
args := append([]string{"--host", address, "--port", fmt.Sprint(port)}, extraArgs...)
log.Debug().Msgf("Starting llama-cpp-rpc-server on '%s:%d' with args: %+v (%d)", address, port, args, len(args))
args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess)
cmd := exec.Command(
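The guard added above matters because `strings.Split` never returns an empty slice for a non-empty separator: splitting the empty string yields `[""]`, a one-element slice whose empty string would have been forwarded to the rpc server as a spurious argument. A quick check:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(len(strings.Split("", " ")))       // 1, not 0
	fmt.Printf("%q\n", strings.Split("", " "))     // [""]
	fmt.Printf("%q\n", strings.Split("-m 1", " ")) // ["-m" "1"]
}
```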


@@ -134,23 +134,25 @@ type LLMConfig struct {
TrimSpace []string `yaml:"trimspace"`
TrimSuffix []string `yaml:"trimsuffix"`
ContextSize *int `yaml:"context_size"`
NUMA bool `yaml:"numa"`
LoraAdapter string `yaml:"lora_adapter"`
LoraBase string `yaml:"lora_base"`
LoraScale float32 `yaml:"lora_scale"`
NoMulMatQ bool `yaml:"no_mulmatq"`
DraftModel string `yaml:"draft_model"`
NDraft int32 `yaml:"n_draft"`
Quantization string `yaml:"quantization"`
LoadFormat string `yaml:"load_format"`
GPUMemoryUtilization float32 `yaml:"gpu_memory_utilization"` // vLLM
TrustRemoteCode bool `yaml:"trust_remote_code"` // vLLM
EnforceEager bool `yaml:"enforce_eager"` // vLLM
SwapSpace int `yaml:"swap_space"` // vLLM
MaxModelLen int `yaml:"max_model_len"` // vLLM
TensorParallelSize int `yaml:"tensor_parallel_size"` // vLLM
MMProj string `yaml:"mmproj"`
ContextSize *int `yaml:"context_size"`
NUMA bool `yaml:"numa"`
LoraAdapter string `yaml:"lora_adapter"`
LoraBase string `yaml:"lora_base"`
LoraAdapters []string `yaml:"lora_adapters"`
LoraScales []float32 `yaml:"lora_scales"`
LoraScale float32 `yaml:"lora_scale"`
NoMulMatQ bool `yaml:"no_mulmatq"`
DraftModel string `yaml:"draft_model"`
NDraft int32 `yaml:"n_draft"`
Quantization string `yaml:"quantization"`
LoadFormat string `yaml:"load_format"`
GPUMemoryUtilization float32 `yaml:"gpu_memory_utilization"` // vLLM
TrustRemoteCode bool `yaml:"trust_remote_code"` // vLLM
EnforceEager bool `yaml:"enforce_eager"` // vLLM
SwapSpace int `yaml:"swap_space"` // vLLM
MaxModelLen int `yaml:"max_model_len"` // vLLM
TensorParallelSize int `yaml:"tensor_parallel_size"` // vLLM
MMProj string `yaml:"mmproj"`
FlashAttention bool `yaml:"flash_attention"`
NoKVOffloading bool `yaml:"no_kv_offloading"`
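A sketch of how the two new fields map from a model YAML file, using a trimmed stand-in for `LLMConfig` (the yaml tags match the diff above; the adapter file names are made up):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Trimmed stand-in for LLMConfig, showing only the two new fields.
type llmConfig struct {
	LoraAdapters []string  `yaml:"lora_adapters"`
	LoraScales   []float32 `yaml:"lora_scales"`
}

func main() {
	doc := []byte(`
lora_adapters:
  - style-adapter.safetensors
  - subject-adapter.safetensors
lora_scales:
  - 0.8
  - 0.5
`)
	var c llmConfig
	if err := yaml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.LoraAdapters, c.LoraScales)
}
```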


@@ -140,7 +140,7 @@ func (bcl *BackendConfigLoader) LoadBackendConfigFileByName(modelName, modelPath
}
}
cfg.SetDefaults(opts...)
cfg.SetDefaults(append(opts, ModelPath(modelPath))...)
return cfg, nil
}


@@ -345,7 +345,7 @@ var _ = Describe("API test", func() {
It("Should fail if the api key is missing", func() {
err, sc := postInvalidRequest("http://127.0.0.1:9090/models/available")
Expect(err).ToNot(BeNil())
Expect(sc).To(Equal(403))
Expect(sc).To(Equal(401))
})
})
@@ -438,7 +438,7 @@ var _ = Describe("API test", func() {
Eventually(func() bool {
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
return response["processed"].(bool)
}, "360s", "10s").Should(Equal(true))
}, "900s", "10s").Should(Equal(true))
Eventually(func() []string {
models, _ := client.ListModels(context.TODO())
@@ -562,7 +562,7 @@ var _ = Describe("API test", func() {
Eventually(func() bool {
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
return response["processed"].(bool)
}, "360s", "10s").Should(Equal(true))
}, "900s", "10s").Should(Equal(true))
By("testing chat")
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: modelName, Messages: []openai.ChatCompletionMessage{


@@ -0,0 +1,97 @@
package elements
import (
"strings"
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/mudler/LocalAI/core/gallery"
)
func installButton(galleryName string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/install/model/" + galleryName,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-download pr-2",
},
),
elem.Text("Install"),
)
}
func reInstallButton(galleryName string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-primary ml-2 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-target": "#action-div-" + dropBadChars(galleryName),
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/install/model/" + galleryName,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-arrow-rotate-right pr-2",
},
),
elem.Text("Reinstall"),
)
}
func infoButton(m *gallery.GalleryModel) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-left inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"data-modal-target": modalName(m),
"data-modal-toggle": modalName(m),
},
elem.P(
attrs.Props{
"class": "flex items-center",
},
elem.I(
attrs.Props{
"class": "fas fa-info-circle pr-2",
},
),
elem.Text("Info"),
),
)
}
func deleteButton(galleryID string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"hx-confirm": "Are you sure you wish to delete the model?",
"class": "float-right inline-block rounded bg-red-800 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-red-accent-300 hover:shadow-red-2 focus:bg-red-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-red-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-target": "#action-div-" + dropBadChars(galleryID),
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/delete/model/" + galleryID,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-cancel pr-2",
},
),
elem.Text("Delete"),
)
}
// Javascript/HTMX doesn't like weird IDs
func dropBadChars(s string) string {
return strings.ReplaceAll(s, "@", "__")
}


@@ -2,13 +2,11 @@ package elements
import (
"fmt"
"strings"
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/microcosm-cc/bluemonday"
"github.com/mudler/LocalAI/core/gallery"
"github.com/mudler/LocalAI/core/p2p"
"github.com/mudler/LocalAI/core/services"
)
@@ -16,231 +14,6 @@ const (
noImage = "https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"
)
func renderElements(n []elem.Node) string {
render := ""
for _, r := range n {
render += r.Render()
}
return render
}
func DoneProgress(galleryID, text string, showDelete bool) string {
var modelName = galleryID
// Split by @ and grab the name
if strings.Contains(galleryID, "@") {
modelName = strings.Split(galleryID, "@")[1]
}
return elem.Div(
attrs.Props{
"id": "action-div-" + dropBadChars(galleryID),
},
elem.H3(
attrs.Props{
"role": "status",
"id": "pblabel",
"tabindex": "-1",
"autofocus": "",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(text)),
),
elem.If(showDelete, deleteButton(galleryID, modelName), reInstallButton(galleryID)),
).Render()
}
func ErrorProgress(err, galleryName string) string {
return elem.Div(
attrs.Props{},
elem.H3(
attrs.Props{
"role": "status",
"id": "pblabel",
"tabindex": "-1",
"autofocus": "",
},
elem.Text("Error "+bluemonday.StrictPolicy().Sanitize(err)),
),
installButton(galleryName),
).Render()
}
func ProgressBar(progress string) string {
return elem.Div(attrs.Props{
"class": "progress",
"role": "progressbar",
"aria-valuemin": "0",
"aria-valuemax": "100",
"aria-valuenow": "0",
"aria-labelledby": "pblabel",
},
elem.Div(attrs.Props{
"id": "pb",
"class": "progress-bar",
"style": "width:" + progress + "%",
}),
).Render()
}
func P2PNodeStats(nodes []p2p.NodeData) string {
/*
<div class="bg-gray-800 p-6 rounded-lg shadow-lg text-left">
<p class="text-xl font-semibold text-gray-200">Total Workers Detected: {{ len .Nodes }}</p>
{{ $online := 0 }}
{{ range .Nodes }}
{{ if .IsOnline }}
{{ $online = add $online 1 }}
{{ end }}
{{ end }}
<p class="text-xl font-semibold text-gray-200">Total Online Workers: {{$online}}</p>
</div>
*/
online := 0
for _, n := range nodes {
if n.IsOnline() {
online++
}
}
class := "text-green-500"
if online == 0 {
class = "text-red-500"
}
/*
<i class="fas fa-circle animate-pulse text-green-500 ml-2 mr-1"></i>
*/
circle := elem.I(attrs.Props{
"class": "fas fa-circle animate-pulse " + class + " ml-2 mr-1",
})
nodesElements := []elem.Node{
elem.Span(
attrs.Props{
"class": class,
},
circle,
elem.Text(fmt.Sprintf("%d", online)),
),
elem.Span(
attrs.Props{
"class": "text-gray-200",
},
elem.Text(fmt.Sprintf("/%d", len(nodes))),
),
}
return renderElements(nodesElements)
}
func P2PNodeBoxes(nodes []p2p.NodeData) string {
/*
<div class="bg-gray-800 p-4 rounded-lg shadow-lg text-left">
<div class="flex items-center mb-2">
<i class="fas fa-desktop text-gray-400 mr-2"></i>
<span class="text-gray-200 font-semibold">{{.ID}}</span>
</div>
<p class="text-sm text-gray-400 mt-2 flex items-center">
Status:
<i class="fas fa-circle {{ if .IsOnline }}text-green-500{{ else }}text-red-500{{ end }} ml-2 mr-1"></i>
<span class="{{ if .IsOnline }}text-green-400{{ else }}text-red-400{{ end }}">
{{ if .IsOnline }}Online{{ else }}Offline{{ end }}
</span>
</p>
</div>
*/
nodesElements := []elem.Node{}
for _, n := range nodes {
nodesElements = append(nodesElements,
elem.Div(
attrs.Props{
"class": "bg-gray-700 p-6 rounded-lg shadow-lg text-left",
},
elem.P(
attrs.Props{
"class": "text-sm text-gray-400 mt-2 flex",
},
elem.I(
attrs.Props{
"class": "fas fa-desktop text-gray-400 mr-2",
},
),
elem.Text("Name: "),
elem.Span(
attrs.Props{
"class": "text-gray-200 font-semibold ml-2 mr-1",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(n.ID)),
),
elem.Text("Status: "),
elem.If(
n.IsOnline(),
elem.I(
attrs.Props{
"class": "fas fa-circle animate-pulse text-green-500 ml-2 mr-1",
},
),
elem.I(
attrs.Props{
"class": "fas fa-circle animate-pulse text-red-500 ml-2 mr-1",
},
),
),
elem.If(
n.IsOnline(),
elem.Span(
attrs.Props{
"class": "text-green-400",
},
elem.Text("Online"),
),
elem.Span(
attrs.Props{
"class": "text-red-400",
},
elem.Text("Offline"),
),
),
),
))
}
return renderElements(nodesElements)
}
func StartProgressBar(uid, progress, text string) string {
if progress == "" {
progress = "0"
}
return elem.Div(
attrs.Props{
"hx-trigger": "done",
"hx-get": "/browse/job/" + uid,
"hx-swap": "outerHTML",
"hx-target": "this",
},
elem.H3(
attrs.Props{
"role": "status",
"id": "pblabel",
"tabindex": "-1",
"autofocus": "",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(text)), //Perhaps overly defensive
elem.Div(attrs.Props{
"hx-get": "/browse/job/progress/" + uid,
"hx-trigger": "every 600ms",
"hx-target": "this",
"hx-swap": "innerHTML",
},
elem.Raw(ProgressBar(progress)),
),
),
).Render()
}
func cardSpan(text, icon string) elem.Node {
return elem.Span(
attrs.Props{
@@ -268,7 +41,6 @@ func searchableElement(text, icon string) elem.Node {
attrs.Props{
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2",
},
elem.A(
attrs.Props{
// "name": "search",
@@ -290,7 +62,8 @@ func searchableElement(text, icon string) elem.Node {
)
}
func link(text, url string) elem.Node {
/*
func buttonLink(text, url string) elem.Node {
return elem.A(
attrs.Props{
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2",
@@ -303,163 +76,255 @@ func link(text, url string) elem.Node {
elem.Text(bluemonday.StrictPolicy().Sanitize(text)),
)
}
func installButton(galleryName string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/install/model/" + galleryName,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-download pr-2",
},
),
elem.Text("Install"),
)
}
*/
func reInstallButton(galleryName string) elem.Node {
return elem.Button(
func link(text, url string) elem.Node {
return elem.A(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-primary ml-2 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-target": "#action-div-" + dropBadChars(galleryName),
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/install/model/" + galleryName,
"class": "text-base leading-relaxed text-gray-500 dark:text-gray-400",
"href": url,
"target": "_blank",
},
elem.I(
attrs.Props{
"class": "fa-solid fa-arrow-rotate-right pr-2",
},
),
elem.Text("Reinstall"),
elem.I(attrs.Props{
"class": "fas fa-link pr-2",
}),
elem.Text(bluemonday.StrictPolicy().Sanitize(text)),
)
}
func deleteButton(galleryID, modelName string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"hx-confirm": "Are you sure you wish to delete the model?",
"class": "float-right inline-block rounded bg-red-800 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-red-accent-300 hover:shadow-red-2 focus:bg-red-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-red-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-target": "#action-div-" + dropBadChars(galleryID),
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/delete/model/" + galleryID,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-cancel pr-2",
},
),
elem.Text("Delete"),
)
}
// JavaScript/HTMX doesn't cope well with IDs containing characters like "@"
func dropBadChars(s string) string {
return strings.ReplaceAll(s, "@", "__")
}
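For context, a self-contained sketch (not part of the diff) of how this helper feeds hx-target selectors; the gallery ID below is hypothetical:

package main

import (
	"fmt"
	"strings"
)

// Mirrors dropBadChars above: "@" is not valid inside a CSS ID selector,
// so gallery IDs are rewritten before being embedded in hx-target values.
func dropBadChars(s string) string {
	return strings.ReplaceAll(s, "@", "__")
}

func main() {
	galleryID := "localai@example-model" // hypothetical gallery ID
	fmt.Println("#action-div-" + dropBadChars(galleryID))
	// prints: #action-div-localai__example-model
}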
type ProcessTracker interface {
Exists(string) bool
Get(string) string
}
func ListModels(models []*gallery.GalleryModel, processTracker ProcessTracker, galleryService *services.GalleryService) string {
modelsElements := []elem.Node{}
descriptionDiv := func(m *gallery.GalleryModel) elem.Node {
return elem.Div(
attrs.Props{
"class": "p-6 text-surface dark:text-white",
},
elem.H5(
attrs.Props{
"class": "mb-2 text-xl font-bold leading-tight",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(m.Name)),
),
elem.P(
attrs.Props{
"class": "mb-4 text-sm [&:not(:hover)]:truncate text-base",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(m.Description)),
),
func modalName(m *gallery.GalleryModel) string {
return m.Name + "-modal"
}
func modelDescription(m *gallery.GalleryModel) elem.Node {
urls := []elem.Node{}
for _, url := range m.URLs {
urls = append(urls,
elem.Li(attrs.Props{}, link(url, url)),
)
}
actionDiv := func(m *gallery.GalleryModel) elem.Node {
galleryID := fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name)
currentlyProcessing := processTracker.Exists(galleryID)
jobID := ""
isDeletionOp := false
if currentlyProcessing {
status := galleryService.GetStatus(galleryID)
if status != nil && status.Deletion {
isDeletionOp = true
}
jobID = processTracker.Get(galleryID)
// TODO: the status == nil case ("Waiting") is not handled
}
nodes := []elem.Node{
cardSpan("Repository: "+m.Gallery.Name, "fa-brands fa-git-alt"),
}
if m.License != "" {
nodes = append(nodes,
cardSpan("License: "+m.License, "fas fa-book"),
)
}
tagsNodes := []elem.Node{}
for _, tag := range m.Tags {
tagsNodes = append(tagsNodes,
searchableElement(tag, "fas fa-tag"),
)
}
nodes = append(nodes,
elem.Div(
attrs.Props{
"class": "flex flex-row flex-wrap content-center",
},
tagsNodes...,
),
tagsNodes := []elem.Node{}
for _, tag := range m.Tags {
tagsNodes = append(tagsNodes,
searchableElement(tag, "fas fa-tag"),
)
}
for i, url := range m.URLs {
nodes = append(nodes,
link("Link #"+fmt.Sprintf("%d", i+1), url),
)
}
progressMessage := "Installation"
if isDeletionOp {
progressMessage = "Deletion"
}
return elem.Div(
return elem.Div(
attrs.Props{
"class": "p-6 text-surface dark:text-white",
},
elem.H5(
attrs.Props{
"class": "px-6 pt-4 pb-2",
"class": "mb-2 text-xl font-bold leading-tight",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(m.Name)),
),
elem.Div( // small description
attrs.Props{
"class": "mb-4 text-sm truncate text-base",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(m.Description)),
),
elem.Div(
attrs.Props{
"id": modalName(m),
"tabindex": "-1",
"aria-hidden": "true",
"class": "hidden overflow-y-auto overflow-x-hidden fixed top-0 right-0 left-0 z-50 justify-center items-center w-full md:inset-0 h-[calc(100%-1rem)] max-h-full",
},
elem.P(
attrs.Props{
"class": "mb-4 text-base",
},
nodes...,
),
elem.Div(
attrs.Props{
"id": "action-div-" + dropBadChars(galleryID),
"class": "relative p-4 w-full max-w-2xl max-h-full",
},
elem.Div(
attrs.Props{
"class": "relative p-4 w-full max-w-2xl max-h-full bg-white rounded-lg shadow dark:bg-gray-700",
},
// header
elem.Div(
attrs.Props{
"class": "flex items-center justify-between p-4 md:p-5 border-b rounded-t dark:border-gray-600",
},
elem.H3(
attrs.Props{
"class": "text-xl font-semibold text-gray-900 dark:text-white",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(m.Name)),
),
elem.Button( // close button
attrs.Props{
"class": "text-gray-400 bg-transparent hover:bg-gray-200 hover:text-gray-900 rounded-lg text-sm w-8 h-8 ms-auto inline-flex justify-center items-center dark:hover:bg-gray-600 dark:hover:text-white",
"data-modal-hide": modalName(m),
},
elem.Raw(
`<svg class="w-3 h-3" aria-hidden="true" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 14 14">
<path stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m1 1 6 6m0 0 6 6M7 7l6-6M7 7l-6 6"/>
</svg>`,
),
elem.Span(
attrs.Props{
"class": "sr-only",
},
elem.Text("Close modal"),
),
),
),
// body
elem.Div(
attrs.Props{
"class": "p-4 md:p-5 space-y-4",
},
elem.Div(
attrs.Props{
"class": "flex justify-center items-center",
},
elem.Img(attrs.Props{
// "class": "rounded-t-lg object-fit object-center h-96",
"class": "lazy rounded-t-lg max-h-48 max-w-96 object-cover mt-3 entered loaded",
"src": m.Icon,
"loading": "lazy",
}),
),
elem.P(
attrs.Props{
"class": "text-base leading-relaxed text-gray-500 dark:text-gray-400",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(m.Description)),
),
elem.Hr(
attrs.Props{},
),
elem.P(
attrs.Props{
"class": "text-sm font-semibold text-gray-900 dark:text-white",
},
elem.Text("Links"),
),
elem.Ul(
attrs.Props{},
urls...,
),
elem.If(
len(m.Tags) > 0,
elem.Div(
attrs.Props{},
elem.P(
attrs.Props{
"class": "text-sm mb-5 font-semibold text-gray-900 dark:text-white",
},
elem.Text("Tags"),
),
elem.Div(
attrs.Props{
"class": "flex flex-row flex-wrap content-center",
},
tagsNodes...,
),
),
elem.Div(attrs.Props{}),
),
),
// Footer
elem.Div(
attrs.Props{
"class": "flex items-center p-4 md:p-5 border-t border-gray-200 rounded-b dark:border-gray-600",
},
elem.Button(
attrs.Props{
"data-modal-hide": modalName(m),
"class": "py-2.5 px-5 ms-3 text-sm font-medium text-gray-900 focus:outline-none bg-white rounded-lg border border-gray-200 hover:bg-gray-100 hover:text-blue-700 focus:z-10 focus:ring-4 focus:ring-gray-100 dark:focus:ring-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:border-gray-600 dark:hover:text-white dark:hover:bg-gray-700",
},
elem.Text("Close"),
),
),
),
),
),
)
}
func modelActionItems(m *gallery.GalleryModel, processTracker ProcessTracker, galleryService *services.GalleryService) elem.Node {
galleryID := fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name)
currentlyProcessing := processTracker.Exists(galleryID)
jobID := ""
isDeletionOp := false
if currentlyProcessing {
status := galleryService.GetStatus(galleryID)
if status != nil && status.Deletion {
isDeletionOp = true
}
jobID = processTracker.Get(galleryID)
// TODO: the status == nil case ("Waiting") is not handled
}
nodes := []elem.Node{
cardSpan("Repository: "+m.Gallery.Name, "fa-brands fa-git-alt"),
}
if m.License != "" {
nodes = append(nodes,
cardSpan("License: "+m.License, "fas fa-book"),
)
}
/*
tagsNodes := []elem.Node{}
for _, tag := range m.Tags {
tagsNodes = append(tagsNodes,
searchableElement(tag, "fas fa-tag"),
)
}
nodes = append(nodes,
elem.Div(
attrs.Props{
"class": "flex flex-row flex-wrap content-center",
},
tagsNodes...,
),
)
for i, url := range m.URLs {
nodes = append(nodes,
buttonLink("Link #"+fmt.Sprintf("%d", i+1), url),
)
}
*/
progressMessage := "Installation"
if isDeletionOp {
progressMessage = "Deletion"
}
return elem.Div(
attrs.Props{
"class": "px-6 pt-4 pb-2",
},
elem.P(
attrs.Props{
"class": "mb-4 text-base",
},
nodes...,
),
elem.Div(
attrs.Props{
"id": "action-div-" + dropBadChars(galleryID),
"class": "flow-root", // To order buttons left and right
},
infoButton(m),
elem.Div(
attrs.Props{
"class": "float-right",
},
elem.If(
currentlyProcessing,
@@ -470,14 +335,18 @@ func ListModels(models []*gallery.GalleryModel, processTracker ProcessTracker, g
elem.Node(elem.Div(
attrs.Props{},
reInstallButton(m.ID()),
deleteButton(m.ID(), m.Name),
deleteButton(m.ID()),
)),
installButton(m.ID()),
),
),
),
)
}
),
)
}
func ListModels(models []*gallery.GalleryModel, processTracker ProcessTracker, galleryService *services.GalleryService) string {
modelsElements := []elem.Node{}
for _, m := range models {
elems := []elem.Node{}
@@ -521,7 +390,10 @@ func ListModels(models []*gallery.GalleryModel, processTracker ProcessTracker, g
))
}
elems = append(elems, descriptionDiv(m), actionDiv(m))
elems = append(elems,
modelDescription(m),
modelActionItems(m, processTracker, galleryService),
)
modelsElements = append(modelsElements,
elem.Div(
attrs.Props{

core/http/elements/p2p.go (new file, 147 lines)

@@ -0,0 +1,147 @@
package elements
import (
"fmt"
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/microcosm-cc/bluemonday"
"github.com/mudler/LocalAI/core/p2p"
)
func renderElements(n []elem.Node) string {
render := ""
for _, r := range n {
render += r.Render()
}
return render
}
func P2PNodeStats(nodes []p2p.NodeData) string {
/*
<div class="bg-gray-800 p-6 rounded-lg shadow-lg text-left">
<p class="text-xl font-semibold text-gray-200">Total Workers Detected: {{ len .Nodes }}</p>
{{ $online := 0 }}
{{ range .Nodes }}
{{ if .IsOnline }}
{{ $online = add $online 1 }}
{{ end }}
{{ end }}
<p class="text-xl font-semibold text-gray-200">Total Online Workers: {{$online}}</p>
</div>
*/
online := 0
for _, n := range nodes {
if n.IsOnline() {
online++
}
}
class := "text-green-500"
if online == 0 {
class = "text-red-500"
}
/*
<i class="fas fa-circle animate-pulse text-green-500 ml-2 mr-1"></i>
*/
circle := elem.I(attrs.Props{
"class": "fas fa-circle animate-pulse " + class + " ml-2 mr-1",
})
nodesElements := []elem.Node{
elem.Span(
attrs.Props{
"class": class,
},
circle,
elem.Text(fmt.Sprintf("%d", online)),
),
elem.Span(
attrs.Props{
"class": "text-gray-200",
},
elem.Text(fmt.Sprintf("/%d", len(nodes))),
),
}
return renderElements(nodesElements)
}
func P2PNodeBoxes(nodes []p2p.NodeData) string {
/*
<div class="bg-gray-800 p-4 rounded-lg shadow-lg text-left">
<div class="flex items-center mb-2">
<i class="fas fa-desktop text-gray-400 mr-2"></i>
<span class="text-gray-200 font-semibold">{{.ID}}</span>
</div>
<p class="text-sm text-gray-400 mt-2 flex items-center">
Status:
<i class="fas fa-circle {{ if .IsOnline }}text-green-500{{ else }}text-red-500{{ end }} ml-2 mr-1"></i>
<span class="{{ if .IsOnline }}text-green-400{{ else }}text-red-400{{ end }}">
{{ if .IsOnline }}Online{{ else }}Offline{{ end }}
</span>
</p>
</div>
*/
nodesElements := []elem.Node{}
for _, n := range nodes {
nodesElements = append(nodesElements,
elem.Div(
attrs.Props{
"class": "bg-gray-700 p-6 rounded-lg shadow-lg text-left",
},
elem.P(
attrs.Props{
"class": "text-sm text-gray-400 mt-2 flex",
},
elem.I(
attrs.Props{
"class": "fas fa-desktop text-gray-400 mr-2",
},
),
elem.Text("Name: "),
elem.Span(
attrs.Props{
"class": "text-gray-200 font-semibold ml-2 mr-1",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(n.ID)),
),
elem.Text("Status: "),
elem.If(
n.IsOnline(),
elem.I(
attrs.Props{
"class": "fas fa-circle animate-pulse text-green-500 ml-2 mr-1",
},
),
elem.I(
attrs.Props{
"class": "fas fa-circle animate-pulse text-red-500 ml-2 mr-1",
},
),
),
elem.If(
n.IsOnline(),
elem.Span(
attrs.Props{
"class": "text-green-400",
},
elem.Text("Online"),
),
elem.Span(
attrs.Props{
"class": "text-red-400",
},
elem.Text("Offline"),
),
),
),
))
}
return renderElements(nodesElements)
}
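The online/offline badge above leans on elem.If choosing between two prebuilt nodes; a reduced, runnable sketch of the same pattern:

package main

import (
	"fmt"

	"github.com/chasefleming/elem-go"
	"github.com/chasefleming/elem-go/attrs"
)

// Reduced sketch of the badge pattern in P2PNodeBoxes: elem.If returns
// one of the two nodes depending on the condition.
func statusBadge(online bool) elem.Node {
	return elem.If(online,
		elem.Span(attrs.Props{"class": "text-green-400"}, elem.Text("Online")),
		elem.Span(attrs.Props{"class": "text-red-400"}, elem.Text("Offline")),
	)
}

func main() {
	fmt.Println(statusBadge(true).Render())
	// <span class="text-green-400">Online</span>
}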


@@ -0,0 +1,89 @@
package elements
import (
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/microcosm-cc/bluemonday"
)
func DoneProgress(galleryID, text string, showDelete bool) string {
return elem.Div(
attrs.Props{
"id": "action-div-" + dropBadChars(galleryID),
},
elem.H3(
attrs.Props{
"role": "status",
"id": "pblabel",
"tabindex": "-1",
"autofocus": "",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(text)),
),
elem.If(showDelete, deleteButton(galleryID), reInstallButton(galleryID)),
).Render()
}
func ErrorProgress(err, galleryName string) string {
return elem.Div(
attrs.Props{},
elem.H3(
attrs.Props{
"role": "status",
"id": "pblabel",
"tabindex": "-1",
"autofocus": "",
},
elem.Text("Error "+bluemonday.StrictPolicy().Sanitize(err)),
),
installButton(galleryName),
).Render()
}
func ProgressBar(progress string) string {
return elem.Div(attrs.Props{
"class": "progress",
"role": "progressbar",
"aria-valuemin": "0",
"aria-valuemax": "100",
"aria-valuenow": "0",
"aria-labelledby": "pblabel",
},
elem.Div(attrs.Props{
"id": "pb",
"class": "progress-bar",
"style": "width:" + progress + "%",
}),
).Render()
}
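A cut-down, runnable sketch of the markup this helper produces (using only the elem-go API already imported above); this is the fragment the HTMX loop in StartProgressBar swaps into the page every 600ms:

package main

import (
	"fmt"

	"github.com/chasefleming/elem-go"
	"github.com/chasefleming/elem-go/attrs"
)

// Trimmed copy of ProgressBar above: the outer div carries the ARIA
// progressbar role, the inner div's width encodes the percentage.
func progressBar(progress string) string {
	return elem.Div(attrs.Props{
		"class":           "progress",
		"role":            "progressbar",
		"aria-labelledby": "pblabel",
	},
		elem.Div(attrs.Props{
			"id":    "pb",
			"class": "progress-bar",
			"style": "width:" + progress + "%",
		}),
	).Render()
}

func main() {
	fmt.Println(progressBar("42"))
}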
func StartProgressBar(uid, progress, text string) string {
if progress == "" {
progress = "0"
}
return elem.Div(
attrs.Props{
"hx-trigger": "done",
"hx-get": "/browse/job/" + uid,
"hx-swap": "outerHTML",
"hx-target": "this",
},
elem.H3(
attrs.Props{
"role": "status",
"id": "pblabel",
"tabindex": "-1",
"autofocus": "",
},
elem.Text(bluemonday.StrictPolicy().Sanitize(text)), // Perhaps overly defensive
elem.Div(attrs.Props{
"hx-get": "/browse/job/progress/" + uid,
"hx-trigger": "every 600ms",
"hx-target": "this",
"hx-swap": "innerHTML",
},
elem.Raw(ProgressBar(progress)),
),
),
).Render()
}


@@ -21,10 +21,15 @@ func SystemInformations(ml *model.ModelLoader, appConfig *config.ApplicationConf
for b := range appConfig.ExternalGRPCBackends {
availableBackends = append(availableBackends, b)
}
sysmodels := []schema.SysInfoModel{}
for _, m := range loadedModels {
sysmodels = append(sysmodels, schema.SysInfoModel{ID: m.ID})
}
return c.JSON(
schema.SystemInformationResponse{
Backends: availableBackends,
Models: loadedModels,
Models: sysmodels,
},
)
}


@@ -9,16 +9,19 @@ import (
"github.com/gofiber/fiber/v2"
"github.com/mudler/LocalAI/core/schema"
"github.com/rs/zerolog/log"
"github.com/mudler/LocalAI/pkg/utils"
)
// TTSEndpoint is the OpenAI Speech API endpoint https://platform.openai.com/docs/api-reference/audio/createSpeech
// @Summary Generates audio from the input text.
// @Accept json
// @Produce audio/x-wav
// @Param request body schema.TTSRequest true "query params"
// @Success 200 {string} binary "generated audio/wav file"
// @Router /v1/audio/speech [post]
// @Router /tts [post]
//
// @Summary Generates audio from the input text.
// @Accept json
// @Produce audio/x-wav
// @Param request body schema.TTSRequest true "query params"
// @Success 200 {string} binary "generated audio/wav file"
// @Router /v1/audio/speech [post]
// @Router /tts [post]
func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
return func(c *fiber.Ctx) error {
@@ -67,6 +70,13 @@ func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
if err != nil {
return err
}
// Convert generated file to target format
filePath, err = utils.AudioConvert(filePath, input.Format)
if err != nil {
return err
}
return c.Download(filePath)
}
}
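A hedged client sketch for this change: the endpoint now converts the generated audio to the requested format before download. The JSON field names ("model", "input", "format") are assumptions inferred from the schema.TTSRequest usage above, not confirmed tags:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Field names are assumptions; "voice-en-us" is a hypothetical model.
	body := []byte(`{"model":"voice-en-us","input":"Hello from LocalAI","format":"mp3"}`)
	resp, err := http.Post("http://localhost:8080/tts", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := os.Create("speech.mp3")
	defer out.Close()
	n, _ := io.Copy(out, resp.Body)
	fmt.Printf("wrote %d bytes (%s)\n", n, resp.Status)
}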


@@ -0,0 +1,68 @@
package localai
import (
"github.com/gofiber/fiber/v2"
"github.com/mudler/LocalAI/core/backend"
"github.com/mudler/LocalAI/core/config"
fiberContext "github.com/mudler/LocalAI/core/http/ctx"
"github.com/mudler/LocalAI/core/schema"
"github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/model"
"github.com/rs/zerolog/log"
)
// VADEndpoint is Voice-Activation-Detection endpoint
// @Summary Detect voice fragments in an audio stream
// @Accept json
// @Param request body schema.VADRequest true "query params"
// @Success 200 {object} proto.VADResponse "Response"
// @Router /vad [post]
func VADEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
return func(c *fiber.Ctx) error {
input := new(schema.VADRequest)
// Get input data from the request body
if err := c.BodyParser(input); err != nil {
return err
}
modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.Model, false)
if err != nil {
modelFile = input.Model
log.Warn().Msgf("Model not found in context: %s", input.Model)
}
cfg, err := cl.LoadBackendConfigFileByName(modelFile, appConfig.ModelPath,
config.LoadOptionDebug(appConfig.Debug),
config.LoadOptionThreads(appConfig.Threads),
config.LoadOptionContextSize(appConfig.ContextSize),
config.LoadOptionF16(appConfig.F16),
)
if err != nil {
log.Err(err)
modelFile = input.Model
log.Warn().Msgf("Model not found in context: %s", input.Model)
} else {
modelFile = cfg.Model
}
log.Debug().Msgf("Request for model: %s", modelFile)
opts := backend.ModelOptions(*cfg, appConfig, model.WithBackendString(cfg.Backend), model.WithModel(modelFile))
vadModel, err := ml.Load(opts...)
if err != nil {
return err
}
req := proto.VADRequest{
Audio: input.Audio,
}
resp, err := vadModel.VAD(c.Context(), &req)
if err != nil {
return err
}
return c.JSON(resp)
}
}
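A hedged client sketch for the new /vad route. schema.VADRequest carries a model name plus raw audio samples (they are copied straight into proto.VADRequest above); the JSON tags and the model name are assumptions:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	payload, _ := json.Marshal(map[string]any{
		"model": "my-vad-model",             // hypothetical VAD model name
		"audio": []float32{0.0, 0.01, 0.02}, // raw PCM samples (assumption)
	})
	resp, err := http.Post("http://localhost:8080/vad", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}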


@@ -136,6 +136,11 @@ func ImageEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appCon
config.Backend = model.StableDiffusionBackend
}
if !strings.Contains(input.Size, "x") {
input.Size = "512x512"
log.Warn().Msgf("Invalid size, using default 512x512")
}
sizeParts := strings.Split(input.Size, "x")
if len(sizeParts) != 2 {
return fmt.Errorf("invalid value for 'size'")


@@ -304,7 +304,6 @@ func mergeRequestWithConfig(modelFile string, input *schema.OpenAIRequest, cm *c
config.LoadOptionThreads(threads),
config.LoadOptionContextSize(ctx),
config.LoadOptionF16(f16),
config.ModelPath(loader.ModelPath),
)
// Set the parameters for the language model prediction


@@ -1,95 +1,95 @@
package middleware
import (
"crypto/subtle"
"errors"
"github.com/dave-gray101/v2keyauth"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/keyauth"
"github.com/microcosm-cc/bluemonday"
"github.com/mudler/LocalAI/core/config"
)
// This file contains the configuration generators and handler functions that are used along with the fiber/keyauth middleware
// Currently this requires an upstream patch - and feature patches are no longer accepted to v2
// Therefore `dave-gray101/v2keyauth` contains the v2 backport of the middleware until v3 stabilizes and we migrate.
func GetKeyAuthConfig(applicationConfig *config.ApplicationConfig) (*v2keyauth.Config, error) {
customLookup, err := v2keyauth.MultipleKeySourceLookup([]string{"header:Authorization", "header:x-api-key", "header:xi-api-key"}, keyauth.ConfigDefault.AuthScheme)
if err != nil {
return nil, err
}
return &v2keyauth.Config{
CustomKeyLookup: customLookup,
Next: getApiKeyRequiredFilterFunction(applicationConfig),
Validator: getApiKeyValidationFunction(applicationConfig),
ErrorHandler: getApiKeyErrorHandler(applicationConfig),
AuthScheme: "Bearer",
}, nil
}
func getApiKeyErrorHandler(applicationConfig *config.ApplicationConfig) fiber.ErrorHandler {
return func(ctx *fiber.Ctx, err error) error {
if errors.Is(err, v2keyauth.ErrMissingOrMalformedAPIKey) {
if len(applicationConfig.ApiKeys) == 0 {
return ctx.Next() // if no keys are set up, any error we get here is not an error.
}
if applicationConfig.OpaqueErrors {
return ctx.SendStatus(403)
}
return ctx.Status(403).SendString(bluemonday.StrictPolicy().Sanitize(err.Error()))
}
if applicationConfig.OpaqueErrors {
return ctx.SendStatus(500)
}
return err
}
}
func getApiKeyValidationFunction(applicationConfig *config.ApplicationConfig) func(*fiber.Ctx, string) (bool, error) {
if applicationConfig.UseSubtleKeyComparison {
return func(ctx *fiber.Ctx, apiKey string) (bool, error) {
if len(applicationConfig.ApiKeys) == 0 {
return true, nil // If no keys are setup, accept everything
}
for _, validKey := range applicationConfig.ApiKeys {
if subtle.ConstantTimeCompare([]byte(apiKey), []byte(validKey)) == 1 {
return true, nil
}
}
return false, v2keyauth.ErrMissingOrMalformedAPIKey
}
}
return func(ctx *fiber.Ctx, apiKey string) (bool, error) {
if len(applicationConfig.ApiKeys) == 0 {
return true, nil // If no keys are setup, accept everything
}
for _, validKey := range applicationConfig.ApiKeys {
if apiKey == validKey {
return true, nil
}
}
return false, v2keyauth.ErrMissingOrMalformedAPIKey
}
}
func getApiKeyRequiredFilterFunction(applicationConfig *config.ApplicationConfig) func(*fiber.Ctx) bool {
if applicationConfig.DisableApiKeyRequirementForHttpGet {
return func(c *fiber.Ctx) bool {
if c.Method() != "GET" {
return false
}
for _, rx := range applicationConfig.HttpGetExemptedEndpoints {
if rx.MatchString(c.Path()) {
return true
}
}
return false
}
}
return func(c *fiber.Ctx) bool { return false }
}
package middleware
import (
"crypto/subtle"
"errors"
"github.com/dave-gray101/v2keyauth"
"github.com/gofiber/fiber/v2"
"github.com/gofiber/fiber/v2/middleware/keyauth"
"github.com/mudler/LocalAI/core/config"
)
// This file contains the configuration generators and handler functions that are used along with the fiber/keyauth middleware
// Currently this requires an upstream patch - and feature patches are no longer accepted to v2
// Therefore `dave-gray101/v2keyauth` contains the v2 backport of the middleware until v3 stabilizes and we migrate.
func GetKeyAuthConfig(applicationConfig *config.ApplicationConfig) (*v2keyauth.Config, error) {
customLookup, err := v2keyauth.MultipleKeySourceLookup([]string{"header:Authorization", "header:x-api-key", "header:xi-api-key", "cookie:token"}, keyauth.ConfigDefault.AuthScheme)
if err != nil {
return nil, err
}
return &v2keyauth.Config{
CustomKeyLookup: customLookup,
Next: getApiKeyRequiredFilterFunction(applicationConfig),
Validator: getApiKeyValidationFunction(applicationConfig),
ErrorHandler: getApiKeyErrorHandler(applicationConfig),
AuthScheme: "Bearer",
}, nil
}
func getApiKeyErrorHandler(applicationConfig *config.ApplicationConfig) fiber.ErrorHandler {
return func(ctx *fiber.Ctx, err error) error {
if errors.Is(err, v2keyauth.ErrMissingOrMalformedAPIKey) {
if len(applicationConfig.ApiKeys) == 0 {
return ctx.Next() // if no keys are set up, any error we get here is not an error.
}
ctx.Set("WWW-Authenticate", "Bearer")
if applicationConfig.OpaqueErrors {
return ctx.SendStatus(401)
}
return ctx.Status(401).Render("views/login", nil)
}
if applicationConfig.OpaqueErrors {
return ctx.SendStatus(500)
}
return err
}
}
func getApiKeyValidationFunction(applicationConfig *config.ApplicationConfig) func(*fiber.Ctx, string) (bool, error) {
if applicationConfig.UseSubtleKeyComparison {
return func(ctx *fiber.Ctx, apiKey string) (bool, error) {
if len(applicationConfig.ApiKeys) == 0 {
return true, nil // If no keys are setup, accept everything
}
for _, validKey := range applicationConfig.ApiKeys {
if subtle.ConstantTimeCompare([]byte(apiKey), []byte(validKey)) == 1 {
return true, nil
}
}
return false, v2keyauth.ErrMissingOrMalformedAPIKey
}
}
return func(ctx *fiber.Ctx, apiKey string) (bool, error) {
if len(applicationConfig.ApiKeys) == 0 {
return true, nil // If no keys are setup, accept everything
}
for _, validKey := range applicationConfig.ApiKeys {
if apiKey == validKey {
return true, nil
}
}
return false, v2keyauth.ErrMissingOrMalformedAPIKey
}
}
func getApiKeyRequiredFilterFunction(applicationConfig *config.ApplicationConfig) func(*fiber.Ctx) bool {
if applicationConfig.DisableApiKeyRequirementForHttpGet {
return func(c *fiber.Ctx) bool {
if c.Method() != "GET" {
return false
}
for _, rx := range applicationConfig.HttpGetExemptedEndpoints {
if rx.MatchString(c.Path()) {
return true
}
}
return false
}
}
return func(c *fiber.Ctx) bool { return false }
}


@@ -34,6 +34,7 @@ func RegisterLocalAIRoutes(app *fiber.App,
}
app.Post("/tts", localai.TTSEndpoint(cl, ml, appConfig))
app.Post("/vad", localai.VADEndpoint(cl, ml, appConfig))
// Stores
sl := model.NewModelLoader("")


File diff suppressed because one or more lines are too long


@@ -0,0 +1,23 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Open Authenticated Website</title>
</head>
<body>
<h1>Authorization is required</h1>
<input type="text" id="token" placeholder="Token" />
<button onclick="login()">Login</button>
<script>
function login() {
const token = document.getElementById('token').value;
var date = new Date();
date.setTime(date.getTime() + (24*60*60*1000));
document.cookie = `token=${token}; expires=${date.toGMTString()}`;
window.location.reload();
}
</script>
</body>
</html>
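A hedged Go sketch of the flow this page enables: the middleware change above also reads the API key from a "token" cookie, which is exactly what login() sets; the key value here is hypothetical:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://localhost:8080/", nil)
	if err != nil {
		panic(err)
	}
	req.AddCookie(&http.Cookie{Name: "token", Value: "my-api-key"}) // hypothetical key
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 401 plus the login view when the key is rejected
}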


@@ -1,5 +1,5 @@
<footer class="text-center py-8">
LocalAI Version {{.Version}}<br>
<a href='https://localai.io' class="text-blue-400 hover:text-blue-600" target="_blank">LocalAI</a> © 2023-2024 <a href='https://mudler.pm' class="text-blue-400 hover:text-blue-600" target="_blank">Ettore Di Giacinto</a>
<a href='https://github.com/mudler/LocalAI' class="text-blue-400 hover:text-blue-600" target="_blank">LocalAI</a> © 2023-2024 <a href='https://mudler.pm' class="text-blue-400 hover:text-blue-600" target="_blank">Ettore Di Giacinto</a>
</footer>
<script src="/static/assets/tw-elements.js"></script>


@@ -57,6 +57,7 @@
<link href="/static/assets/fontawesome/css/fontawesome.css" rel="stylesheet" />
<link href="/static/assets/fontawesome/css/brands.css" rel="stylesheet" />
<link href="/static/assets/fontawesome/css/solid.css" rel="stylesheet" />
<script src="/static/assets/flowbite.min.js"></script>
<script src="/static/assets/htmx.js" crossorigin="anonymous"></script>
<!-- P2P Animation START -->
<style>
@@ -118,4 +119,11 @@
100% { transform: rotate(0deg); } /* Return to center */
}
</style>
<!-- https://stackoverflow.com/questions/76051980/flowbite-component-not-working-when-loaded-via-htmx-django-project -->
<script>
htmx.onLoad(function(content) {
initFlowbite();
})
</script>
</head>


@@ -10,6 +10,7 @@ import (
"io"
"net"
"os"
"strings"
"sync"
"time"
@@ -22,6 +23,7 @@ import (
"github.com/mudler/edgevpn/pkg/services"
"github.com/mudler/edgevpn/pkg/types"
eutils "github.com/mudler/edgevpn/pkg/utils"
"github.com/multiformats/go-multiaddr"
"github.com/phayes/freeport"
zlog "github.com/rs/zerolog/log"
@@ -231,10 +233,14 @@ func discoveryTunnels(ctx context.Context, n *node.Node, token, servicesID strin
data := ledger.LastBlock().Storage[servicesID]
zlog.Debug().Any("data", ledger.LastBlock().Storage).Msg("Ledger data")
if logLevel == logLevelDebug {
// We want to surface this debugging data only if p2p logging is set to debug
// (and not generally the whole application, as this can be really noisy)
zlog.Debug().Any("data", ledger.LastBlock().Storage).Msg("Ledger data")
}
for k, v := range data {
zlog.Debug().Msgf("New worker found in the ledger data '%s'", k)
// New worker found in the ledger data as k (worker id)
nd := &NodeData{}
if err := v.Unmarshal(nd); err != nil {
zlog.Error().Msg("cannot unmarshal node data")
@@ -269,7 +275,7 @@ func ensureService(ctx context.Context, n *node.Node, nd *NodeData, sserv string
if ndService, found := service[nd.Name]; !found {
if !nd.IsOnline() {
// if node is offline and not present, do nothing
zlog.Debug().Msgf("Node %s is offline", nd.ID)
// Node nd.ID is offline
return
}
@@ -381,22 +387,35 @@ func newNodeOpts(token string) ([]node.Option, error) {
noDHT := os.Getenv("LOCALAI_P2P_DISABLE_DHT") == "true"
noLimits := os.Getenv("LOCALAI_P2P_ENABLE_LIMITS") == "true"
loglevel := os.Getenv("LOCALAI_P2P_LOGLEVEL")
if loglevel == "" {
loglevel = "info"
var listenMaddrs []string
var bootstrapPeers []string
laddrs := os.Getenv("LOCALAI_P2P_LISTEN_MADDRS")
if laddrs != "" {
listenMaddrs = strings.Split(laddrs, ",")
}
libp2ploglevel := os.Getenv("LOCALAI_LIBP2P_LOGLEVEL")
bootmaddr := os.Getenv("LOCALAI_P2P_BOOTSTRAP_PEERS_MADDRS")
if bootmaddr != "" {
bootstrapPeers = strings.Split(bootmaddr, ",")
}
dhtAnnounceMaddrs := stringsToMultiAddr(strings.Split(os.Getenv("LOCALAI_P2P_DHT_ANNOUNCE_MADDRS"), ","))
libp2ploglevel := os.Getenv("LOCALAI_P2P_LIB_LOGLEVEL")
if libp2ploglevel == "" {
libp2ploglevel = "fatal"
}
c := config.Config{
ListenMaddrs: listenMaddrs,
DHTAnnounceMaddrs: dhtAnnounceMaddrs,
Limit: config.ResourceLimit{
Enable: noLimits,
MaxConns: 100,
},
NetworkToken: token,
LowProfile: false,
LogLevel: loglevel,
LogLevel: logLevel,
Libp2pLogLevel: libp2ploglevel,
Ledger: config.Ledger{
SyncInterval: defaultInterval,
@@ -411,9 +430,10 @@ func newNodeOpts(token string) ([]node.Option, error) {
RateLimitInterval: defaultInterval,
},
Discovery: config.Discovery{
DHT: !noDHT,
MDNS: true,
Interval: 10 * time.Second,
DHT: !noDHT,
MDNS: true,
Interval: 10 * time.Second,
BootstrapPeers: bootstrapPeers,
},
Connection: config.Connection{
HolePunch: true,
@@ -432,6 +452,18 @@ func newNodeOpts(token string) ([]node.Option, error) {
return nodeOpts, nil
}
func stringsToMultiAddr(peers []string) []multiaddr.Multiaddr {
res := []multiaddr.Multiaddr{}
for _, p := range peers {
addr, err := multiaddr.NewMultiaddr(p)
if err != nil {
continue
}
res = append(res, addr)
}
return res
}
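A standalone sketch of the parse-and-skip behavior in stringsToMultiAddr; malformed entries from the comma-separated env vars are dropped without an error:

package main

import (
	"fmt"

	"github.com/multiformats/go-multiaddr"
)

func main() {
	for _, p := range []string{"/ip4/127.0.0.1/tcp/4001", "not-a-multiaddr"} {
		addr, err := multiaddr.NewMultiaddr(p)
		if err != nil {
			fmt.Println("skipped:", p) // same silent skip as stringsToMultiAddr
			continue
		}
		fmt.Println("parsed:", addr)
	}
}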
func copyStream(closer chan struct{}, dst io.Writer, src io.Reader) {
defer func() { closer <- struct{}{} }() // connection is closed, send signal to stop proxy
io.Copy(dst, src)

Some files were not shown because too many files have changed in this diff.