Compare commits

...

791 Commits

Author SHA1 Message Date
Ettore Di Giacinto
9352107999 WIP 2025-09-17 21:52:53 +02:00
Ettore Di Giacinto
77c5acb9db Revert "feat(nvidia-gpu): bump images to cuda 12.8" (#6303)
Revert "feat(nvidia-gpu): bump images to cuda 12.8 (#6239)"

This reverts commit d9e25af7b5.
2025-09-17 19:31:43 +02:00
Ettore Di Giacinto
44bbf4d778 chore(model gallery): add websailor-7b (#6300)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-17 09:49:58 +02:00
Ettore Di Giacinto
633c12f93d chore(model gallery): add websailor-32b (#6299)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-17 09:48:16 +02:00
Ettore Di Giacinto
6f24135f1d chore(model gallery): add webwatcher-32b (#6298)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-17 09:42:54 +02:00
Ettore Di Giacinto
b72aa7b4fa chore(model gallery): add webwatcher-7b (#6297)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-17 09:36:25 +02:00
Ettore Di Giacinto
e94e725479 chore(model gallery): add alibaba-nlp_tongyi-deepresearch-30b-a3b (#6295)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-17 09:22:19 +02:00
LocalAI [bot]
e4ac7b14a3 chore: ⬆️ Update ggml-org/llama.cpp to 8ff206097c2bf3ca1c7aa95f9d6db779fc7bdd68 (#6292)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-16 21:09:47 +00:00
Ettore Di Giacinto
ddb39c73f2 chore(model gallery): add holo1.5-3b (#6291)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-16 18:13:11 +02:00
Ettore Di Giacinto
264b09fb1e chore(model gallery): add holo1.5-7b (#6290)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-16 18:10:27 +02:00
Ettore Di Giacinto
36dd45df51 chore(model gallery): add holo1.5-72b (#6289)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-16 18:07:50 +02:00
Ettore Di Giacinto
e5599f87b8 chore(model gallery): add k2-think-i1 (#6288)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-16 18:05:01 +02:00
LocalAI [bot]
e89b5cc0e3 chore: ⬆️ Update ggml-org/llama.cpp to b907255f4bd169b0dc7dca9553b4c54af5170865 (#6287)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-16 08:10:37 +02:00
Richard Palethorpe
10bf1084cc chore: ⬆️ Update leejet/stable-diffusion.cpp to 0ebe6fe118f125665939b27c89f34ed38716bff8 (#6271)
* ⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* fix(stablediffusion-ggml): Move parameters and start refactor of passing params

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* fix(stablediffusion-ggml): Add default sampler option

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Richard Palethorpe <io@richiejp.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-15 21:42:46 +02:00
Ettore Di Giacinto
b08ae559b3 chore(model gallery): add qwen3-stargate-sg1-uncensored-abliterated-8b-i1 (#6270)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-15 11:03:26 +02:00
Ettore Di Giacinto
aa7cb7e18c chore(model gallery): add aquif-ai_aquif-3.5-8b-think (#6269)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-15 10:42:42 +02:00
Ettore Di Giacinto
eadd3d4e46 chore(model gallery): add baidu_ernie-4.5-21b-a3b-thinking (#6267)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-15 10:27:02 +02:00
LocalAI [bot]
2a18206033 chore: ⬆️ Update ggml-org/llama.cpp to 6c019cb04e86e2dacfe62ce7666c64e9717dde1f (#6265)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-14 21:19:41 +00:00
LocalAI [bot]
39798d734e chore: ⬆️ Update ggml-org/llama.cpp to 0fa154e3502e940df914f03b41475a2b80b985b0 (#6263)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-14 19:59:58 +00:00
Gianluca Boiano
d0e99562af chore(aio): upgrade minicpm-v model to latest 4.5 (#6262)
chore(aio): upgrade vision model to MiniCPM-V 4.5

Signed-off-by: Gianluca Boiano <morf3089@gmail.com>
2025-09-14 15:04:58 +02:00
Ettore Di Giacinto
6410c99bf2 fix(llama-cpp): correctly calculate embeddings (#6259)
* chore(tests): check embeddings differ in llama.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(llama.cpp): use the correct field for embedding

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(llama.cpp): use embedding type none

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(tests): add test-cases in aio-e2e suite

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-13 23:11:54 +02:00
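The test added in the entry above asserts that embeddings computed for distinct inputs actually differ. A minimal sketch of that property check — the helper name and shape are assumptions for illustration, not the test's code:

```go
package main

import "fmt"

// differ reports whether two embedding vectors disagree anywhere —
// the property the new llama.cpp test asserts for distinct inputs.
// Sketch of the check's intent, not the actual test code.
func differ(a, b []float32) bool {
	if len(a) != len(b) {
		return true
	}
	for i := range a {
		if a[i] != b[i] {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(differ([]float32{0.1, 0.2}, []float32{0.1, 0.3})) // true
}
```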
LocalAI [bot]
55766d269b chore: ⬆️ Update ggml-org/llama.cpp to aa0c461efe3603639af1a1defed2438d9c16ca0f (#6261)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-13 21:11:18 +00:00
Ettore Di Giacinto
ffa0ad1eac Fix formatting issues in README.md links
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-09-13 09:16:17 +02:00
LocalAI [bot]
623789a29e chore: ⬆️ Update ggml-org/llama.cpp to 40be51152d4dc2d47444a4ed378285139859895b (#6260)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-12 21:10:39 +00:00
Richard Palethorpe
2b9a3d32c9 chore: ⬆️ Update leejet/stable-diffusion.cpp to fce6afcc6a3250a8e17923608922d2a99b339b47 (#6256)
* ⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* fix(stablediffusion-ggml): Add SMOOTHSTEP scheduler and assert sampler and scheduler counts

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Richard Palethorpe <io@richiejp.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-12 12:28:20 +02:00
LocalAI [bot]
f8b71dc5d0 chore: ⬆️ Update ggml-org/llama.cpp to 0e6ff0046f4a2983b2c77950aa75960fe4b4f0e2 (#6235)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-11 21:21:49 +00:00
KingJ
1d3331b5cb fix(rocm): Rename tag suffix for hipblas whisper build to match backend config (#6247)
Rename tag suffix for hipblas whisper to match backend config

hipblas images generally have the suffix `-gpu-rocm-hipblas-X`. One exception to this currently is the hipblas build of Whisper, which has the suffix `gpu-hipblas-whisper`.

However, as `backend/index.yaml` references the image tag for Whisper using the more consistent form (i.e. `latest-gpu-rocm-hipblas-whisper`), it is not possible to add the backend, as raised in #6114.

Therefore, rename the suffix for hipblas whisper images to use the more consistent form, aligning with other hipblas builds as well as the expected image name in `backend/index.yaml`.

Signed-off-by: Kingsley Jarrett <kj@kingj.net>
2025-09-11 21:19:09 +02:00
Mário Freitas
2c0b9c6349 fix(chat): use proper finish_reason for tool/function calling (#6243)
Signed-off-by: Mário Freitas <imkira@gmail.com>
2025-09-11 21:13:23 +02:00
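The fix above concerns the OpenAI-style chat response contract, where `finish_reason` should be `tool_calls` whenever the model emitted tool/function calls and `stop` otherwise. A minimal sketch of that rule (an assumed illustration, not the PR's code):

```go
package main

import "fmt"

// finishReason follows the OpenAI chat convention: "tool_calls" when
// the response carries tool/function calls, "stop" otherwise. This
// sketches the rule the fix enforces, not the PR's implementation.
func finishReason(numToolCalls int) string {
	if numToolCalls > 0 {
		return "tool_calls"
	}
	return "stop"
}

func main() {
	fmt.Println(finishReason(0), finishReason(2)) // stop tool_calls
}
```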
qxo
3c6c976755 feat: support HF_ENDPOINT env for the HuggingFace endpoint (#6220)
e.g. `HF_ENDPOINT=https://hf-mirror.com`
2025-09-11 21:04:57 +02:00
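A minimal sketch of how such an env-var override is typically honoured, assuming a fallback to the public HuggingFace host (illustrative only, not the code added by the PR):

```go
package main

import (
	"fmt"
	"os"
)

// hfEndpoint honours the HF_ENDPOINT environment variable and falls
// back to the public HuggingFace host when it is unset.
func hfEndpoint() string {
	if ep := os.Getenv("HF_ENDPOINT"); ep != "" {
		return ep
	}
	return "https://huggingface.co"
}

func main() {
	// Run with e.g. HF_ENDPOINT=https://hf-mirror.com to redirect downloads.
	fmt.Println(hfEndpoint())
}
```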
Sertaç Özercan
ebbcba342a fix: runtime capability detection for backends (#6149)
* runtime capability detection for backends

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>

* test

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>

* skip nvidia on darwin

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>

* address review comments

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>

* fix apple test

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>

* remove unused func

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>

---------

Signed-off-by: Sertac Ozercan <sozercan@gmail.com>
2025-09-11 10:46:19 +02:00
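A minimal sketch of the runtime-detection idea from the entry above. The "skip nvidia on darwin" rule is taken from the commit message; the `nvidia-smi` probe and the return values are assumed purely for illustration and are not the PR's actual checks:

```go
package main

import (
	"fmt"
	"os/exec"
	"runtime"
)

// detectCapability picks a backend flavour from what the host offers.
// Hypothetical stand-in for the PR's capability detection.
func detectCapability() string {
	if runtime.GOOS == "darwin" {
		return "metal" // NVIDIA detection is skipped on Darwin
	}
	if _, err := exec.LookPath("nvidia-smi"); err == nil {
		return "nvidia"
	}
	return "cpu"
}

func main() {
	fmt.Println(detectCapability())
}
```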
LocalAI [bot]
0de75519dc chore: ⬆️ Update leejet/stable-diffusion.cpp to b0179181069254389ccad604e44f17a2c25b4094 (#6246)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-10 23:43:12 +02:00
Richard Palethorpe
37f5e4f5c1 feat(whisper): Add diarization (tinydiarize) (#6184)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-09-10 19:09:28 +02:00
Ettore Di Giacinto
ffa934b959 feat(chatterbox): add MPS, and CPU, pin version (#6242)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-09 17:58:07 +02:00
Mauro Morales
59311d8b1e Point to LocalAI-examples repo for llava (#6241)
Signed-off-by: Mauro Morales <contact@mauromorales.com>
2025-09-09 16:40:55 +02:00
Ettore Di Giacinto
d9e25af7b5 feat(nvidia-gpu): bump images to cuda 12.8 (#6239)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-09 13:02:17 +02:00
dependabot[bot]
e4f8b63b40 chore(deps): bump actions/labeler from 5 to 6 (#6229)
Bumps [actions/labeler](https://github.com/actions/labeler) from 5 to 6.
- [Release notes](https://github.com/actions/labeler/releases)
- [Commits](https://github.com/actions/labeler/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/labeler
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-09 08:57:13 +02:00
dependabot[bot]
1364ae9be6 chore(deps): bump github.com/swaggo/swag from 1.16.3 to 1.16.6 (#6222)
Bumps [github.com/swaggo/swag](https://github.com/swaggo/swag) from 1.16.3 to 1.16.6.
- [Release notes](https://github.com/swaggo/swag/releases)
- [Changelog](https://github.com/swaggo/swag/blob/master/.goreleaser.yml)
- [Commits](https://github.com/swaggo/swag/compare/v1.16.3...v1.16.6)

---
updated-dependencies:
- dependency-name: github.com/swaggo/swag
  dependency-version: 1.16.6
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-09 08:56:59 +02:00
dependabot[bot]
cfd6a9150d chore(deps): bump oras.land/oras-go/v2 from 2.5.0 to 2.6.0 (#6225)
Bumps [oras.land/oras-go/v2](https://github.com/oras-project/oras-go) from 2.5.0 to 2.6.0.
- [Release notes](https://github.com/oras-project/oras-go/releases)
- [Commits](https://github.com/oras-project/oras-go/compare/v2.5.0...v2.6.0)

---
updated-dependencies:
- dependency-name: oras.land/oras-go/v2
  dependency-version: 2.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-08 23:43:28 +00:00
dependabot[bot]
cd352d0c5f chore(deps): bump go.opentelemetry.io/otel/exporters/prometheus from 0.50.0 to 0.60.0 (#6226)
chore(deps): bump go.opentelemetry.io/otel/exporters/prometheus

Bumps [go.opentelemetry.io/otel/exporters/prometheus](https://github.com/open-telemetry/opentelemetry-go) from 0.50.0 to 0.60.0.
- [Release notes](https://github.com/open-telemetry/opentelemetry-go/releases)
- [Changelog](https://github.com/open-telemetry/opentelemetry-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/open-telemetry/opentelemetry-go/compare/example/prometheus/v0.50.0...exporters/prometheus/v0.60.0)

---
updated-dependencies:
- dependency-name: go.opentelemetry.io/otel/exporters/prometheus
  dependency-version: 0.60.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-09 00:21:03 +02:00
LocalAI [bot]
8d47309695 chore: ⬆️ Update ggml-org/whisper.cpp to edea8a9c3cf0eb7676dcdb604991eb2f95c3d984 (#6237)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-09 00:20:31 +02:00
LocalAI [bot]
5f6fc02a55 chore: ⬆️ Update leejet/stable-diffusion.cpp to abb115cd021fc2beed826604ed1a479b6a77671c (#6236)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-09 00:20:03 +02:00
Ettore Di Giacinto
0b528458d8 chore(docs): add MacOS dmg download button (#6233)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-09 00:19:37 +02:00
Ettore Di Giacinto
caab380c5d feat(launcher): show welcome page (#6234)
feat(launcher): add welcome window

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-09 00:14:58 +02:00
dependabot[bot]
8a3a362504 chore(deps): bump actions/stale from 9.1.0 to 10.0.0 (#6227)
Bumps [actions/stale](https://github.com/actions/stale) from 9.1.0 to 10.0.0.
- [Release notes](https://github.com/actions/stale/releases)
- [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md)
- [Commits](5bef64f19d...3a9db7e6a4)

---
updated-dependencies:
- dependency-name: actions/stale
  dependency-version: 10.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-08 22:50:59 +02:00
dependabot[bot]
07238eb743 chore(deps): bump github.com/opencontainers/image-spec from 1.1.0 to 1.1.1 (#6223)
chore(deps): bump github.com/opencontainers/image-spec

Bumps [github.com/opencontainers/image-spec](https://github.com/opencontainers/image-spec) from 1.1.0 to 1.1.1.
- [Release notes](https://github.com/opencontainers/image-spec/releases)
- [Changelog](https://github.com/opencontainers/image-spec/blob/main/RELEASES.md)
- [Commits](https://github.com/opencontainers/image-spec/compare/v1.1.0...v1.1.1)

---
updated-dependencies:
- dependency-name: github.com/opencontainers/image-spec
  dependency-version: 1.1.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-08 20:06:46 +00:00
Ettore Di Giacinto
e905e90dd7 Add MLX-audio entry to compatibility table
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-09-08 09:54:01 +02:00
LocalAI [bot]
08432d49e5 chore: ⬆️ Update ggml-org/llama.cpp to 3976dfbe00f02a62c0deca32c46138e4f0ca81d8 (#6214)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-08 08:33:33 +02:00
LocalAI [bot]
e51e2aacb9 chore: ⬆️ Update leejet/stable-diffusion.cpp to c648001030d4c2cc7c851fdaf509ee36d642dc99 (#6215)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-07 21:04:02 +00:00
Richard Palethorpe
9c3d85fc28 chore: ⬆️ Update leejet/stable-diffusion.cpp to d7f430cd693f2e12ecbaa0ce881746cf305c3b1f (#6213)
* ⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* fix(stablediffusion-ggml): Use new sample_params_t

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Richard Palethorpe <io@richiejp.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-07 16:34:45 +02:00
LocalAI [bot]
007ca647a7 chore(model-gallery): ⬆️ update checksum (#6211)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-07 00:41:40 +02:00
LocalAI [bot]
59af928379 chore: ⬆️ Update ggml-org/llama.cpp to c4df49a42d396bdf7344501813e7de53bc9e7bb3 (#6209)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-06 21:05:07 +00:00
LocalAI [bot]
dbc2bb561b chore: ⬆️ Update ggml-org/llama.cpp to 408ff524b40baf4f51a81d42a9828200dd4fcb6b (#6207)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-06 09:09:57 +02:00
LocalAI [bot]
c72c85dcac chore: ⬆️ Update ggml-org/whisper.cpp to bb0e1fc60f26a707cabf724edcf7cfcab2a269b6 (#6203)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-06 09:08:34 +02:00
Gianluca Boiano
ef984901e6 chore(model gallery): add MiniCPM-V-4.5-8b-q4_K_M (#6205)
Signed-off-by: Gianluca Boiano <morf3089@gmail.com>
2025-09-05 22:12:31 +02:00
Aliz Fara
9911ec84a3 Fix Typos in Docs (#6204)
Signed-off-by: alizfara112 <alizfaraafa@gmail.com>
2025-09-05 22:11:21 +02:00
LocalAI [bot]
1956681d4c chore: ⬆️ Update ggml-org/llama.cpp to fb15d649ed14ab447eeab911e0c9d21e35fb243e (#6202)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-05 08:42:50 +02:00
LocalAI [bot]
326f6e5ccb docs: ⬆️ update docs version mudler/LocalAI (#6201)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-04 21:03:02 +00:00
Ettore Di Giacinto
302958efd6 fix(p2p): automatically install llama-cpp for p2p workers (#6199)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-09-04 21:57:39 +02:00
Ettore Di Giacinto
3dc86b247d fix: make sure to turn down all processes on exit (#6200)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-04 21:37:28 +02:00
Ettore Di Giacinto
5ec724af06 chore(model gallery): fix whisper model gallery links
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-04 13:32:09 +02:00
Ettore Di Giacinto
1f1e156bf0 chore(model gallery): add nousresearch_hermes-4-14b (#6197)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-04 09:48:53 +02:00
LocalAI [bot]
df625e366a chore: ⬆️ Update leejet/stable-diffusion.cpp to 2eb3845df5675a71565d5a9e13b7bad0881fafcd (#6192)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-04 07:35:58 +02:00
LocalAI [bot]
9e6685ac9c chore: ⬆️ Update ggml-org/llama.cpp to 0fce7a1248b74148c1eb0d368b7e18e8bcb96809 (#6193)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-04 07:35:28 +02:00
Ettore Di Giacinto
90c818aa71 Update DMG file path in release workflow
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-09-04 07:34:27 +02:00
Ettore Di Giacinto
034b9b691b chore(ci): fixup release pipeline
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-03 22:22:47 +02:00
dependabot[bot]
ba52822e5c chore(deps): bump github.com/docker/docker from 28.0.0+incompatible to 28.3.3+incompatible (#6181)
chore(deps): bump github.com/docker/docker

Bumps [github.com/docker/docker](https://github.com/docker/docker) from 28.0.0+incompatible to 28.3.3+incompatible.
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v28.0.0...v28.3.3)

---
updated-dependencies:
- dependency-name: github.com/docker/docker
  dependency-version: 28.3.3+incompatible
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-03 13:42:54 +00:00
dependabot[bot]
eb30f6c090 chore(deps): bump github.com/tmc/langchaingo from 0.1.12 to 0.1.13 (#6190)
Bumps [github.com/tmc/langchaingo](https://github.com/tmc/langchaingo) from 0.1.12 to 0.1.13.
- [Release notes](https://github.com/tmc/langchaingo/releases)
- [Commits](https://github.com/tmc/langchaingo/compare/v0.1.12...v0.1.13)

---
updated-dependencies:
- dependency-name: github.com/tmc/langchaingo
  dependency-version: 0.1.13
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-03 14:22:51 +02:00
Ettore Di Giacinto
caba098959 chore(model gallery): add invisietch_l3.3-ignition-v0.1-70b (#6189)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-03 09:49:31 +02:00
Ettore Di Giacinto
3c75ea1e0e chore(model gallery): add aurore-reveil_koto-small-7b-it (#6188)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-03 09:47:57 +02:00
Ettore Di Giacinto
c5f911812f chore(model gallery): add nousresearch_hermes-4-70b (#6187)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-03 09:38:32 +02:00
LocalAI [bot]
d82922786a chore: ⬆️ Update ggml-org/llama.cpp to 3de008208b9b8a33f49f979097a99b4d59e6e521 (#6185)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-02 21:07:53 +00:00
dependabot[bot]
d9e9bb4c0e chore(deps): bump github.com/charmbracelet/glamour from 0.7.0 to 0.10.0 (#6183)
Bumps [github.com/charmbracelet/glamour](https://github.com/charmbracelet/glamour) from 0.7.0 to 0.10.0.
- [Release notes](https://github.com/charmbracelet/glamour/releases)
- [Changelog](https://github.com/charmbracelet/glamour/blob/master/.goreleaser.yml)
- [Commits](https://github.com/charmbracelet/glamour/compare/v0.7.0...v0.10.0)

---
updated-dependencies:
- dependency-name: github.com/charmbracelet/glamour
  dependency-version: 0.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-02 18:32:41 +00:00
dependabot[bot]
657027bec6 chore(deps): bump github.com/chasefleming/elem-go from 0.26.0 to 0.31.0 (#6178)
Bumps [github.com/chasefleming/elem-go](https://github.com/chasefleming/elem-go) from 0.26.0 to 0.31.0.
- [Release notes](https://github.com/chasefleming/elem-go/releases)
- [Commits](https://github.com/chasefleming/elem-go/compare/v0.26.0...v0.31.0)

---
updated-dependencies:
- dependency-name: github.com/chasefleming/elem-go
  dependency-version: 0.31.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-02 17:25:56 +00:00
dependabot[bot]
2f5635308d chore(deps): bump github.com/onsi/gomega from 1.36.2 to 1.38.2 (#6179)
Bumps [github.com/onsi/gomega](https://github.com/onsi/gomega) from 1.36.2 to 1.38.2.
- [Release notes](https://github.com/onsi/gomega/releases)
- [Changelog](https://github.com/onsi/gomega/blob/master/CHANGELOG.md)
- [Commits](https://github.com/onsi/gomega/compare/v1.36.2...v1.38.2)

---
updated-dependencies:
- dependency-name: github.com/onsi/gomega
  dependency-version: 1.38.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-02 18:03:35 +02:00
dependabot[bot]
63b5338dbd chore(deps): bump github.com/microcosm-cc/bluemonday from 1.0.26 to 1.0.27 (#6177)
chore(deps): bump github.com/microcosm-cc/bluemonday

Bumps [github.com/microcosm-cc/bluemonday](https://github.com/microcosm-cc/bluemonday) from 1.0.26 to 1.0.27.
- [Release notes](https://github.com/microcosm-cc/bluemonday/releases)
- [Commits](https://github.com/microcosm-cc/bluemonday/compare/v1.0.26...v1.0.27)

---
updated-dependencies:
- dependency-name: github.com/microcosm-cc/bluemonday
  dependency-version: 1.0.27
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-02 12:16:24 +00:00
dependabot[bot]
3150174962 chore(deps): bump github.com/jaypipes/ghw from 0.12.0 to 0.19.1 (#6176)
Bumps [github.com/jaypipes/ghw](https://github.com/jaypipes/ghw) from 0.12.0 to 0.19.1.
- [Release notes](https://github.com/jaypipes/ghw/releases)
- [Commits](https://github.com/jaypipes/ghw/compare/v0.12.0...v0.19.1)

---
updated-dependencies:
- dependency-name: github.com/jaypipes/ghw
  dependency-version: 0.19.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-09-02 13:04:35 +02:00
LocalAI [bot]
4330fdce33 chore: ⬆️ Update ggml-org/llama.cpp to d4d8dbe383e8b9600cbe8b42016e3a4529b51219 (#6172)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-02 09:12:03 +02:00
Richard Palethorpe
fef8583144 fix(ci): Set default Darwin backend lang to python (#6175)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-09-02 09:11:42 +02:00
LocalAI [bot]
d4d6a56a4f chore: ⬆️ Update leejet/stable-diffusion.cpp to 4c6475f9176bf99271ccf5a2817b30a490b83db0 (#6171)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-01 23:25:12 +02:00
Ettore Di Giacinto
2900a601a0 chore(backends): add stablediffusion-ggml and whisper for metal (#6173)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-09-01 22:30:35 +02:00
Ettore Di Giacinto
43e0437db6 Revise GPU usage recommendations in documentation
Updated recommendations for GPU usage on Xorg.

Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-09-01 22:20:41 +02:00
Richard Palethorpe
976c159fdb chore(ci): Build some Go based backends on Darwin (#6164)
* chore(ci): Build Go based backends on Darwin

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* chore(stablediffusion-ggml): Fixes for building on Darwin

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* chore(whisper): Build on Darwin

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-09-01 22:18:30 +02:00
LocalAI [bot]
969922ffec chore: ⬆️ Update ggml-org/llama.cpp to e92d53b29e393fc4c0f9f1f7c3fe651be8d36faa (#6169)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-01 08:06:54 +00:00
Ettore Di Giacinto
739573e41b feat(flash_attention): set auto for flash_attention in llama.cpp (#6168)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-31 17:59:09 +02:00
LocalAI [bot]
dbdf2908ad chore: ⬆️ Update ggml-org/llama.cpp to 3d16b29c3bb1ec816ac0e782f20d169097063919 (#6165)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-29 21:14:03 +00:00
dependabot[bot]
317f8641dc chore(deps): bump the go_modules group with 4 updates (#6161)
Bumps the go_modules group with 4 updates: [github.com/containerd/containerd](https://github.com/containerd/containerd), [github.com/gofiber/fiber/v2](https://github.com/gofiber/fiber), [github.com/docker/docker](https://github.com/docker/docker) and [github.com/ulikunitz/xz](https://github.com/ulikunitz/xz).


Updates `github.com/containerd/containerd` from 1.7.19 to 1.7.27
- [Release notes](https://github.com/containerd/containerd/releases)
- [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md)
- [Commits](https://github.com/containerd/containerd/compare/v1.7.19...v1.7.27)

Updates `github.com/gofiber/fiber/v2` from 2.52.5 to 2.52.9
- [Release notes](https://github.com/gofiber/fiber/releases)
- [Commits](https://github.com/gofiber/fiber/compare/v2.52.5...v2.52.9)

Updates `github.com/docker/docker` from 27.1.1+incompatible to 28.0.0+incompatible
- [Release notes](https://github.com/docker/docker/releases)
- [Commits](https://github.com/docker/docker/compare/v27.1.1...v28.0.0)

Updates `github.com/ulikunitz/xz` from 0.5.9 to 0.5.14
- [Commits](https://github.com/ulikunitz/xz/compare/v0.5.9...v0.5.14)

---
updated-dependencies:
- dependency-name: github.com/containerd/containerd
  dependency-version: 1.7.27
  dependency-type: direct:production
  dependency-group: go_modules
- dependency-name: github.com/gofiber/fiber/v2
  dependency-version: 2.52.9
  dependency-type: direct:production
  dependency-group: go_modules
- dependency-name: github.com/docker/docker
  dependency-version: 28.0.0+incompatible
  dependency-type: direct:production
  dependency-group: go_modules
- dependency-name: github.com/ulikunitz/xz
  dependency-version: 0.5.14
  dependency-type: indirect
  dependency-group: go_modules
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-29 08:12:12 +02:00
LocalAI [bot]
54ff70e451 feat(swagger): update swagger (#6162)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-29 08:11:34 +02:00
LocalAI [bot]
723f01c87e chore: ⬆️ Update ggml-org/llama.cpp to c97dc093912ad014f6d22743ede0d4d7fd82365a (#6163)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-28 21:16:18 +00:00
Ettore Di Giacinto
79a41a5e07 fix: register backends to model-loader during installation (#6159)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-28 19:11:02 +02:00
Matt Cowger
d0b6aa3f7d feat(gallery): Add 'Get Config' button for models (#6154)
* feat(gallery): Add 'Get Config' button for models

This commit introduces a 'Get Config' button to the model gallery UI. This allows users to download and save the configuration file for a model without installing the model's weights.

Key changes:
- Added a getConfigButton element and integrated it into the gallery card.
- Created a new API endpoint /browse/config/model/:id to handle fetching and saving the model configuration.
- Refactored the InstallModel function to allow saving only the configuration file without downloading model weights.
- Added a ToYAML method on ModelConfig for serialization.
- Fixed button spacing in the gallery UI.

Signed-off-by: Matt Cowger <matt.cowger@sigmacomputing.com>

* Update for reviewer comments

Signed-off-by: Matt Cowger <matt.cowger@sigmacomputing.com>

---------

Signed-off-by: Matt Cowger <matt.cowger@sigmacomputing.com>
2025-08-28 18:32:49 +02:00
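Given that LocalAI's HTTP layer is built on gofiber (bumped elsewhere in this log), the new endpoint plausibly has the following shape. Only the route path comes from the commit message; the handler body is a placeholder, not the PR's implementation:

```go
package main

import "github.com/gofiber/fiber/v2"

func main() {
	app := fiber.New()

	// Route path taken from the commit message; the real endpoint
	// fetches the model's configuration and saves it to disk without
	// downloading the model weights.
	app.Get("/browse/config/model/:id", func(c *fiber.Ctx) error {
		id := c.Params("id")
		return c.SendString("would fetch and save config for model " + id)
	})

	app.Listen(":8080")
}
```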
Ettore Di Giacinto
ad99399c6e chore: stream errors while streaming SSE (#6160)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-28 18:27:23 +02:00
Richard Palethorpe
e6ebfd3ba1 feat(whisper-cpp): Convert to Purego and add VAD (#6087)
* fix(ci): Avoid matching wrong backend with the same prefix

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* chore(whisper): Use Purego and enable VAD

This replaces the Whisper CGO bindings with our own Purego-based module
to make compilation easier.

In addition, this allows VAD models to be loaded by Whisper. There is not
much benefit for now, except that the same backend can be used for VAD and
transcription. Depending on upstream, we may also be able to use the GPU for
VAD in the future, but presently it is disabled.

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: Richard Palethorpe <io@richiejp.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-28 17:25:18 +02:00
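The Purego approach above replaces CGO linking with loading the shared library at runtime. A minimal sketch of the pattern, assuming a `libwhisper.so` built from whisper.cpp is on hand (`whisper_print_system_info` is a real whisper.cpp symbol; the path and the rest are illustrative, not LocalAI's binding code):

```go
package main

import (
	"fmt"

	"github.com/ebitengine/purego"
)

func main() {
	// Load the shared library at runtime instead of linking via CGO.
	// The library path is an assumption for illustration; Dlopen is
	// purego's Unix entry point.
	lib, err := purego.Dlopen("./libwhisper.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
	if err != nil {
		panic(err)
	}

	// Bind a C symbol to a Go function pointer; purego marshals the
	// const char* return value into a Go string.
	var printSystemInfo func() string
	purego.RegisterLibFunc(&printSystemInfo, lib, "whisper_print_system_info")

	fmt.Println(printSystemInfo())
}
```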
Ettore Di Giacinto
ead00a28b9 Add 'optimum-quanto' to requirements
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-28 13:32:03 +02:00
Ettore Di Giacinto
9621edb4c5 feat(diffusers): add support for wan2.2 (#6153)
* feat(diffusers): add support for wan2.2

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(ci): use ttl.sh for PRs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add ftfy deps

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Revert "chore(ci): use ttl.sh for PRs"

This reverts commit c9fc3ecf28.

* Simplify

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: do not pin torch/torchvision on cuda12

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-28 10:26:42 +02:00
Ettore Di Giacinto
7ce92f0646 fix: select portable environment if detected (#6158)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-28 10:00:54 +02:00
LocalAI [bot]
6a4ab3c1e0 chore: ⬆️ Update ggml-org/llama.cpp to fbef0fad7a7c765939f6c9e322fa05cd52cf0c15 (#6155)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-27 21:09:34 +00:00
Ettore Di Giacinto
83b85494c1 Update README with new resource links
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-27 16:24:23 +02:00
Matt Cowger
df6a80b38d feat: Add a model refresh button to manually refresh on-disk yaml (#6150)
Add a model refresh button
2025-08-27 09:44:40 +02:00
LocalAI [bot]
21faa4114b chore: ⬆️ Update ggml-org/llama.cpp to 8b696861364360770e9f61a3422d32941a477824 (#6151)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-26 22:07:38 +00:00
Ettore Di Giacinto
e35ad56602 chore(docs): add backends README
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 19:39:05 +02:00
Ettore Di Giacinto
3be8b2d8e1 chore(refactor): cli -> cmd, update docs (#6148)
* chore(refactor): cli -> cmd

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Update README

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 19:07:10 +02:00
Ettore Di Giacinto
900745bb4d chore(model gallery): add opengvlab_internvl3_5-2b (#6147)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 18:09:49 +02:00
Ettore Di Giacinto
15a7fc7e9a chore(model gallery): add opengvlab_internvl3_5-4b (#6146)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 17:55:53 +02:00
Ettore Di Giacinto
03dddec538 chore(model gallery): add opengvlab_internvl3_5-8b (#6145)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 17:49:31 +02:00
Ettore Di Giacinto
3d34386712 chore(model gallery): add opengvlab_internvl3_5-14b (#6144)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 17:17:49 +02:00
Ettore Di Giacinto
1b3f66018b chore(model gallery): add opengvlab_internvl3_5-30b-a3b (#6143)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 16:39:46 +02:00
Ettore Di Giacinto
4381e892b8 Revert "CI tests"
This reverts commit 913e132466.
2025-08-26 15:26:23 +02:00
Ettore Di Giacinto
3c3f477854 feat(mlx-audio): Add mlx-audio backend (#6138)
* feat(mlx-audio): Add mlx-audio backend

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* improve loading

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* CI tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix: set C_INCLUDE_PATH to point to python install

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 15:27:06 +02:00
Ettore Di Giacinto
f8a8cf3e95 feat(launcher): add LocalAI launcher app (#6127)
* Add launcher (WIP)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Update gomod

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Cleanup, focus on systray

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Separate launcher from main

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add a way to identify the binary version

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Implement save config, and start on boot

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Small fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Save installed version as metadata

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Stop LocalAI on quit

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fix goreleaser

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Check first if binary is there

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* do not show version if we don't have it

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to build on CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* use fyne package

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add to release

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fyne.Do

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* show WEBUI button only if LocalAI is started

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Default to localhost

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Show rel notes

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Update logo

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Small improvements and fix tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to fix e2e tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 14:22:04 +02:00
LocalAI [bot]
0fc88b3cdf chore: ⬆️ Update ggml-org/llama.cpp to c4e9239064a564de7b94ee2b401ae907235a8fca (#6139)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-26 12:18:58 +02:00
Ettore Di Giacinto
4993df81c3 fix(metal-llama.cpp): add all libutf8_validity
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 10:19:55 +02:00
Ettore Di Giacinto
599bc88c6c fix(hipblas-llama.cpp): create symlink to libomp (#6140)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-26 10:05:17 +02:00
LocalAI [bot]
1a0d06f3db chore: ⬆️ Update ggml-org/llama.cpp to 043fb27d3808766d8ea8195bbd12359727264402 (#6137)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-25 08:57:09 +02:00
LocalAI [bot]
5e1a8b3621 chore: ⬆️ Update ggml-org/whisper.cpp to 7745fcf32846006128f16de429cfe1677c963b30 (#6136)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-24 21:12:13 +00:00
Ettore Di Giacinto
960e51e527 chore(diffusers): support both src and reference_images in diffusers (#6135)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-24 22:03:08 +02:00
Ettore Di Giacinto
195aa22e77 chore(docs): update list of supported backends (#6134)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-24 20:09:19 +02:00
Ettore Di Giacinto
be132fe816 Revise latest project news in README
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-24 11:50:20 +02:00
Ettore Di Giacinto
ff5d2dc8be Revert "fix(rfdetr): use cpu torch for cpu builds" (#6131)
Revert "fix(rfdetr): use cpu torch for cpu builds (#6129)"

This reverts commit fec8a36b36.
2025-08-24 11:41:08 +02:00
Ettore Di Giacinto
c1cfa08226 chore(Dockerfile): drop python from images (#6130)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-24 11:40:32 +02:00
Ettore Di Giacinto
fec8a36b36 fix(rfdetr): use cpu torch for cpu builds (#6129)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-24 10:17:25 +02:00
Ettore Di Giacinto
5d4f5d2355 feat(backends): add CPU variant for diffusers backend (#6128)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-24 10:17:10 +02:00
LocalAI [bot]
057248008f chore: ⬆️ Update ggml-org/llama.cpp to 710dfc465a68f7443b87d9f792cffba00ed739fe (#6126)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-24 08:41:39 +02:00
Ettore Di Giacinto
9f2c9cd691 feat(llama.cpp): Add gfx1201 support (#6125)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-23 23:06:01 +02:00
Ettore Di Giacinto
6971f71a6c Add mlx-vlm (#6119)
* Add mlx-vlm

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add to CI workflows

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add requirements-mps.txt

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Simplify

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-23 23:05:30 +02:00
Ettore Di Giacinto
1ba66d00f5 feat: bundle python inside backends (#6123)
* feat(backends): bundle python

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* test ci

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* vllm on self-hosted

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add clang

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to fix it for Mac

* Relocate links only when is portable

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Make sure to call macosPortableEnv

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Use self-hosted for vllm

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-23 22:36:39 +02:00
Ettore Di Giacinto
259383cf5e chore(deps): bump llama.cpp to '45363632cbd593537d541e81b600242e0b3d47fc' (#6122)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-23 08:39:10 +02:00
Ettore Di Giacinto
209c0694f5 Update backend.yml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-22 23:36:24 +02:00
Ettore Di Giacinto
0fd395d6ec feat(diffusers): add MPS version (#6121)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-22 23:14:54 +02:00
Ettore Di Giacinto
d04bd47116 chore(Makefile): small fixup for darwin MLX builds
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-22 08:52:29 +02:00
Ettore Di Giacinto
1d830ce7dd feat(mlx): add mlx backend (#6049)
* chore: allow to install with pip

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* WIP

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Make the backend to build and actually work

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* List models from system only

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add script to build darwin python backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Run protogen in libbackend

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Detect if mps is available across python backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* CI: try to build backend

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Debug CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Index mlx-vlm

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Remove mlx-vlm

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Drop CI test

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-22 08:42:29 +02:00
LocalAI [bot]
6dccfb09f8 chore: ⬆️ Update ggml-org/llama.cpp to cd36b5e5c7fed2a3ac671dd542d579ca40b48b54 (#6118)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-22 07:57:27 +02:00
LocalAI [bot]
e4d9cf8349 chore: ⬆️ Update ggml-org/llama.cpp to 7a6e91ad26160dd6dfb33d29ac441617422f28e7 (#6116)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-20 21:05:39 +00:00
Ettore Di Giacinto
c899e90277 Update image-generation.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-20 10:37:11 +02:00
Ettore Di Giacinto
8193d18c7c feat(img2img): Add support to Qwen Image Edit (#6113)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-20 10:18:55 +02:00
LocalAI [bot]
2e4dc6456f chore: ⬆️ Update ggml-org/llama.cpp to fb22dd07a639e81c7415e30b146f545f1a2f2caf (#6112)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-20 09:01:36 +02:00
LocalAI [bot]
4594430a3e feat(swagger): update swagger (#6111)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-19 22:56:00 +02:00
Ettore Di Giacinto
9c7f92c81f feat(p2p): automatically sync installed models between instances (#6108)
* feat(p2p): sync models between federated nodes

This change makes sure that all models are kept in sync between
federated nodes.

Note: this works exclusively with models belonging to a gallery. It does
not sync files between the nodes; rather, it syncs the node setup.
E.g. all the nodes need to have the same galleries configured and to
install models without any local editing.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Make nodes stable

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups on syncing

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* ui: improve p2p view

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-19 19:37:46 +02:00
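The sync described above reduces to reconciling each node's installed gallery models with its peers'. A sketch of that reconciliation step under the stated gallery-only assumption — the helper name and shape are hypothetical, and the actual PR logic differs in detail:

```go
package main

import "fmt"

// toInstall returns gallery model IDs a peer has that the local node
// lacks: the set difference the sync implies. Hypothetical sketch.
func toInstall(local, peer []string) []string {
	have := make(map[string]bool, len(local))
	for _, id := range local {
		have[id] = true
	}
	var missing []string
	for _, id := range peer {
		if !have[id] {
			missing = append(missing, id)
		}
	}
	return missing
}

func main() {
	fmt.Println(toInstall([]string{"model-a"}, []string{"model-a", "model-b"})) // [model-b]
}
```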
Ettore Di Giacinto
060037bcd4 Revert "chore(deps): bump transformers from 4.48.3 to 4.55.2 in /backend/python/coqui" (#6105)
Revert "chore(deps): bump transformers from 4.48.3 to 4.55.2 in /backend/pyth…"

This reverts commit 27ce570844.
2025-08-19 15:00:33 +02:00
Ettore Di Giacinto
d9da4676b4 Revert "chore(deps): bump torch from 2.3.1+cxx11.abi to 2.8.0 in /backend/python/coqui" (#6104)
Revert "chore(deps): bump torch from 2.3.1+cxx11.abi to 2.8.0 in /backend/pyt…"

This reverts commit 42c7859ab1.
2025-08-19 15:00:11 +02:00
Ettore Di Giacinto
5ef4c2e471 feat(diffusers): add torchvision to support qwen-image-edit (#6103)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-19 12:05:48 +02:00
dependabot[bot]
27ce570844 chore(deps): bump transformers from 4.48.3 to 4.55.2 in /backend/python/coqui (#6096)
chore(deps): bump transformers in /backend/python/coqui

Bumps [transformers](https://github.com/huggingface/transformers) from 4.48.3 to 4.55.2.
- [Release notes](https://github.com/huggingface/transformers/releases)
- [Commits](https://github.com/huggingface/transformers/compare/v4.48.3...v4.55.2)

---
updated-dependencies:
- dependency-name: transformers
  dependency-version: 4.55.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 09:44:01 +00:00
dependabot[bot]
42c7859ab1 chore(deps): bump torch from 2.3.1+cxx11.abi to 2.8.0 in /backend/python/coqui (#6099)
chore(deps): bump torch in /backend/python/coqui

Bumps [torch](https://github.com/pytorch/pytorch) from 2.3.1+cxx11.abi to 2.8.0.
- [Release notes](https://github.com/pytorch/pytorch/releases)
- [Changelog](https://github.com/pytorch/pytorch/blob/main/RELEASE.md)
- [Commits](https://github.com/pytorch/pytorch/commits/v2.8.0)

---
updated-dependencies:
- dependency-name: torch
  dependency-version: 2.8.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 08:42:52 +00:00
Ettore Di Giacinto
e7e83d0fa6 Revert "chore(deps): bump intel-extension-for-pytorch from 2.3.110+xpu to 2.8.10+xpu in /backend/python/coqui" (#6102)
Revert "chore(deps): bump intel-extension-for-pytorch from 2.3.110+xpu to 2.8…"

This reverts commit c6dc1d86f1.
2025-08-19 09:29:56 +02:00
dependabot[bot]
c6dc1d86f1 chore(deps): bump intel-extension-for-pytorch from 2.3.110+xpu to 2.8.10+xpu in /backend/python/coqui (#6095)
chore(deps): bump intel-extension-for-pytorch in /backend/python/coqui

Bumps intel-extension-for-pytorch from 2.3.110+xpu to 2.8.10+xpu.

---
updated-dependencies:
- dependency-name: intel-extension-for-pytorch
  dependency-version: 2.8.10+xpu
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 07:09:47 +00:00
dependabot[bot]
6fd2e1964d chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/coqui (#6097)
Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 08:11:58 +02:00
dependabot[bot]
49ae41b716 chore(deps): bump securego/gosec from 2.22.7 to 2.22.8 (#6098)
Bumps [securego/gosec](https://github.com/securego/gosec) from 2.22.7 to 2.22.8.
- [Release notes](https://github.com/securego/gosec/releases)
- [Changelog](https://github.com/securego/gosec/blob/master/.goreleaser.yml)
- [Commits](https://github.com/securego/gosec/compare/v2.22.7...v2.22.8)

---
updated-dependencies:
- dependency-name: securego/gosec
  dependency-version: 2.22.8
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 08:11:26 +02:00
dependabot[bot]
b3f0ed62fd chore(deps): bump actions/checkout from 4 to 5 (#6101)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 08:10:54 +02:00
LocalAI [bot]
4b9afc418b chore: ⬆️ Update ggml-org/whisper.cpp to fc45bb86251f774ef817e89878bb4c2636c8a58f (#6089)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-19 08:10:25 +02:00
LocalAI [bot]
e44ff8514b chore: ⬆️ Update ggml-org/llama.cpp to 6d7f1117e3e3285d0c5c11b5ebb0439e27920082 (#6088)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-19 08:09:49 +02:00
dependabot[bot]
2b6be10b6b chore(deps): bump protobuf from 6.31.0 to 6.32.0 in /backend/python/transformers (#6100)
chore(deps): bump protobuf in /backend/python/transformers

Bumps [protobuf](https://github.com/protocolbuffers/protobuf) from 6.31.0 to 6.32.0.
- [Release notes](https://github.com/protocolbuffers/protobuf/releases)
- [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/protobuf_release.bzl)
- [Commits](https://github.com/protocolbuffers/protobuf/commits)

---
updated-dependencies:
- dependency-name: protobuf
  dependency-version: 6.32.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-19 05:09:17 +00:00
Ettore Di Giacinto
1361d844a1 chore(model gallery): add lfm2-1.2b (#6092)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-18 22:24:50 +02:00
Ettore Di Giacinto
fcc521cae5 chore(model gallery): add lfm2-vl-1.6b (#6091)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-18 22:21:35 +02:00
Ettore Di Giacinto
8cad7138be chore(model gallery): add lfm2-vl-450m (#6090)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-18 22:17:03 +02:00
Richard Palethorpe
ebd1db2f09 chore(ci): Build modified backends on PR (#6086)
* chore(stablediffusion-ggml): rm redundant comment

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* chore(ci): Build modified backends on PR

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-08-18 17:56:34 +02:00
LocalAI [bot]
7920d75805 chore: ⬆️ Update ggml-org/llama.cpp to 21c17b5befc5f6be5992bc87fc1ba99d388561df (#6084)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-18 08:26:58 +00:00
Ettore Di Giacinto
1d0e24a865 chore(model gallery): add impish_longtail_12b (#6082)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-17 09:41:01 +02:00
LocalAI [bot]
9eed5ef872 chore: ⬆️ Update ggml-org/llama.cpp to 1fe00296f587dfca0957e006d146f5875b61e43d (#6079)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-16 21:10:03 +00:00
Fabio Erculiani
39ab80442a Tune the "dark gray" font color in the webui to make it more readable (#6078)
Tune the "dark gray" font color in the LocalAI webui to make it more readable.
2025-08-16 21:16:25 +02:00
Ettore Di Giacinto
1b101df2c0 chore: InTrustedRoot -> VerifyPath
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-16 16:30:13 +02:00
Richard Palethorpe
784bd5db33 chore(build): Use Purego with stablediffusion backend (#6067)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-08-16 12:21:29 +02:00
Ettore Di Giacinto
b8b1ca782c chore(model gallery): add wingless_imp_8b-i1 (#6077)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-16 08:45:38 +02:00
Ettore Di Giacinto
1149fb66d3 chore(model gallery): add thedrummer_gemma-3-r1-4b-v1 (#6076)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-16 08:39:10 +02:00
LocalAI [bot]
243e86176e chore: ⬆️ Update ggml-org/llama.cpp to 5e6229a8409ac786e62cb133d09f1679a9aec13e (#6070)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-16 08:38:57 +02:00
Ettore Di Giacinto
8da38a0d10 chore(model gallery): add thedrummer_gemma-3-r1-12b-v1 (#6075)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-16 08:35:49 +02:00
Ettore Di Giacinto
60786fc876 chore(model gallery): add thedrummer_gemma-3-r1-27b-v1 (#6074)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-16 08:31:42 +02:00
LocalAI [bot]
9486b88a25 chore: ⬆️ Update ggml-org/whisper.cpp to 040510a132f0a9b51d4692b57a6abfd8c9660696 (#6069)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-16 08:30:54 +02:00
Ettore Di Giacinto
bef4c10629 feat(ui): General improvements (#6072)
* wip

* Simplify stop

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Improve UI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Show installed backends at the index

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Improve UI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-16 07:44:50 +02:00
LocalAI [bot]
80f15851c5 chore(model-gallery): ⬆️ update checksum (#6071)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-15 22:29:17 +02:00
Ettore Di Giacinto
22067e3384 chore(rocm): bump rocm image, add gfx1200 support (#6065)
Fixes: https://github.com/mudler/LocalAI/issues/6044

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-15 16:36:54 +02:00
Ettore Di Giacinto
4fbd639463 chore(ci): fixup builds for darwin and hipblas
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-15 15:58:02 +02:00
Ettore Di Giacinto
70f7d0c25f Revert "chore(build): Convert stablediffusion-ggml backend to Purego (#5989)" (#6064)
This reverts commit 94cb20ae7f.
2025-08-15 15:18:40 +02:00
Ettore Di Giacinto
576e821298 chore(deps): bump llama.cpp to 'df36bce667bf14f8e538645547754386f9516326' (#6062)
chore(deps): bump llama.cpp to 'df36bce667bf14f8e538645547754386f9516326'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-15 13:28:15 +02:00
Ettore Di Giacinto
7293f26fcf chore(ci): fix darwin image publish
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-15 08:51:44 +02:00
Ettore Di Giacinto
79973a28ad chore(model gallery): add gemma-3-270m-it-qat (#6063)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-15 08:41:38 +02:00
Ettore Di Giacinto
8ab51509cc Update Makefile
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-15 08:33:25 +02:00
Ettore Di Giacinto
b3384e5428 Update Makefile
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-15 08:08:24 +02:00
Ettore Di Giacinto
7050c9f69d feat(webui): add import/edit model page (#6050)
* feat(webui): add import/edit model page

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Convert to a YAML editor

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Pass by the baseurl

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Simplify

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Improve visibility of the yaml editor

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add test file

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Make reset work

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Emit error only if we can't delete the model yaml file

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-14 23:48:09 +02:00
Ettore Di Giacinto
089efe05fd feat(backends): add system backend, refactor (#6059)
- Add a system backend path
- Refactor and consolidate system information in system state
- Use system state in all the components to figure out the system paths
  to use whenever needed
- Refactor BackendConfig -> ModelConfig. The old name was misleading, as
  we now have a backend configuration which is not the model config.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-14 19:38:26 +02:00
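For illustration, a rough Go sketch of the consolidation idea described above: one state object owns the resolved paths and every component consults it instead of computing paths independently. All names here are assumptions, not LocalAI's actual types.

```go
package main

import "fmt"

// SystemState is a hypothetical stand-in for a single source of truth
// about system paths, including the new system backend path.
type SystemState struct {
	ModelPath         string
	BackendPath       string
	SystemBackendPath string
}

// BackendDirs returns every location to scan for backends, so callers
// no longer have to compute the paths themselves.
func (s SystemState) BackendDirs() []string {
	return []string{s.SystemBackendPath, s.BackendPath}
}

func main() {
	state := SystemState{
		ModelPath:         "/models",
		BackendPath:       "/backends",
		SystemBackendPath: "/usr/share/localai/backends",
	}
	fmt.Println(state.BackendDirs())
}
```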
Ettore Di Giacinto
253b7537dc fix(llama-cpp/darwin): make sure to bundle libutf8 libs (#6060)
fix(darwin): make sure to bundle libutf8_validity

Plus some refactoring, use makefile

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-14 17:56:35 +02:00
Ettore Di Giacinto
19c92c70c5 fix(backend-detection): default to CPU if there is less than 4GB of GPU available (#6057)
fix(gpu-detection): default to CPU if there is less than 4GB of GPU available

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-14 16:57:33 +02:00
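For illustration, a minimal Go sketch of the fallback heuristic this fix describes. The 4 GiB threshold comes from the commit message; `pickBackend` and its input are hypothetical names, not LocalAI's actual detection code.

```go
package main

import "fmt"

// minVRAM is the 4 GiB threshold from the fix above.
const minVRAM = 4 << 30 // bytes

// pickBackend falls back to the CPU backend when the reported GPU
// memory is below the threshold. vramBytes would come from whatever
// GPU probing the system performs (assumption).
func pickBackend(vramBytes uint64) string {
	if vramBytes < minVRAM {
		return "cpu"
	}
	return "gpu"
}

func main() {
	fmt.Println(pickBackend(2 << 30)) // cpu
	fmt.Println(pickBackend(8 << 30)) // gpu
}
```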
Ettore Di Giacinto
b52bfaf1b3 fix: do not show invalid backends (#6058)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-14 13:01:56 +02:00
Ettore Di Giacinto
bf60ca5bf0 Update Makefile
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-14 11:53:43 +02:00
LocalAI [bot]
2b44467bd1 chore: ⬆️ Update ggml-org/llama.cpp to 29c8fbe4e05fd23c44950d0958299e25fbeabc5c (#6054)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-14 09:19:15 +02:00
LocalAI [bot]
8c1f4a131e chore: ⬆️ Update ggml-org/whisper.cpp to 16c2924cb2c4b5c9f79220aa7708eb5b346b029b (#6055)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-13 21:08:22 +00:00
Ettore Di Giacinto
10a3f0bd92 fix: chmod grpc processes only if needed (#6051)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-13 12:08:53 +02:00
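The idea, sketched in Go under stated assumptions (the helper name and permission mask are illustrative): stat first, and only chmod when the execute bits are actually missing, so nothing is written on every startup.

```go
package main

import (
	"fmt"
	"os"
)

// ensureExecutable sets the execute bits only when they are missing,
// avoiding a redundant chmod on already-executable binaries.
func ensureExecutable(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	const execBits = 0o111
	if info.Mode().Perm()&execBits == execBits {
		return nil // already executable, nothing to do
	}
	return os.Chmod(path, info.Mode().Perm()|execBits)
}

func main() {
	if err := ensureExecutable("/tmp/grpc-backend"); err != nil {
		fmt.Println(err)
	}
}
```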
LocalAI [bot]
72f4d541d0 chore: ⬆️ Update ggml-org/llama.cpp to f4586ee5986d6f965becb37876d6f3666478a961 (#6048)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-13 08:33:48 +02:00
LocalAI [bot]
9f812fdb84 chore: ⬆️ Update ggml-org/whisper.cpp to 5527454cdb3e15d7e2b8a6e2afcb58cb61651fd2 (#6047)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-12 21:12:07 +00:00
LocalAI [bot]
b70ee45fff docs: ⬆️ update docs version mudler/LocalAI (#6046)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-12 22:05:50 +02:00
dependabot[bot]
9d9c853541 chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/transformers (#6013)
chore(deps): bump grpcio in /backend/python/transformers

Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 22:05:16 +02:00
Ettore Di Giacinto
18fcd8557c fix(llama.cpp): support gfx1200 (#6045)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-12 22:04:30 +02:00
dependabot[bot]
d8e27c38d7 chore(deps): bump oneccl-bind-pt from 2.3.100+xpu to 2.8.0+xpu in /backend/python/common/template (#6016)
chore(deps): bump oneccl-bind-pt in /backend/python/common/template

Bumps oneccl-bind-pt from 2.3.100+xpu to 2.8.0+xpu.

---
updated-dependencies:
- dependency-name: oneccl-bind-pt
  dependency-version: 2.8.0+xpu
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 18:57:20 +00:00
dependabot[bot]
3b0dc87932 chore(deps): bump torch from 2.3.1+cxx11.abi to 2.8.0 in /backend/python/common/template (#6025)
chore(deps): bump torch in /backend/python/common/template

Bumps [torch](https://github.com/pytorch/pytorch) from 2.3.1+cxx11.abi to 2.8.0.
- [Release notes](https://github.com/pytorch/pytorch/releases)
- [Changelog](https://github.com/pytorch/pytorch/blob/main/RELEASE.md)
- [Commits](https://github.com/pytorch/pytorch/commits/v2.8.0)

---
updated-dependencies:
- dependency-name: torch
  dependency-version: 2.8.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 17:58:33 +00:00
dependabot[bot]
2374485222 chore(deps): bump actions/download-artifact from 4 to 5 (#6015)
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4 to 5.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 18:54:57 +02:00
dependabot[bot]
0ca1765c17 chore(deps): bump actions/checkout from 4 to 5 (#6014)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 18:54:39 +02:00
dependabot[bot]
90b5ed9a1e chore(deps): bump intel-extension-for-pytorch from 2.3.110+xpu to 2.8.10+xpu in /backend/python/common/template (#6034)
chore(deps): bump intel-extension-for-pytorch

Bumps intel-extension-for-pytorch from 2.3.110+xpu to 2.8.10+xpu.

---
updated-dependencies:
- dependency-name: intel-extension-for-pytorch
  dependency-version: 2.8.10+xpu
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 18:44:33 +02:00
dependabot[bot]
d438b769da chore(deps): bump intel-extension-for-pytorch from 2.3.110+xpu to 2.8.10+xpu in /backend/python/bark (#6043)
chore(deps): bump intel-extension-for-pytorch in /backend/python/bark

Bumps intel-extension-for-pytorch from 2.3.110+xpu to 2.8.10+xpu.

---
updated-dependencies:
- dependency-name: intel-extension-for-pytorch
  dependency-version: 2.8.10+xpu
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 18:44:05 +02:00
dependabot[bot]
2e4bd1e33d chore(deps): bump oneccl-bind-pt from 2.3.100+xpu to 2.8.0+xpu in /backend/python/rerankers (#6021)
chore(deps): bump oneccl-bind-pt in /backend/python/rerankers

Bumps oneccl-bind-pt from 2.3.100+xpu to 2.8.0+xpu.

---
updated-dependencies:
- dependency-name: oneccl-bind-pt
  dependency-version: 2.8.0+xpu
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 16:04:54 +00:00
dependabot[bot]
ff73800970 chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/exllama2 (#6019)
chore(deps): bump grpcio in /backend/python/exllama2

Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 16:42:46 +02:00
Richard Palethorpe
94cb20ae7f chore(build): Convert stablediffusion-ggml backend to Purego (#5989)
* Try converting SD to purego

* chore(build): Use Purego with stablediffusion backend

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-08-12 16:42:15 +02:00
dependabot[bot]
47c20f9adb chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/rerankers (#6022)
chore(deps): bump grpcio in /backend/python/rerankers

Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:24:48 +02:00
dependabot[bot]
a7fe153630 chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/bark (#6033)
Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:16:00 +02:00
dependabot[bot]
27519d2233 chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/common/template (#6035)
chore(deps): bump grpcio in /backend/python/common/template

Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:15:28 +02:00
dependabot[bot]
8cab0f880b chore(deps): bump sentence-transformers from 5.0.0 to 5.1.0 in /backend/python/transformers (#6028)
chore(deps): bump sentence-transformers in /backend/python/transformers

Bumps [sentence-transformers](https://github.com/UKPLab/sentence-transformers) from 5.0.0 to 5.1.0.
- [Release notes](https://github.com/UKPLab/sentence-transformers/releases)
- [Commits](https://github.com/UKPLab/sentence-transformers/compare/v5.0.0...v5.1.0)

---
updated-dependencies:
- dependency-name: sentence-transformers
  dependency-version: 5.1.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:15:07 +02:00
dependabot[bot]
8c48b250c4 chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/diffusers (#6037)
chore(deps): bump grpcio in /backend/python/diffusers

Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:14:35 +02:00
dependabot[bot]
ba802c2ee4 chore(deps): bump grpcio from 1.71.0 to 1.74.0 in /backend/python/vllm (#6036)
Bumps [grpcio](https://github.com/grpc/grpc) from 1.71.0 to 1.74.0.
- [Release notes](https://github.com/grpc/grpc/releases)
- [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md)
- [Commits](https://github.com/grpc/grpc/compare/v1.71.0...v1.74.0)

---
updated-dependencies:
- dependency-name: grpcio
  dependency-version: 1.74.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 15:14:15 +02:00
Ettore Di Giacinto
429bb7a88c chore(model gallery): add baichuan-inc_baichuan-m2-32 (#6042)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-12 09:43:13 +02:00
LocalAI [bot]
b2e8b6d1aa chore: ⬆️ Update ggml-org/llama.cpp to be48528b068111304e4a0bb82c028558b5705f05 (#6012)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-11 21:06:10 +00:00
LocalAI [bot]
fba5b557a1 chore: ⬆️ Update ggml-org/whisper.cpp to b02242d0adb5c6c4896d59ac86d9ec9fe0d0fe33 (#6009)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-11 12:54:41 +02:00
LocalAI [bot]
6db19c5cb9 chore: ⬆️ Update ggml-org/llama.cpp to 79c1160b073b8148a404f3dd2584be1606dccc66 (#6006)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-11 12:54:21 +02:00
Ettore Di Giacinto
5428678209 chore(ci): more cleanup
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-11 10:10:38 +02:00
LocalAI [bot]
06129139eb chore(model-gallery): ⬆️ update checksum (#6010)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-11 07:54:01 +02:00
Ettore Di Giacinto
05757e2738 feat(backends install): allow to specify name and alias during manual installation (#5971)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-10 10:05:53 +02:00
Ettore Di Giacinto
240b790f29 chore(model gallery): add impish_nemo_12b (#6007)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-10 10:05:20 +02:00
Ettore Di Giacinto
5f221f5946 fix(l4t-diffusers): add sentencepiece (#6005)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-09 09:08:35 +02:00
LocalAI [bot]
def7cdc0bf chore: ⬆️ Update ggml-org/llama.cpp to cd6983d56d2cce94ecb86bb114ae8379a609073c (#6003)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-09 08:41:58 +02:00
Ettore Di Giacinto
ea9bf3dba2 Update backend.yml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-08 23:00:47 +02:00
Ettore Di Giacinto
b8eca530b6 feat(diffusers): add builds for nvidia-l4t (#6004)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 22:48:38 +02:00
Ettore Di Giacinto
47034ddacd chore(deps): bump edgevpn (#6001)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 16:23:18 +02:00
Ettore Di Giacinto
9a41331855 chore(model gallery): add outetts (#6000)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 12:55:58 +02:00
Ettore Di Giacinto
facc0181df chore(model gallery): add chatterbox (#5999)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 12:53:13 +02:00
Ettore Di Giacinto
4733adb983 chore: add Dia to the model gallery, fix backend (#5998)
* fix: correctly call OuteTTS and DiaTTS

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(model gallery): add dia

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 12:40:16 +02:00
Ettore Di Giacinto
326fda3223 chore(model gallery): add tarek07_nomad-llama-70b (#5997)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 12:06:20 +02:00
Ettore Di Giacinto
abf61e5b42 chore(model gallery): add openai-gpt-oss-20b-abliterated-uncensored-neo-imatrix (#5996)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 11:14:46 +02:00
Ettore Di Giacinto
2ae45e7635 chore(model gallery): add huihui-ai_huihui-gpt-oss-20b-bf16-abliterated (#5995)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-08 11:01:52 +02:00
lnnt
7d41551e10 docs: update links in advanced-usage and models documentation (#5994)
* docs: update links in advanced-usage and models documentation

* docs: update links in advanced-usage and models documentation
2025-08-08 10:23:42 +02:00
LocalAI [bot]
6fbd720515 chore: ⬆️ Update ggml-org/whisper.cpp to 4245c77b654cd384ad9f53a4a302be716b3e5861 (#5993)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-08 08:07:17 +02:00
LocalAI [bot]
4e40a8d1ed chore: ⬆️ Update ggml-org/llama.cpp to a0552c8beef74e843bb085c8ef0c63f9ed7a2b27 (#5992)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-07 21:13:14 +00:00
Ettore Di Giacinto
003b9292fe feat(transformers): add support to Dia (#5991)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-07 21:51:52 +02:00
Ettore Di Giacinto
09457b9221 chore(model gallery): add qwen_qwen3-4b-thinking-2507 (#5988)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-07 09:28:37 +02:00
Ettore Di Giacinto
41aa7e107f chore(model gallery): add qwen_qwen3-4b-instruct-2507 (#5987)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-07 09:20:15 +02:00
Ettore Di Giacinto
bda875f962 chore(ci): run bark CI job on self-hosted
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-07 08:40:15 +02:00
LocalAI [bot]
224063f0f7 feat(swagger): update swagger (#5983)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-07 00:32:11 +02:00
Ettore Di Giacinto
89978c8b57 fix(harmony): improve template by adding reasoning effort and system_prompt (#5985)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-07 00:31:37 +02:00
Ettore Di Giacinto
987b5dcac1 chore(model gallery): add openai_gpt-oss-20b-neo (#5986)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-07 00:31:17 +02:00
Ettore Di Giacinto
ec1276e5a9 fix(llama.cpp): do not default to linear rope (#5982)
This seems to have somehow sneaked in during the initial pass to the gRPC
server: instead of setting linear rope only when required, we defaulted
to it when not specified.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-06 23:20:28 +02:00
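For illustration, a hedged Go sketch of the before/after behavior: apply a rope-scaling mode only when one was explicitly requested, rather than defaulting to linear. The type and field names are assumptions for this example.

```go
package main

import "fmt"

// Options loosely mirrors a request carrying an optional rope-scaling
// type; the field name is illustrative.
type Options struct {
	RopeScaling string // "", "linear", "yarn", ...
}

// applyRope sets a scaling mode only when one was requested, instead
// of falling back to "linear" when the field is empty — the behavior
// this commit removes.
func applyRope(o Options) string {
	if o.RopeScaling == "" {
		return "none (model default)"
	}
	return o.RopeScaling
}

func main() {
	fmt.Println(applyRope(Options{}))                      // none (model default)
	fmt.Println(applyRope(Options{RopeScaling: "linear"})) // linear
}
```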
LocalAI [bot]
61ba98d43d chore: ⬆️ Update ggml-org/llama.cpp to e725a1a982ca870404a9c4935df52466327bbd02 (#5984)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-06 21:17:20 +00:00
Ettore Di Giacinto
b9a25b16e6 feat: add reasoning effort and metadata to template (#5981)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-06 21:56:05 +02:00
Ettore Di Giacinto
6a8149e1fd fix: build kokoro-hipblas on self-hosted
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-06 15:50:54 +02:00
Ettore Di Giacinto
9c2840ac38 feat(kokoro): complete kokoro integration (#5978)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-06 15:23:29 +02:00
Ettore Di Giacinto
20a70e1244 feat(backends): add KittenTTS (#5977)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-06 12:38:45 +02:00
Ettore Di Giacinto
3295a298f4 feat(webui): allow to specify image size (#5976)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-06 12:38:02 +02:00
Ettore Di Giacinto
da6f37f000 Update qwen-image.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-06 10:40:46 +02:00
Ettore Di Giacinto
c092633cd7 feat(models): add support to qwen-image (#5975)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-06 10:36:53 +02:00
Ettore Di Giacinto
7e2a522229 Update harmony.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-05 23:58:48 +02:00
LocalAI [bot]
03e8592450 chore: ⬆️ Update ggml-org/llama.cpp to fd1234cb468935ea087d6929b2487926c3afff4b (#5972)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-05 23:14:43 +02:00
Ettore Di Giacinto
f207bd1427 Update backend.yml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-05 23:14:11 +02:00
Ettore Di Giacinto
a5c0fe31c3 chore(models): add gpt-oss-120b (#5974)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-05 23:13:24 +02:00
Ettore Di Giacinto
c68907ac65 chore(models): add gpt-oss-20b (#5973)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-05 23:08:34 +02:00
Ettore Di Giacinto
9087ddc4de chore(deps): bump torch and sentence-transformers (#5969)
* chore(deps): bump torch and sentence-transformers

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(ci): add backend build tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: move jobs to self-hosted

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-05 19:45:20 +02:00
Ettore Di Giacinto
33bebd5114 chore(deps): bump torch and diffusers (#5970)
* chore(ci): add backend build tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(deps): bump torch and diffusers

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(ci): run diffusers/hipblas on self-hosted

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(ci): do not publish darwin if building from PRs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-05 14:47:01 +02:00
LocalAI [bot]
2913676157 chore: ⬆️ Update ggml-org/llama.cpp to 41613437ffee0dbccad684fc744788bc504ec213 (#5968)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-04 23:16:30 +02:00
LocalAI [bot]
e83652489c docs: ⬆️ update docs version mudler/LocalAI (#5967)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-04 21:00:23 +00:00
Richard Palethorpe
d6274eaf4a chore(build): Rename sycl to intel (#5964)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-08-04 11:01:28 +02:00
LocalAI [bot]
4d90971424 chore: ⬆️ Update ggml-org/llama.cpp to d31192b4ee1441bbbecd3cbf9e02633368bdc4f5 (#5965)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-03 21:03:20 +00:00
Ettore Di Giacinto
90f5639639 feat(backends): allow backends to not have a metadata file (#5963)
In this case we generate one on the fly and infer what metadata we
can.

Obviously this has the side effect of not being able to register
potential aliases.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-03 16:47:02 +02:00
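A minimal Go sketch of such a fallback, assuming a `metadata.json` file name and a `Metadata` shape that are illustrative only: when the file is absent, infer the name from the directory, which is exactly why aliases cannot be registered for these backends.

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// Metadata is an illustrative stand-in for a backend's metadata.
type Metadata struct {
	Name    string
	Aliases []string // cannot be inferred, stays empty
}

// loadMetadata reads the metadata file if present; otherwise it infers
// what it can from the install location.
func loadMetadata(dir string) (Metadata, error) {
	_, err := os.ReadFile(filepath.Join(dir, "metadata.json"))
	if errors.Is(err, fs.ErrNotExist) {
		return Metadata{Name: filepath.Base(dir)}, nil // generated on the fly
	}
	if err != nil {
		return Metadata{}, err
	}
	// A real implementation would parse the file here; this sketch
	// stops at the fallback path.
	return Metadata{Name: filepath.Base(dir)}, nil
}

func main() {
	m, _ := loadMetadata("/tmp/backends/llama-cpp")
	fmt.Printf("%+v\n", m)
}
```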
Ettore Di Giacinto
a35a701052 feat(backends): install from local path (#5962)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-03 14:24:50 +02:00
Ettore Di Giacinto
3d8ec72dbf chore(stable-diffusion): bump, set GGML_MAX_NAME (#5961)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-08-03 10:47:02 +02:00
LocalAI [bot]
2a9d675d62 chore: ⬆️ Update ggml-org/llama.cpp to 5c0eb5ef544aeefd81c303e03208f768e158d93c (#5959)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-08-02 23:35:24 +02:00
LocalAI [bot]
c782e8abf1 chore: ⬆️ Update ggml-org/whisper.cpp to 0becabc8d68d9ffa6ddfba5240e38cd7a2642046 (#5958)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-02 21:04:13 +00:00
LocalAI [bot]
a1e1942d83 docs: ⬆️ update docs version mudler/LocalAI (#5956)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-01 22:14:23 +02:00
Dedy F. Setyawan
787302b204 fix(docs): Improve responsiveness of tables (#5954)
Signed-off-by: Dedy F. Setyawan <dedyfajars@gmail.com>
2025-08-01 22:13:53 +02:00
LocalAI [bot]
0b085089b9 chore: ⬆️ Update ggml-org/llama.cpp to daf2dd788066b8b239cb7f68210e090c2124c199 (#5951)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-08-01 08:25:36 +02:00
LocalAI [bot]
624f3b1fc8 feat(swagger): update swagger (#5950)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-31 21:04:23 +00:00
Richard Palethorpe
c07bc55fee fix(intel): Set GPU vendor on Intel images and cleanup (#5945)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-07-31 19:44:46 +02:00
Ettore Di Giacinto
173e0774c0 chore(model gallery): add flux.1-krea-dev-ggml (#5949)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-31 18:32:06 +02:00
Ettore Di Giacinto
8ece26ab7c chore(model gallery): add flux.1-dev-ggml-abliterated-v2-q8_0 (#5948)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-31 17:23:48 +02:00
Ettore Di Giacinto
d704cc7970 chore(model gallery): add flux.1-dev-ggml-q8_0 (#5947)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-31 17:13:19 +02:00
Ettore Di Giacinto
ab17baaae1 chore(capability): improve messages (#5944)
* chore(capability): improve messages

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: isolate to constants, do not detect from the first gpu

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-31 16:25:19 +02:00
Ettore Di Giacinto
ca358fcdca feat(stablediffusion-ggml): allow to load loras (#5943)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-31 16:25:05 +02:00
Ettore Di Giacinto
9aadfd485f chore: update swagger (#5946)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-31 16:22:27 +02:00
LocalAI [bot]
da3b0850de chore: ⬆️ Update ggml-org/whisper.cpp to f7502dca872866a310fe69d30b163fa87d256319 (#5941)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-31 09:26:30 +02:00
LocalAI [bot]
8b1e8b4cda chore: ⬆️ Update ggml-org/llama.cpp to e9192bec564780bd4313ad6524d20a0ab92797db (#5940)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-31 09:26:02 +02:00
Ettore Di Giacinto
3d22bfc27c feat(stablediffusion-ggml): add support to ref images (flux Kontext) (#5935)
* feat(stablediffusion-ggml): add support to ref images

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add it to the model gallery

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-30 22:42:34 +02:00
Ettore Di Giacinto
4438b4361e chore(model gallery): add qwen_qwen3-30b-a3b-thinking-2507 (#5939)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-30 21:18:56 +02:00
Ettore Di Giacinto
04bad9a2da chore(model gallery): add arcee-ai_afm-4.5b (#5938)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-30 15:37:07 +02:00
Ettore Di Giacinto
8235e53602 chore(model gallery): add qwen_qwen3-30b-a3b-instruct-2507 (#5936)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-30 15:29:34 +02:00
LocalAI [bot]
eb5c3670f1 chore: ⬆️ Update ggml-org/llama.cpp to aa79524c51fb014f8df17069d31d7c44b9ea6cb8 (#5934)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-29 21:05:00 +00:00
LocalAI [bot]
89e61fca90 chore: ⬆️ Update ggml-org/whisper.cpp to d0a9d8c7f8f7b91c51d77bbaa394b915f79cde6b (#5932)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-29 08:02:01 +02:00
LocalAI [bot]
9d6efe8842 chore: ⬆️ Update leejet/stable-diffusion.cpp to f6b9aa1a4373e322ff12c15b8a0749e6dd6f0253 (#5930)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-29 08:01:30 +02:00
LocalAI [bot]
60726d16f2 chore: ⬆️ Update ggml-org/llama.cpp to 8ad7b3e65b5834e5574c2f5640056c9047b5d93b (#5931)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-29 08:01:03 +02:00
LocalAI [bot]
9d7ec09ec0 docs: ⬆️ update docs version mudler/LocalAI (#5929)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-28 21:03:44 +00:00
Ettore Di Giacinto
36179ffbed fix(backend gallery): intel images for python-based backends, re-add exllama2 (#5928)
chore(backend gallery): fix intel images for python-based backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-28 15:15:19 +02:00
LocalAI [bot]
d25145e641 chore: ⬆️ Update ggml-org/llama.cpp to bf78f5439ee8e82e367674043303ebf8e92b4805 (#5927)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-27 21:08:32 +00:00
Ettore Di Giacinto
949e5b9be8 feat(rfdetr): add object detection API (#5923)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-27 22:02:51 +02:00
Ettore Di Giacinto
73ecb7f90b chore: drop assistants endpoint (#5926)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-27 21:06:09 +02:00
Ettore Di Giacinto
053bed6e5f feat: normalize search (#5925)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-27 11:51:28 +02:00
LocalAI [bot]
932360bf7e chore: ⬆️ Update ggml-org/llama.cpp to 11dd5a44eb180e1d69fac24d3852b5222d66fb7f (#5921)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-27 09:50:56 +02:00
LocalAI [bot]
6d0b52843f chore: ⬆️ Update ggml-org/whisper.cpp to e7bf0294ec9099b5fc21f5ba969805dfb2108cea (#5922)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-27 09:42:28 +02:00
LocalAI [bot]
078c22f485 docs: ⬆️ update docs version mudler/LocalAI (#5920)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-26 20:58:54 +00:00
Ettore Di Giacinto
6ef3852de5 chore(docs): fixup tag
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-26 21:25:07 +02:00
Ettore Di Giacinto
a8057b952c fix(cuda): be consistent with image tag naming (#5916)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-26 08:30:59 +02:00
Ettore Di Giacinto
fd5c1d916f chore(docs): add documentation on backend detection override (#5915)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-26 08:18:31 +02:00
LocalAI [bot]
5ce982b9c9 chore: ⬆️ Update ggml-org/llama.cpp to c7f3169cd523140a288095f2d79befb20a0b73f4 (#5913)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 23:08:20 +02:00
Ettore Di Giacinto
47ccfccf7a fix(ci): add nvidia-l4t capability to l4t images (#5914)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-25 22:45:09 +02:00
LocalAI [bot]
a760f7ff39 docs: ⬆️ update docs version mudler/LocalAI (#5912)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 22:15:16 +02:00
Ettore Di Giacinto
facf7625f3 fix(vulkan): use correct image suffix (#5911)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 19:20:20 +02:00
Ettore Di Giacinto
b3600b3c50 feat(backend gallery): add mirrors (#5910)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 19:20:08 +02:00
Ettore Di Giacinto
f0b47cfe6a fix(backends gallery): trim string when reading cap from file (#5909)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 18:10:02 +02:00
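The fix pattern, sketched in Go (the function name is hypothetical): trim surrounding whitespace, including the trailing newline most editors add, before using the capability string.

```go
package main

import (
	"fmt"
	"strings"
)

// parseCapability trims whitespace so a file containing "nvidia\n"
// matches the expected capability "nvidia".
func parseCapability(raw []byte) string {
	return strings.TrimSpace(string(raw))
}

func main() {
	fmt.Printf("%q\n", parseCapability([]byte("nvidia\n"))) // "nvidia"
}
```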
Ettore Di Giacinto
ee625fc34e fix(backends gallery): pass-by backend galleries to the model service (#5906)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 16:38:09 +02:00
Ettore Di Giacinto
693aa0b5de Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-25 11:51:23 +02:00
Ettore Di Giacinto
3973e6e5da fix(install.sh): update to use the new binary naming (#5903)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 10:43:22 +02:00
LocalAI [bot]
fb6ec68090 chore: ⬆️ Update ggml-org/whisper.cpp to 7de8dd783f7b2eab56bff6bbc5d3369e34f0e77f (#5902)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 08:40:24 +02:00
LocalAI [bot]
0301fc7c46 chore: ⬆️ Update leejet/stable-diffusion.cpp to eed97a5e1d054f9c1e7ac01982ae480411d4157e (#5901)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 08:40:06 +02:00
LocalAI [bot]
813cb4296d chore: ⬆️ Update ggml-org/llama.cpp to 3f4fc97f1d745f1d5d3c853949503136d419e6de (#5900)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 08:39:44 +02:00
Ettore Di Giacinto
deda3a4972 Update build documentation
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-24 22:53:08 +02:00
Ettore Di Giacinto
a28f27604a Update backends.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-24 16:18:25 +02:00
Richard Palethorpe
8fe9fa98f2 fix(stablediffusion-cpp): Switch back to upstream and update (#5880)
* sync(stablediffusion-cpp): Switch back to upstream and update

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* fix(stablediffusion-ggml): NULL terminate options array to prevent segfault

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* fix(build): Add BUILD_TYPE and BASE_IMAGE to all backends

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-07-24 16:03:18 +02:00
Nathaniel Hyson
4db1b80278 Update quickstart.md (#5898)
Fixed spelling mistake

Signed-off-by: Nathaniel Hyson <Shinrai@users.noreply.github.com>
2025-07-24 15:04:02 +02:00
Dave
b3c2a3c257 fix: untangle pkg and core (#5896)
* migrate core/system to pkg/system - it has no dependencies FROM core, and IS USED in pkg

Signed-off-by: Dave Lee <dave@gray101.com>

* move pkg/templates up to core/templates -- nothing in pkg references it, but it does reference core.

Signed-off-by: Dave Lee <dave@gray101.com>

* remove extra check, len of nil is 0

Signed-off-by: Dave Lee <dave@gray101.com>

* move pkg/startup to core/startup -- it does have important and unfixable dependencies on core

Signed-off-by: Dave Lee <dave@gray101.com>

---------

Signed-off-by: Dave Lee <dave@gray101.com>
2025-07-24 15:03:41 +02:00
LocalAI [bot]
61c2304638 chore: ⬆️ Update ggml-org/llama.cpp to a86f52b2859dae4db5a7a0bbc0f1ad9de6b43ec6 (#5894)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-24 15:02:37 +02:00
Ettore Di Giacinto
92c5ab97e2 chore(Makefile): drop unused targets (#5893)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-24 14:49:50 +02:00
LocalAI [bot]
76e471441c chore: ⬆️ Update richiejp/stable-diffusion.cpp to 10c6501bd05a697e014f1bee3a84e5664290c489 (#5732)
⬆️ Update richiejp/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-23 21:09:02 +00:00
Dave
9cecf5e7ac fix: rename Dockerfile.go --> Dockerfile.golang to avoid IDE errors (#5892)
Extract the Dockerfile.go --> Dockerfile.golang rename up and out; prevents syntax-highlighting and IDE errors

Signed-off-by: Dave Lee <dave@gray101.com>
2025-07-23 21:33:26 +02:00
Ettore Di Giacinto
b7b3164736 chore: try to speedup build
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 21:21:23 +02:00
Ettore Di Giacinto
5f7ece3e94 fix(p2p): adapt to backend changes, general improvements (#5889)
The binary is now named "llama-cpp-rpc-server" for p2p workers.

We also decrease the default token rotation interval; this way,
peer discovery is much more responsive.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 12:40:32 +02:00
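For illustration, a hedged Go sketch of interval-driven token rotation; the interval value and function are placeholders, not the actual p2p code. A shorter interval means peers see fresh tokens sooner, at the cost of more frequent rotations.

```go
package main

import (
	"fmt"
	"time"
)

// rotateToken regenerates the p2p token on a fixed interval; lowering
// the interval makes discovery react faster.
func rotateToken(interval time.Duration, rotations int) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for i := 0; i < rotations; i++ {
		<-ticker.C
		fmt.Println("rotated token at", time.Now().Format(time.RFC3339))
	}
}

func main() {
	rotateToken(100*time.Millisecond, 3)
}
```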
Ettore Di Giacinto
c717b8d800 chore(model gallery): add qwen3-coder-480b-a35b-instruct (#5888)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 11:59:58 +02:00
Ettore Di Giacinto
f1d35c4149 chore(model gallery): add qwen3-235b-a22b-instruct-2507 (#5887)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 11:54:58 +02:00
Ettore Di Giacinto
ee7e77b6c1 chore(model gallery): add menlo_lucy (#5886)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 11:51:51 +02:00
Ettore Di Giacinto
324fecbb75 chore(model gallery): add entfane_math-genius-7b (#5885)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 11:45:23 +02:00
Ettore Di Giacinto
a79bfcf0a7 chore(model gallery): add dream-org_dream-v0-instruct-7b (#5884)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 11:40:53 +02:00
Ettore Di Giacinto
82495e7fb6 chore(model gallery): add omega-qwen3-atom-8b (#5883)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 11:33:43 +02:00
Ettore Di Giacinto
6030b12283 chore(backend gallery): add name to 'diffusers' meta
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-23 09:21:04 +02:00
LocalAI [bot]
b5be867e28 chore: ⬆️ Update ggml-org/llama.cpp to acd6cb1c41676f6bbb25c2a76fa5abeb1719301e (#5882)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-22 21:12:06 +00:00
Ettore Di Giacinto
9b806250d4 chore: drop vllm for cuda 11 (#5881)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-22 18:47:31 +02:00
Ettore Di Giacinto
5f066e702f fix(darwin): add dashes on image suffix
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-22 17:08:19 +02:00
dependabot[bot]
47bb3a3db2 chore(deps): bump securego/gosec from 2.22.5 to 2.22.7 (#5878)
Bumps [securego/gosec](https://github.com/securego/gosec) from 2.22.5 to 2.22.7.
- [Release notes](https://github.com/securego/gosec/releases)
- [Changelog](https://github.com/securego/gosec/blob/master/.goreleaser.yml)
- [Commits](https://github.com/securego/gosec/compare/v2.22.5...v2.22.7)

---
updated-dependencies:
- dependency-name: securego/gosec
  dependency-version: 2.22.7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-22 16:42:11 +02:00
Richard Palethorpe
51230a801e fix(build): Add and update ONEAPI_VERSION (#5874)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-07-22 16:41:49 +02:00
Richard Palethorpe
754bedc3ea fix(realtime): Reset speech started flag on commit (#5879)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-07-22 16:41:12 +02:00
Ettore Di Giacinto
98e5291afc feat: refactor build process, drop embedded backends (#5875)
* feat: split remaining backends and drop embedded backends

- Drop silero-vad, huggingface, and stores backend from embedded
  binaries
- Refactor Makefile and Dockerfile to avoid building grpc backends
- Drop golang code that was used to embed backends
- Simplify building by using goreleaser

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(gallery): be specific with llama-cpp backend templates

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(docs): update

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(ci): minor fixes

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: drop all ffmpeg references

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix: run protogen-go

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Always enable p2p mode

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Update gorelease file

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(stores): do not always load

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fix linting issues

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Simplify

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Mac OS fixup

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-22 16:31:04 +02:00
LocalAI [bot]
e29b2c3aff chore: ⬆️ Update ggml-org/llama.cpp to 6c9ee3b17e19dcc82ab93d52ae46fdd0226d4777 (#5877)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-22 08:25:43 +02:00
LocalAI [bot]
8dc574f3c4 chore: ⬆️ Update ggml-org/whisper.cpp to 1f5cf0b2888402d57bb17b2029b2caa97e5f3baf (#5876)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-22 08:25:13 +02:00
Ettore Di Giacinto
05bf2493a5 fix: do not pass by environ to ffmpeg (#5871)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-21 14:35:33 +02:00
Max Goltzsche
eae4ca08da feat(openai): support input_audio chat api field (#5870)
Improves the chat completion endpoint's OpenAI API compatibility by supporting messages of type `input_audio`, e.g.:
```json
{
  ...
  "messages": [
    {
      "role": "user",
      "content": [{
        "type": "input_audio",
        "input_audio": {
          "data": "<base64-encoded audio data>",
          "format": "wav"
        }
      }]
    }
  ]
}
```

Closes #5869

Signed-off-by: Max Goltzsche <max.goltzsche@gmail.com>
2025-07-21 09:15:55 +02:00
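For illustration, a Go sketch that produces the same message shape as the JSON above; the struct definitions are assumptions made for this example, not LocalAI's actual types.

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// Minimal structs mirroring the request body shown above; field names
// follow the OpenAI-style JSON in the commit message.
type InputAudio struct {
	Data   string `json:"data"`
	Format string `json:"format"`
}

type ContentPart struct {
	Type       string      `json:"type"`
	InputAudio *InputAudio `json:"input_audio,omitempty"`
}

type Message struct {
	Role    string        `json:"role"`
	Content []ContentPart `json:"content"`
}

func main() {
	wav := []byte{ /* raw wav bytes would go here */ }
	msg := Message{
		Role: "user",
		Content: []ContentPart{{
			Type: "input_audio",
			InputAudio: &InputAudio{
				Data:   base64.StdEncoding.EncodeToString(wav),
				Format: "wav",
			},
		}},
	}
	out, _ := json.MarshalIndent(msg, "", "  ")
	fmt.Println(string(out))
}
```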
LocalAI [bot]
fa284f7445 chore: ⬆️ Update ggml-org/llama.cpp to 2be60cbc2707359241c2784f9d2e30d8fc7cdabb (#5867)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-21 09:14:09 +02:00
Ettore Di Giacinto
8f69b80520 Update index.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-20 22:54:12 +02:00
Ettore Di Giacinto
b1fc5acd4a feat: split whisper from main binary (#5863)
* feat: split whisper from main binary

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Cleanup makefile

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add backend builds (missing only darwin)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Test CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add whisper backend to test runs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Make sure we have runtime libs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Less grpc on the main Dockerfile

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fix hipblas build

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add whisper to index

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Re-enable CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Adapt auto-bumper

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-20 22:52:45 +02:00
LocalAI [bot]
fab41c29dd chore(model-gallery): ⬆️ update checksum (#5865)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-20 20:37:43 +02:00
Ettore Di Giacinto
fb0ec96396 ci: do not upgrade pip
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-20 12:30:12 +02:00
LocalAI [bot]
7659461036 chore: ⬆️ Update ggml-org/llama.cpp to a979ca22db0d737af1e548a73291193655c6be99 (#5862)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-20 08:43:36 +02:00
Ettore Di Giacinto
580687da46 feat: remove stablediffusion-ggml from main binary (#5861)
* feat: split stablediffusion-ggml from main binary

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Test CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Adapt ci tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to support nvidia-l4t

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Latest fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-19 21:58:53 +02:00
LocalAI [bot]
1929eb2894 chore: ⬆️ Update ggml-org/llama.cpp to bf9087f59aab940cf312b85a67067ce33d9e365a (#5860)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-19 08:52:07 +02:00
Ettore Di Giacinto
b29544d747 feat: split piper from main binary (#5858)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-19 08:31:33 +02:00
Ettore Di Giacinto
7c30e82647 fix: autoload backends when installing models from YAML files (#5859)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-18 21:31:02 +02:00
Dedy F. Setyawan
a1d061c835 fix(docs): Resolve logo overlap on tablet view (#5853)
* fix(docs): Resolve logo overlap on tablet view

Signed-off-by: Dedy F. Setyawan <dedyfajars@gmail.com>

* fix(docs): Adjust header logo size

Signed-off-by: Dedy F. Setyawan <dedyfajars@gmail.com>

* refactor(docs): Rework header logo sizing implementation

Signed-off-by: Dedy F. Setyawan <dedyfajars@gmail.com>

---------

Signed-off-by: Dedy F. Setyawan <dedyfajars@gmail.com>
2025-07-18 15:55:44 +02:00
Sijia Lu
851c67019c fix: dockerfile typo (#5823)
fix dockerfile typo

Signed-off-by: LeonSijiaLu <leonsijialu1@gmail.com>
2025-07-18 14:59:33 +02:00
Ettore Di Giacinto
53ed5ef189 Makefile fixup
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-18 14:57:14 +02:00
Ettore Di Giacinto
294f7022f3 feat: do not bundle llama-cpp anymore (#5790)
* Build llama.cpp separately

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* WIP

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* WIP

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* WIP

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Start to try to attach some tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add git and small fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix: correctly autoload external backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to run AIO tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Slightly update the Makefile help

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Adapt auto-bumper

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to run linux test

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add llama-cpp into build pipelines

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add default capability (for cpu)

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Drop llama-cpp specific logic from the backend loader

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* drop grpc install in ci for tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Pass backends path for tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Build protogen at start

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(tests): set backends path consistently

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Correctly configure the backends path

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to build for darwin

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* WIP

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Compile for metal on arm64/darwin

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to run build off from cross-arch

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add to the backend index nvidia-l4t and cpu's llama-cpp backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Build also darwin-x86 for llama-cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Disable arm64 builds temporarily

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Test backend build on PR

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixup build backend reusable workflow

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* pass by skip drivers

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Use crane

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Skip drivers

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* x86 darwin

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add packaging step for llama.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fix leftover from bark-cpp extraction

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to fix hipblas build

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-18 13:24:12 +02:00
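The split above means llama.cpp ships as an external backend that LocalAI discovers under a configured backends path. A minimal sketch of such discovery, assuming a hypothetical layout where each backend is a subdirectory containing a `run.sh` launcher (paths and helper names are illustrative, not LocalAI's actual code):

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// discoverBackends walks a backends path and returns the names of
// subdirectories that contain a run.sh launcher. The layout is an
// assumption for illustration only.
func discoverBackends(backendsPath string) ([]string, error) {
	entries, err := os.ReadDir(backendsPath)
	if err != nil {
		return nil, err
	}
	var found []string
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		runner := filepath.Join(backendsPath, e.Name(), "run.sh")
		if _, err := os.Stat(runner); err == nil {
			found = append(found, e.Name())
		}
	}
	return found, nil
}

func main() {
	backends, err := discoverBackends("/backends")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, b := range backends {
		fmt.Println("external backend:", b)
	}
}
```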
Richard Palethorpe
932f6b01a6 feat(realtime): Add speech started and stopped events (#5856)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-07-18 09:22:23 +02:00
LocalAI [bot]
e96452c5d4 chore: ⬆️ Update ggml-org/llama.cpp to d6fb3f6b49b27ef1c0f4cf5128e041f7e7dc03af (#5857)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-17 22:56:40 +00:00
Ettore Di Giacinto
5fc8d5bb78 fix: explorer page should not have login (#5855)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-17 10:54:03 +02:00
LocalAI [bot]
121937ed6f chore: ⬆️ Update ggml-org/llama.cpp to 496957e1cbcb522abc63aa18521036e40efce985 (#5854)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-16 22:52:10 +00:00
LocalAI [bot]
2e38f2a054 chore: ⬆️ Update ggml-org/llama.cpp to 4a4f426944e79b79e389f9ed7b34831cb9b637ad (#5852)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-15 22:56:04 +00:00
LocalAI [bot]
2a6187bc01 chore: ⬆️ Update ggml-org/llama.cpp to bdca38376f7e8dd928defe01ce6a16218a64b040 (#5850)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-15 08:16:01 +02:00
LocalAI [bot]
584c48df5a chore: ⬆️ Update ggml-org/whisper.cpp to 032697b9a850dc2615555e2a93a683cc3dd58559 (#5849)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-14 22:47:30 +00:00
Ettore Di Giacinto
8dd67748a1 chore(model gallery): add sophosympatheia_strawberrylemonade-70b-v1.1 (#5848)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-14 15:38:41 +02:00
Ettore Di Giacinto
3fd0bf3c88 chore(model gallery): add zhi-create-qwen3-32b-i1 (#5847)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-14 15:33:40 +02:00
LocalAI [bot]
4062a6c404 chore: ⬆️ Update ggml-org/llama.cpp to 982e347255723fe6d02e60ee30cfdd0559c884c5 (#5845)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-14 08:21:54 +02:00
Ettore Di Giacinto
354c0b763e feat(cli): add command to create custom OCI images from directories (#5844)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-14 08:21:29 +02:00
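Creating a custom OCI image from a directory amounts to tarring the directory contents into a layer and appending it to an empty base image. A sketch using go-containerregistry's crane (the input directory, image reference, and output path are placeholders; this is not the actual CLI implementation):

```
package main

import (
	"archive/tar"
	"io"
	"log"
	"os"
	"path/filepath"

	"github.com/google/go-containerregistry/pkg/crane"
	"github.com/google/go-containerregistry/pkg/v1/empty"
)

// tarDirectory writes dir's regular files into a tar file and returns
// its path. Symlinks and special files are skipped for brevity.
func tarDirectory(dir string) (string, error) {
	tmp, err := os.CreateTemp("", "layer-*.tar")
	if err != nil {
		return "", err
	}
	defer tmp.Close()
	tw := tar.NewWriter(tmp)
	defer tw.Close()
	err = filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) error {
		if err != nil || d.IsDir() {
			return err
		}
		info, err := d.Info()
		if err != nil || !info.Mode().IsRegular() {
			return err
		}
		rel, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		hdr, err := tar.FileInfoHeader(info, "")
		if err != nil {
			return err
		}
		hdr.Name = filepath.ToSlash(rel)
		if err := tw.WriteHeader(hdr); err != nil {
			return err
		}
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(tw, f)
		return err
	})
	return tmp.Name(), err
}

func main() {
	layerTar, err := tarDirectory("./my-model-dir") // hypothetical input directory
	if err != nil {
		log.Fatal(err)
	}
	img, err := crane.Append(empty.Image, layerTar)
	if err != nil {
		log.Fatal(err)
	}
	// Write a single-image tarball that can be imported or pushed later.
	if err := crane.Save(img, "example.org/custom-model:latest", "custom-model.tar"); err != nil {
		log.Fatal(err)
	}
}
```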
LocalAI [bot]
40f9065367 chore: ⬆️ Update ggml-org/whisper.cpp to a16da91365700f396da916d16a7f5a2ec99364b9 (#5846)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-13 22:46:03 +00:00
Ettore Di Giacinto
fc02bc0aba chore(model gallery): add google_medgemma-27b-it (#5843)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-13 18:20:21 +02:00
Ettore Di Giacinto
45badb75e8 chore(model gallery): add google_medgemma-4b-it (#5842)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-13 17:56:44 +02:00
LocalAI [bot]
d7e1922582 chore: ⬆️ Update ggml-org/whisper.cpp to 3775c503d5133d3d8b99d7d062e87a54064b0eb8 (#5841)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-13 08:49:15 +02:00
LocalAI [bot]
642a39afa0 chore: ⬆️ Update ggml-org/llama.cpp to c31e60647def83d671bac5ab5b35579bf25d9aa1 (#5840)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-12 22:44:45 +00:00
Ettore Di Giacinto
34d9deaf39 chore(model gallery): add impish_magic_24b-i1 (#5839)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-12 19:37:10 +02:00
Ettore Di Giacinto
ef37a73e1b chore(model gallery): add mistral-2x24b-moe-power-coder-magistral-devstral-reasoning-ultimate-neo-max-44b (#5838)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-12 19:27:46 +02:00
Ettore Di Giacinto
37de945ae8 chore(model gallery): add nvidia_llama-3_3-nemotron-super-49b-genrm-multilingual (#5837)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-12 19:21:40 +02:00
LocalAI [bot]
468f1f4539 chore: ⬆️ Update ggml-org/llama.cpp to f5e96b368f1acc7f53c390001b936517c4d18999 (#5835)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-11 22:46:25 +00:00
Ettore Di Giacinto
0640451368 chore(model gallery): add mistralai_devstral-small-2507 (#5834)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-11 11:51:11 +02:00
Ettore Di Giacinto
99058511cc chore(model gallery): add huihui-ai_huihui-gemma-3n-e4b-it-abliterated (#5833)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-11 11:42:01 +02:00
Ettore Di Giacinto
ec293b3b59 chore(model gallery): add microsoft_nextcoder-32b (#5832)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-11 11:36:11 +02:00
LocalAI [bot]
9b1b6df8e9 chore: ⬆️ Update ggml-org/llama.cpp to 0b8855775c6b873931d40b77a5e42558aacbde52 (#5830)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-10 22:48:03 +00:00
Ettore Di Giacinto
cd7fbafcd2 chore(model gallery): add thedrummer_tiger-gemma-12b-v3 (#5827)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-10 14:19:41 +02:00
Ettore Di Giacinto
e5125216cf chore(model gallery): add thedrummer_big-tiger-gemma-27b-v3 (#5826)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-10 14:17:34 +02:00
Ettore Di Giacinto
2105f82433 chore(model gallery): add delta-vector_plesio-70b (#5825)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-10 14:13:14 +02:00
Ettore Di Giacinto
49c0c7881a chore(model gallery): add huggingfacetb_smollm3-3b (#5820)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-09 18:27:52 +02:00
Ettore Di Giacinto
f8829376d8 chore(model gallery): add zerofata_l3.3-geneticlemonade-opus-70b (#5819)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-09 18:23:01 +02:00
Ettore Di Giacinto
0475f63675 chore(model gallery): add lyranovaheart_starfallen-snow-fantasy-24b-ms3.2-v0.0 (#5818)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-09 18:20:16 +02:00
Ettore Di Giacinto
ec206cc67c feat(cli): allow to install backends from OCI tar files (#5816)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-09 18:19:51 +02:00
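Installing a backend from an OCI tar file amounts to loading the image from the tarball and unpacking its flattened filesystem (via mutate.Extract, which also appears in the gallery status fix above) into the backends directory. A hedged sketch with illustrative paths; real code would also guard against `..` path traversal and handle links:

```
package main

import (
	"archive/tar"
	"io"
	"log"
	"os"
	"path/filepath"

	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

// installFromOCITar loads an image from a local tarball and unpacks its
// flattened filesystem into destDir.
func installFromOCITar(tarPath, destDir string) error {
	img, err := tarball.ImageFromPath(tarPath, nil)
	if err != nil {
		return err
	}
	// mutate.Extract flattens all layers into a single tar stream.
	rc := mutate.Extract(img)
	defer rc.Close()
	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		target := filepath.Join(destDir, filepath.Clean(hdr.Name))
		switch hdr.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(target, 0o755); err != nil {
				return err
			}
		case tar.TypeReg:
			if err := os.MkdirAll(filepath.Dir(target), 0o755); err != nil {
				return err
			}
			f, err := os.Create(target)
			if err != nil {
				return err
			}
			if _, err := io.Copy(f, tr); err != nil {
				f.Close()
				return err
			}
			f.Close()
		}
	}
}

func main() {
	if err := installFromOCITar("backend.tar", "/backends/my-backend"); err != nil {
		log.Fatal(err)
	}
}
```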
LocalAI [bot]
34171fcf94 chore: ⬆️ Update ggml-org/llama.cpp to 6efcd65945a98cf6883cdd9de4c8ccd8c79d219a (#5817)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-08 22:46:10 +00:00
LocalAI [bot]
238c334aa7 chore: ⬆️ Update ggml-org/whisper.cpp to 869335f2d58d04010535be9ae23a69a9da12a169 (#5809)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-08 17:46:08 +02:00
Ettore Di Giacinto
d2df0a1769 chore(model gallery): add qwen3-8b-shiningvaliant3 (#5815)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-08 13:17:43 +02:00
Ettore Di Giacinto
d58647ac31 chore(model gallery): add ockerman0_anubislemonade-70b-v1.1 (#5814)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-08 13:14:46 +02:00
Ettore Di Giacinto
c1d3ce9a93 chore(model gallery): add cognitivecomputations_dolphin-mistral-24b-venice-edition (#5813)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-08 13:09:29 +02:00
Richard Palethorpe
c1dd4ff5d5 feat(whisper): Enable SYCL (#5802)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-07-08 12:54:20 +02:00
LocalAI [bot]
48118b9582 chore: ⬆️ Update ggml-org/llama.cpp to 12f55c302b35cfe900b84c5fe67c262026af9c44 (#5808)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-07 22:50:16 +00:00
Ettore Di Giacinto
ceda2e69db chore(model gallery): add huihui-jan-nano-abliterated (#5806)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-07 11:35:39 +02:00
Ettore Di Giacinto
cea1703acc chore(model gallery): add zonui-3b-i1 (#5805)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-07 11:32:58 +02:00
Ettore Di Giacinto
33fc9b9922 chore(model gallery): add mini-hydra (#5804)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-07 11:27:42 +02:00
Ettore Di Giacinto
b783997c52 chore(model gallery): add compumacy-experimental-32b (#5803)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-07 11:21:44 +02:00
LocalAI [bot]
f6ec06d21c chore: ⬆️ Update ggml-org/llama.cpp to 6491d6e4f1caf0ad2221865b4249ae6938a6308c (#5801)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-06 22:45:50 +00:00
Ettore Di Giacinto
7e1f2657d5 Update GPU-acceleration.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-06 19:03:34 +02:00
Ettore Di Giacinto
9589097252 chore(model gallery): add nano_imp_1b-q8_0 (#5800)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-06 18:58:56 +02:00
Ettore Di Giacinto
cb87d331a9 chore(model gallery): add sicariussicariistuff_impish_llama_4b (#5799)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-06 18:57:18 +02:00
Ettore Di Giacinto
6dfc96249a Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-06 18:07:36 +02:00
LocalAI [bot]
a2564ed654 chore: ⬆️ Update ggml-org/llama.cpp to a0374a67e2924f2e845cdc59dd67d9a44065a89c (#5798)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-05 22:48:28 +00:00
LocalAI [bot]
6c747caa34 chore: ⬆️ Update ggml-org/llama.cpp to ef797db357e44ecb7437fa9d22f4e1614104b342 (#5795)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-04 22:46:51 +00:00
Ettore Di Giacinto
8ae5e0feb9 chore(model gallery): add ockerman0_anubislemonade-70b-v1 (#5794)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-04 18:43:35 +02:00
Ettore Di Giacinto
c35dd0a7b8 chore(model gallery): add zerofata_ms3.2-paintedfantasy-visage-33b (#5793)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-04 18:42:01 +02:00
Ettore Di Giacinto
2f5af6b246 chore(model gallery): add agentica-org_deepswe-preview (#5792)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-04 18:39:36 +02:00
Ettore Di Giacinto
00cf2e0e0a chore(model gallery): add helpingai_dhanishtha-2.0-preview (#5791)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-04 18:38:18 +02:00
LocalAI [bot]
c7a1d9c089 chore: ⬆️ Update ggml-org/llama.cpp to bee28421be25fd447f61cb6db64d556cbfce32ec (#5788)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-04 08:46:56 +02:00
LocalAI [bot]
ad7ba52166 chore: ⬆️ Update PABannier/bark.cpp to 5d5be84f089ab9ea53b7a793f088d3fbf7247495 (#4786)
⬆️ Update PABannier/bark.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-03 22:44:53 +00:00
Ettore Di Giacinto
c5b9f45166 chore(cli): add backends CLI to manipulate and install backends (#5787)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-03 19:31:27 +02:00
Ettore Di Giacinto
61b64a65ab chore(bark-cpp): generalize and move to bark-cpp (#5786)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-03 19:31:10 +02:00
Ettore Di Giacinto
8276952920 feat(system): detect and allow to override capabilities (#5785)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-03 19:30:52 +02:00
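A minimal sketch of capability detection with an override, in the spirit of the commit above; the environment variable name and the device-file heuristics are assumptions for illustration, not LocalAI's actual configuration surface:

```
package main

import (
	"fmt"
	"os"
)

// detectCapability picks a compute capability for backend selection.
func detectCapability() string {
	if v := os.Getenv("BACKEND_CAPABILITY"); v != "" {
		return v // explicit override wins
	}
	if _, err := os.Stat("/proc/driver/nvidia/version"); err == nil {
		return "nvidia"
	}
	if _, err := os.Stat("/dev/kfd"); err == nil { // ROCm/amdgpu compute node
		return "amd"
	}
	return "cpu" // fall back to the default CPU capability
}

func main() {
	fmt.Println("capability:", detectCapability())
}
```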
Ettore Di Giacinto
b7cd5bfaec feat(backends): add metas in the gallery (#5784)
* chore(backends): add metas in the gallery

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: correctly handle aliases and metas with same names

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-03 18:01:55 +02:00
LocalAI [bot]
da4312e4d3 chore: ⬆️ Update ggml-org/llama.cpp to e75ba4c0434eb759eb7ff74e034ebe729053e575 (#5783)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-03 10:01:17 +02:00
LocalAI [bot]
7d507c54ed chore: ⬆️ Update ggml-org/whisper.cpp to d9999d54c868b8bfcd376aa26067e787d53e679e (#5782)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-03 09:57:36 +02:00
LocalAI [bot]
df7ed49889 docs: ⬆️ update docs version mudler/LocalAI (#5781)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-02 22:45:21 +00:00
Ettore Di Giacinto
bfdc29d316 fix(gallery): correctly show status for downloading OCI images (#5774)
We can't use the bytes written by mutate.Extract as the current status, as that
will be bigger than the compressed image size. Image manifests give no
guarantee about the type of artifact (compressed or not) when reporting
the layer size.

Split the extraction process into two parts, downloading and extracting as
a flattened filesystem, so that the status of downloading and extracting
can each be displayed accordingly.

This change also fixes a small nuance in detecting installed backends:
the check is now more consistent and looks for whether a metadata.json and/or
a path with a `run.sh` file is present.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-02 08:25:48 +02:00
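The detection tweak described above fits in a few lines; the directory layout and helper name below are illustrative, not LocalAI's actual code:

```
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// isInstalled mirrors the check described above: a backend counts as
// installed when its directory holds a metadata.json and/or a run.sh.
func isInstalled(backendDir string) bool {
	for _, marker := range []string{"metadata.json", "run.sh"} {
		if _, err := os.Stat(filepath.Join(backendDir, marker)); err == nil {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isInstalled("/backends/llama-cpp"))
}
```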
LocalAI [bot]
7fdc006071 chore: ⬆️ Update ggml-org/llama.cpp to de569441470332ff922c23fb0413cc957be75b25 (#5777)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-02 08:25:29 +02:00
LocalAI [bot]
615830245b chore: ⬆️ Update ggml-org/whisper.cpp to bca021c9740b267c2973fba56555be052006023a (#5776)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-02 08:24:58 +02:00
LocalAI [bot]
61376c0fa7 docs: ⬆️ update docs version mudler/LocalAI (#5775)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-01 22:45:24 +00:00
Ettore Di Giacinto
d0fb23514f Revert "fix(gallery): correctly show status for downloading OCI images"
This reverts commit 780d034ac9.
2025-07-01 21:32:04 +02:00
Ettore Di Giacinto
780d034ac9 fix(gallery): correctly show status for downloading OCI images
We can't use the bytes written by mutate.Extract as the current status, as that
will be bigger than the compressed image size. Image manifests give no
guarantee about the type of artifact (compressed or not) when reporting
the layer size.

Split the extraction process into two parts, downloading and extracting as
a flattened filesystem, so that the status of downloading and extracting
can each be displayed accordingly.

This change also fixes a small nuance in detecting installed backends:
the check is now more consistent and looks for whether a metadata.json and/or
a path with a `run.sh` file is present.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-01 19:56:28 +02:00
Ettore Di Giacinto
ec2a044c7e chore(model gallery): add pinkpixel_crystal-think-v2 (#5773)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-01 16:18:19 +02:00
Ettore Di Giacinto
ad6fdd21fd chore(model gallery): add steelskull_l3.3-shakudo-70b (#5772)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-01 16:15:22 +02:00
Ettore Di Giacinto
cd94e6b352 chore(model gallery): add thedrummer_anubis-70b-v1.1 (#5771)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-01 13:54:29 +02:00
Richard Palethorpe
b37cef3718 fix: Diffusers and XPU fixes (#5737)
* fix(README): Add device flags for Intel/XPU

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* fix(diffusers/xpu): Set device to XPU and ignore CUDA request when on Intel

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-07-01 12:36:17 +02:00
Dedy F. Setyawan
9f957d547d fix(docs): Improve Header Responsiveness - Hide "Star us on GitHub!" on Mobile (#5770) 2025-07-01 12:15:16 +02:00
LocalAI [bot]
f0d9f0c5d8 chore: ⬆️ Update ggml-org/llama.cpp to 0a5a3b5cdfd887cf0f8e09d9ff89dee130cfcdde (#5759)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-30 22:46:55 +00:00
LocalAI [bot]
d33e1c72a3 chore: ⬆️ Update ggml-org/llama.cpp to caf5681fcb47dfe9bafee94ef9aa8f669ac986c7 (#5758)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-29 22:49:47 +00:00
Ettore Di Giacinto
33f9ee06c9 fix(gallery): automatically install model from name (#5757)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-29 17:42:58 +02:00
Ettore Di Giacinto
c54677402d chore(model gallery): add qwen3-33b-a3b-stranger-thoughts-abliterated-uncensored (#5755)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-29 10:04:33 +02:00
LocalAI [bot]
3fe3a7b23d chore: ⬆️ Update ggml-org/llama.cpp to 27208bf657cfe7262791df473927225e48efe482 (#5753)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-29 09:06:37 +02:00
LocalAI [bot]
f8ff6fa1fd docs: ⬆️ update docs version mudler/LocalAI (#5752)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-28 22:17:49 +02:00
Ettore Di Giacinto
dfadc3696e feat(llama.cpp): allow to set kv-overrides (#5745)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-28 21:26:07 +02:00
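Upstream llama.cpp accepts metadata overrides as KEY=TYPE:VALUE pairs (for example via its --override-kv option, with types such as int, float, bool, str). A small parser sketch of that syntax only; how LocalAI surfaces the option is not shown here:

```
package main

import (
	"fmt"
	"strings"
)

// kvOverride is one model-metadata override in llama.cpp's
// KEY=TYPE:VALUE form (e.g. "tokenizer.ggml.add_bos_token=bool:false").
type kvOverride struct {
	Key, Type, Value string
}

// parseKVOverride splits KEY=TYPE:VALUE.
func parseKVOverride(s string) (kvOverride, error) {
	key, rest, ok := strings.Cut(s, "=")
	if !ok {
		return kvOverride{}, fmt.Errorf("missing '=' in %q", s)
	}
	typ, val, ok := strings.Cut(rest, ":")
	if !ok {
		return kvOverride{}, fmt.Errorf("missing ':' in %q", s)
	}
	return kvOverride{Key: key, Type: typ, Value: val}, nil
}

func main() {
	o, err := parseKVOverride("tokenizer.ggml.add_bos_token=bool:false")
	fmt.Println(o, err)
}
```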
Ettore Di Giacinto
dbcf5fb4fc chore(model gallery): add gemma-3-4b-it-max-horror-uncensored-dbl-x-imatrix (#5751)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-28 18:18:14 +02:00
Ettore Di Giacinto
2633137a17 chore(model gallery): add qwen3-22b-a3b-the-harley-quinn (#5750)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-28 18:17:10 +02:00
Ettore Di Giacinto
d9c17dd23b chore(model gallery): add mistral-small-3.2-46b-the-brilliant-raconteur-ii-instruct-2506 (#5749)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-28 18:15:25 +02:00
Ettore Di Giacinto
d8b7bd4860 chore(model gallery): add qwen3-42b-a3b-stranger-thoughts-deep20x-abliterated-uncensored-i1 (#5748)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-28 18:12:17 +02:00
Ettore Di Giacinto
a611cbc0f4 chore(model gallery): add qwen3-55b-a3b-total-recall-deep-40x (#5747)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-28 17:54:32 +02:00
Ettore Di Giacinto
850b525159 chore(model gallery): add qwen3-55b-a3b-total-recall-v1.3-i1 (#5746)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-28 17:47:46 +02:00
Ettore Di Giacinto
35b3426a2a Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-28 09:16:25 +02:00
LocalAI [bot]
cd2b0c0e7c chore: ⬆️ Update ggml-org/llama.cpp to 72babea5dea56c8a8e8420ccf731b12a5cf37854 (#5743)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-27 23:46:27 +02:00
LocalAI [bot]
73d80c43a8 chore: ⬆️ Update ggml-org/whisper.cpp to c88ffbf9baeaae8c2cc0a4f496618314bb2ee9e0 (#5742)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-27 23:45:57 +02:00
LocalAI [bot]
665562b850 docs: ⬆️ update docs version mudler/LocalAI (#5741)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-27 22:23:43 +02:00
Ettore Di Giacinto
7a78e4f482 fix(backends gallery): meta packages do not have URIs (#5740)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-27 22:23:14 +02:00
Ettore Di Giacinto
6f41a6f934 fix(backends gallery): correctly identify gpu vendor (#5739)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-27 22:22:58 +02:00
Ettore Di Giacinto
bb54f2da2b feat(gallery): automatically install missing backends along with models (#5736)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-27 18:25:44 +02:00
Ettore Di Giacinto
e1cc7ee107 fix(ci): enable tag-latest to auto (#5738)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-27 18:17:01 +02:00
Ettore Di Giacinto
cfc9dfa3d5 fix(ci): better handling of latest images for backends (#5735)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-27 10:32:58 +02:00
LocalAI [bot]
6a650e68cb chore: ⬆️ Update ggml-org/whisper.cpp to 32cf4e2aba799aff069011f37ca025401433cf9f (#5733)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-26 22:09:26 +02:00
LocalAI [bot]
5e1373877a chore: ⬆️ Update ggml-org/llama.cpp to 8846aace4934ad29651ea61b8c7e3f6b0556e3d2 (#5734)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-26 22:09:03 +02:00
Ettore Di Giacinto
b5b0ab26e7 fix(ci): remove non-existent input from build matrix
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-26 21:42:27 +02:00
Ettore Di Giacinto
9725bb4bbd chore(model gallery): add gemma-3n-e4b-it (#5731)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-26 19:36:50 +02:00
Ettore Di Giacinto
33b4275bbc chore(model gallery): add gemma-3n-e2b-it (#5730)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-26 19:35:49 +02:00
Ettore Di Giacinto
6644af10c6 feat: ⚠️ reduce image size and stop bundling sources (#5721)
feat: reduce image size and stop bundling sources

Do not copy sources anymore, and reduce the packages in the base images by
not using builder images.

If a rebuild is needed, just build the container image from scratch by
following the docs. We will slowly migrate all backends to the gallery to
keep the core small.

This PR is a breaking change: it also sets the base folders to /models
and /backends instead of /build/models and /build/backends.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-26 18:41:38 +02:00
Ettore Di Giacinto
7c4a2e9b85 chore(ci): ⚠️ fix latest tag by using docker meta action (#5722)
chore(ci): fix latest tag by using docker meta action

Also uniform tagging names

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-26 18:40:25 +02:00
Ettore Di Giacinto
bcccee3909 fix(backends gallery): delete dangling dirs if installation failed (#5729)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-26 17:38:03 +02:00
Ettore Di Giacinto
c6f50ddd0c Revert "chore: ⬆️ Update leejet/stable-diffusion.cpp to 10c6501bd05a697e014f1bee3a84e5664290c489" (#5727)
Revert "chore: ⬆️ Update leejet/stable-diffusion.cpp to `10c6501bd05a…"

This reverts commit 30600dd5cb.
2025-06-26 13:25:25 +02:00
LocalAI [bot]
6613373b1b chore: ⬆️ Update ggml-org/whisper.cpp to 4daf7050ca2bf17f5166f45ac6da651c4e33f293 (#5725)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-26 13:00:20 +02:00
LocalAI [bot]
1659b3f795 chore: ⬆️ Update ggml-org/llama.cpp to 2bf9d539dd158345e3a3b096e16474af535265b4 (#5724)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-26 12:59:57 +02:00
LocalAI [bot]
30600dd5cb chore: ⬆️ Update leejet/stable-diffusion.cpp to 10c6501bd05a697e014f1bee3a84e5664290c489 (#4925)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-25 22:45:15 +00:00
Ettore Di Giacinto
179fcf5541 chore(model gallery): add menlo_jan-nano-128k (#5723)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-25 12:14:11 +02:00
LocalAI [bot]
9cb75086bb chore: ⬆️ Update ggml-org/whisper.cpp to 0083335ba0e9d6becbe0958903b0a27fc2ebaeed (#5718)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-25 09:47:33 +02:00
LocalAI [bot]
594bb462ab chore: ⬆️ Update ggml-org/llama.cpp to 73e53dc834c0a2336cd104473af6897197b96277 (#5719)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-24 22:47:48 +00:00
Ettore Di Giacinto
aa730a7b96 chore(model gallery): add delta-vector_austral-24b-winton (#5717)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-24 18:37:28 +02:00
Ettore Di Giacinto
0a454c527a chore(model gallery): add astrosage-70b (#5716)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-24 18:34:37 +02:00
Ettore Di Giacinto
cf86bcb984 chore(model gallery): add skywork_skywork-swe-32b (#5715)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-24 18:29:36 +02:00
Ettore Di Giacinto
a6d9988e84 feat(backend gallery): add meta packages (#5696)
* feat(backend gallery): add meta packages

So we can have meta packages such as "vllm" that automatically install
the corresponding package depending on the GPU currently detected in
the system.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat: use a metadata file

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-24 17:08:27 +02:00
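A sketch of how a meta package might resolve to a concrete backend for the detected GPU, per the description above; the package names and vendor strings are hypothetical, and the resolved name could be recorded in the metadata file the follow-up commit mentions:

```
package main

import "fmt"

// resolveMetaPackage maps a meta package such as "vllm" to the concrete
// backend build for the detected GPU vendor.
func resolveMetaPackage(meta, gpuVendor string) (string, error) {
	concrete := map[string]map[string]string{
		"vllm": {
			"nvidia": "vllm-cuda",
			"amd":    "vllm-rocm",
			"intel":  "vllm-intel",
		},
	}
	pkgs, ok := concrete[meta]
	if !ok {
		return "", fmt.Errorf("unknown meta package %q", meta)
	}
	pkg, ok := pkgs[gpuVendor]
	if !ok {
		return "", fmt.Errorf("no %s build for GPU vendor %q", meta, gpuVendor)
	}
	return pkg, nil
}

func main() {
	pkg, err := resolveMetaPackage("vllm", "nvidia")
	fmt.Println(pkg, err)
}
```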
Ettore Di Giacinto
f3a114342e chore(model gallery): add mistralai_mistral-small-3.2-24b-instruct-2506 (#5714)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-24 13:59:14 +02:00
LocalAI [bot]
0d275ccc03 chore: ⬆️ Update ggml-org/llama.cpp to ce82bd0117bd3598300b3a089d13d401b90279c7 (#5712)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-24 08:37:32 +02:00
LocalAI [bot]
58dba3f01c chore: ⬆️ Update ggml-org/whisper.cpp to a422176937c5bb20eb58d969995765f90d3c1a9b (#5713)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-23 22:45:31 +00:00
kilavvy
b68d6e8088 Docs: Fix typos (#5709)
* Update GPU-acceleration.md

Signed-off-by: kilavvy <140459108+kilavvy@users.noreply.github.com>

* Update image-generation.md

Signed-off-by: kilavvy <140459108+kilavvy@users.noreply.github.com>

---------

Signed-off-by: kilavvy <140459108+kilavvy@users.noreply.github.com>
2025-06-23 18:15:06 +02:00
LocalAI [bot]
2352cec7e6 chore: ⬆️ Update ggml-org/llama.cpp to 238005c2dc67426cf678baa2d54c881701693288 (#5710)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-22 22:43:41 +00:00
Ettore Di Giacinto
de72ae79b5 chore(model gallery): add ds-r1-qwen3-8b-arliai-rpr-v4-small-iq-imatrix (#5708)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-22 09:05:55 +02:00
Ettore Di Giacinto
884c07d5f9 chore(model gallery): add allura-org_q3-8b-kintsugi (#5707)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-22 09:04:49 +02:00
Ettore Di Giacinto
cca7cbef1e chore(model gallery): add qwen3-the-xiaolong-omega-directive-22b-uncensored-abliterated-i1 (#5706)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-22 09:01:08 +02:00
Ettore Di Giacinto
32cd0d03d4 chore(model gallery): add menlo_jan-nano (#5705)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-22 08:57:33 +02:00
Ettore Di Giacinto
ee4d9e83d0 Update stalebot.yml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-22 08:51:13 +02:00
LocalAI [bot]
5547e08a30 chore: ⬆️ Update ggml-org/llama.cpp to aa0ef5c578eef4c2adc7be1282f21bab5f3e8d26 (#5703)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-21 23:54:53 +02:00
LocalAI [bot]
ca7385c303 chore: ⬆️ Update ggml-org/whisper.cpp to e6c10cf3d5d60dc647eb6cd5e73d3c347149f746 (#5702)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-21 23:54:28 +02:00
Ettore Di Giacinto
28759e79d3 chore(model gallery): add qwen3-the-josiefied-omega-directive-22b-uncensored-abliterated-i1 (#5704)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-21 23:54:05 +02:00
Ettore Di Giacinto
40249b6b84 Update stalebot.yml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-21 22:38:23 +02:00
Ettore Di Giacinto
e09e47bada chore(ci): add stale bot (#5700)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-21 20:12:08 +02:00
Ettore Di Giacinto
3796558aeb Update quickstart.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-21 20:11:57 +02:00
LocalAI [bot]
cca4f010f8 chore: ⬆️ Update ggml-org/llama.cpp to 06cbedfca1587473df9b537f1dd4d6bfa2e3de13 (#5697)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-20 22:44:39 +00:00
Ettore Di Giacinto
be3ff482d0 chore(ci): try to optimize disk space when tagging latest (#5695)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-20 15:54:14 +02:00
LocalAI [bot]
af255cd0be chore: ⬆️ Update ggml-org/llama.cpp to 8f71d0f3e86ccbba059350058af8758cafed73e6 (#5692)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-20 15:53:55 +02:00
LocalAI [bot]
8000228d1b chore: ⬆️ Update ggml-org/whisper.cpp to 3e65f518ddf840b13b74794158aa95a2c8aa30cc (#5691)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-20 15:53:38 +02:00
Ettore Di Giacinto
79abe0ad77 Drop latest references to extras images 2025-06-20 15:51:16 +02:00
Ettore Di Giacinto
8131d11d1f Update quickstart.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-19 22:42:38 +02:00
LocalAI [bot]
beb01c91f3 docs: ⬆️ update docs version mudler/LocalAI (#5690)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-19 22:13:16 +02:00
Ettore Di Giacinto
1ccd64ff6a chore: drop extras references from docs
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-19 22:04:28 +02:00
Ettore Di Giacinto
fc7681c68c Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-19 21:46:09 +02:00
Ettore Di Giacinto
49d026a229 Update backends.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-19 19:47:09 +02:00
leopardracer
f9b968e19d Fix Typos and Improve Clarity in GPU Acceleration Documentation (#5688)
Update GPU-acceleration.md

Signed-off-by: leopardracer <136604165+leopardracer@users.noreply.github.com>
2025-06-19 15:41:13 +02:00
LocalAI [bot]
022d4a5ecb chore: ⬆️ Update ggml-org/whisper.cpp to ecb8f3c2b4e282d5ef416516bcbfb92821f06bf6 (#5686)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-19 08:23:42 +02:00
LocalAI [bot]
0e917eb01d chore: ⬆️ Update ggml-org/llama.cpp to 8d947136546773f6410756f37fcc5d3e65b8135d (#5685)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-19 08:23:23 +02:00
Ettore Di Giacinto
efde0eaf83 feat(backend gallery): display download progress (#5687)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-18 23:49:44 +02:00
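Download progress of the kind added above is typically tracked by wrapping the response body in a counting reader and comparing against the expected size (compare the download-vs-extract status split in the gallery fix above). A self-contained sketch; the URL is a placeholder:

```
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"sync/atomic"
)

// countingReader wraps a reader and tracks bytes read, so a UI can
// report download progress against the expected size.
type countingReader struct {
	r io.Reader
	n atomic.Int64
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n.Add(int64(n))
	return n, err
}

func main() {
	resp, err := http.Get("https://example.org/backend.tar") // placeholder URL
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	cr := &countingReader{r: resp.Body}
	if _, err := io.Copy(io.Discard, cr); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("downloaded %d of %d bytes\n", cr.n.Load(), resp.ContentLength)
}
```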
Maxim Evtush
add8fc35a2 Fix Typos in Documentation and Python Comments (#5658)
* Update istftnet.py

Signed-off-by: Maxim Evtush <154841002+maximevtush@users.noreply.github.com>

* Update GPU-acceleration.md

Signed-off-by: Maxim Evtush <154841002+maximevtush@users.noreply.github.com>

---------

Signed-off-by: Maxim Evtush <154841002+maximevtush@users.noreply.github.com>
2025-06-18 22:11:13 +02:00
Ettore Di Giacinto
9bcf4c56f1 fix(backends gallery): propagate p2p settings to correctly draw menu (#5684)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-18 22:06:12 +02:00
Ettore Di Giacinto
3fcfaec7c8 chore(ci): move also other jobs to public runner (#5683)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-18 22:00:12 +02:00
Ettore Di Giacinto
a463d40a3e chore(ci): try to use public runners also for release builds (#5681)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-18 21:51:54 +02:00
Ettore Di Giacinto
1e1f0ee321 chore(backends): move bark-cpp to the backend gallery (#5682)
chore(bark-cpp): move out of the main binary

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-18 19:48:50 +02:00
Ettore Di Giacinto
80b3139fa0 Update landing.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-18 19:48:17 +02:00
LocalAI [bot]
5173d37acb chore: ⬆️ Update ggml-org/llama.cpp to 860a9e4eeff3eb2e7bd1cc38f65787cc6c8177af (#5678)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-18 10:01:46 +02:00
LocalAI [bot]
470e48a900 chore: ⬆️ Update ggml-org/whisper.cpp to f3ff80ea8da044e5b8833e7ba54ee174504c518d (#5677)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-18 10:01:08 +02:00
Ettore Di Giacinto
b706dddc93 chore(ci): switch to public runners for base images (#5680)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-17 22:38:50 +02:00
Ettore Di Giacinto
867db3f888 chore(docs): add backend url
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-17 22:35:21 +02:00
Ettore Di Giacinto
b79aa31398 chore: move backends docs
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-17 22:26:40 +02:00
Ettore Di Giacinto
fb9a09d49c chore(backend gallery): add description for remaining backends (#5679)
* chore(backend gallery): add description for remaining backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(backend gallery): add linter

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-17 22:21:44 +02:00
Ettore Di Giacinto
0a78f0ad2d chore(backend gallery): re-order and add description for vLLM (#5676)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-17 17:31:53 +02:00
Ettore Di Giacinto
d68660bd5a chore(deps): bump llama.cpp to 'e434e69183fd9e1031f4445002083178c331a28b' (#5665)
chore(deps): bump llama.cpp to 'e434e69183fd9e1031f4445002083178c331a28b'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-17 17:00:10 +02:00
LocalAI [bot]
30ceee2dec chore: ⬆️ Update ggml-org/whisper.cpp to 2a4d6db7d90899aff3d58d70996916968e4e0d27 (#5661)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-17 09:21:05 +02:00
dependabot[bot]
18c38335fc chore(deps): bump securego/gosec from 2.22.4 to 2.22.5 (#5663)
Bumps [securego/gosec](https://github.com/securego/gosec) from 2.22.4 to 2.22.5.
- [Release notes](https://github.com/securego/gosec/releases)
- [Changelog](https://github.com/securego/gosec/blob/master/.goreleaser.yml)
- [Commits](https://github.com/securego/gosec/compare/v2.22.4...v2.22.5)

---
updated-dependencies:
- dependency-name: securego/gosec
  dependency-version: 2.22.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 23:12:27 +00:00
Ettore Di Giacinto
89040ff6f7 fix: add python symlink, use absolute python env path when running backends (#5664)
* fix: add python symlink, use absolute python env path when running backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(ci): do not push images when building PRs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-16 23:00:53 +02:00
Ettore Di Giacinto
de343700fd Don't run python_backend workflow on PR
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-16 11:06:56 +02:00
Ettore Di Giacinto
87d18ad951 chore: Add python3 to images (#5660)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-16 11:05:44 +02:00
Ettore Di Giacinto
912c8eff04 chore(ci): use public runner for extra backends (#5657)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-16 08:21:18 +02:00
LocalAI [bot]
481f30bde8 chore: ⬆️ Update ggml-org/llama.cpp to 30e5b01de2a0bcddc7c063c8ef0802703a958417 (#5659)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-15 23:03:40 +00:00
Ettore Di Giacinto
236ac30252 chore(ci): do not specify image-type anymore
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-15 17:28:40 +02:00
Ettore Di Giacinto
6f761e62e4 update README
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-15 16:06:43 +02:00
FT
1f29b5f38e Fix Typos and Improve Documentation Clarity (#5648)
* Update p2p.go

Signed-off-by: FT <140458077+zeevick10@users.noreply.github.com>

* Update GPU-acceleration.md

Signed-off-by: FT <140458077+zeevick10@users.noreply.github.com>

---------

Signed-off-by: FT <140458077+zeevick10@users.noreply.github.com>
2025-06-15 16:04:44 +02:00
LocalAI [bot]
33d702c5e0 chore: ⬆️ Update ggml-org/llama.cpp to 3cb203c89f60483e349f841684173446ed23c28f (#5644)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-15 16:03:13 +02:00
Ettore Di Giacinto
95ff236127 ci: do not fire python_backend on PRs
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-15 16:02:30 +02:00
Ettore Di Giacinto
2d64269763 feat: Add backend gallery (#5607)
* feat: Add backend gallery

This PR adds support for managing backends similarly to models. A
backend gallery is now available, which can be used to install and
remove extra backends.
The backend gallery can be configured similarly to a model gallery, and
API calls allow installing and removing new backends at runtime, as
well as during the startup phase of LocalAI.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add backends docs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* wip: Backend Dockerfile for python backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat: drop extras images, build python backends separately

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fixup on all backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* test CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Tweaks

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Drop old backends leftovers

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixup CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Move dockerfile upper

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fix proto

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Feature dropped for consistency - we prefer model galleries

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add missing packages in the build image

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* exllama is only available on cublas

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* pin torch on chatterbox

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups to index

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Debug CI

* Install accelerator deps

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Add target arch

* Add cuda minor version

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Use self-hosted runners

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* ci: use quay for test images

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fixups for vllm and chatterbox

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Small fixups on CI

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chatterbox is only available for nvidia

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Simplify CI builds

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Adapt test, use qwen3

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(model gallery): add jina-reranker-v1-tiny-en-gguf

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(gguf-parser): recover from potential panics that can happen while reading ggufs with gguf-parser

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Use reranker from llama.cpp in AIO images

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Limit concurrent jobs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-15 14:56:52 +02:00
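One part of the large change above recovers from panics that can happen while reading GGUFs with gguf-parser. The usual Go pattern converts a panic in the parser into an error at the call site; `parse` below is a stand-in for the real entry point, not gguf-parser's actual API:

```
package main

import "fmt"

// safeParse runs a GGUF parse and converts panics into errors, in the
// spirit of the gguf-parser fix above.
func safeParse(path string, parse func(string) error) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("recovered while parsing %s: %v", path, r)
		}
	}()
	return parse(path)
}

func main() {
	err := safeParse("model.gguf", func(string) error {
		panic("malformed gguf") // simulate a parser crash
	})
	fmt.Println(err)
}
```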
LocalAI [bot]
a7a6020328 chore: ⬆️ Update ggml-org/whisper.cpp to 705db0f728310c32bc96f4e355e2b18076932f75 (#5643)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-15 08:39:00 +02:00
Ettore Di Giacinto
40618164b2 chore: improve tests (#5646)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-14 10:07:05 +02:00
fuder.eth
eb8c29f90a Minor Documentation Updates: Clarified Comments in Python and Go Files (#5641)
* Update ui.go

Signed-off-by: fuder.eth <139509124+vtjl10@users.noreply.github.com>

* Update backend.py

Signed-off-by: fuder.eth <139509124+vtjl10@users.noreply.github.com>

---------

Signed-off-by: fuder.eth <139509124+vtjl10@users.noreply.github.com>
2025-06-13 19:55:25 +02:00
Gavin Mogan
63116a2c6a docs: Update docs metadata headers so that when mentioned on Slack it doesn't say Hugo (#5642)
Update docs metadata headers so that when mentioned on Slack it doesn't say Hugo

Signed-off-by: Gavin Mogan <github@gavinmogan.com>
2025-06-13 19:54:57 +02:00
LocalAI [bot]
311c2cf539 chore: ⬆️ Update ggml-org/llama.cpp to ed52f3668e633423054a4eab61bb7efee47025ab (#5636)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-12 23:33:33 +02:00
Ettore Di Giacinto
a6fcbd991d chore(model gallery): add yanfei-v2-qwen3-32b (#5639)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-12 22:24:13 +02:00
kilavvy
2e1dc8deef Fix Typos in Comments and Error Messages (#5637)
* Update initializers.go

Signed-off-by: kilavvy <140459108+kilavvy@users.noreply.github.com>

* Update base.go

Signed-off-by: kilavvy <140459108+kilavvy@users.noreply.github.com>

---------

Signed-off-by: kilavvy <140459108+kilavvy@users.noreply.github.com>
2025-06-12 18:34:32 +02:00
LocalAI [bot]
282e017b22 chore: ⬆️ Update ggml-org/whisper.cpp to ebbc874e85b518f963a87612f6d79f5c71a55e84 (#5635)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-11 23:47:00 +02:00
Ettore Di Giacinto
f86cb8be2d chore(model gallery): add qwen3-embedding-0.6b (#5634)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:40:41 +02:00
Ettore Di Giacinto
5c56ec4f87 chore(model gallery): add qwen3-embedding-8b (#5633)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:38:44 +02:00
Ettore Di Giacinto
dd2845a034 chore(model gallery): add qwen3-embedding-4b (#5632)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:31:43 +02:00
Ettore Di Giacinto
2e7db014b6 chore(model gallery): add openbuddy_openbuddy-r1-0528-distill-qwen3-32b-preview0-qat (#5631)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:27:30 +02:00
Ettore Di Giacinto
6faeee1d92 chore(model gallery): add baai_robobrain2.0-7b (#5630)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:17:32 +02:00
Ettore Di Giacinto
31d73eb934 chore(model gallery): add mistralai_magistral-small-2506 (#5629)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:11:44 +02:00
Ettore Di Giacinto
60863b9e52 chore(model gallery): add sophosympatheia_strawberrylemonade-l3-70b-v1.0 (#5628)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:08:17 +02:00
Ettore Di Giacinto
a9fc71e2f3 chore(model gallery): add kwaipilot_kwaicoder-autothink-preview (#5627)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-11 11:06:38 +02:00
leopardracer
ce9a9a30e0 Improve Comments and Documentation for MixedMode and ParseJSON Functions (#5626)
Update parse.go

Signed-off-by: leopardracer <136604165+leopardracer@users.noreply.github.com>
2025-06-11 09:46:53 +02:00
LocalAI [bot]
2693a21da5 chore: ⬆️ Update ggml-org/whisper.cpp to 2679bec6e09231c6fd59715fcba3eebc9e2f6076 (#5625)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-11 09:35:28 +02:00
LocalAI [bot]
d460eab18e chore: ⬆️ Update ggml-org/llama.cpp to 3678b838bb71eaccbaeb479ff38c2e12bfd2f960 (#5620)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-11 09:00:39 +02:00
LocalAI [bot]
c61e5fe266 chore: ⬆️ Update ggml-org/whisper.cpp to d78f08142381c1460604713e2f2ddf3331c7d816 (#5619)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-06-10 17:29:58 +02:00
Ettore Di Giacinto
88e570b5de fix(deps): pin grpcio (#5621)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-10 14:21:51 +02:00
Ettore Di Giacinto
6efa97ce0b chore(model gallery): add qwen2.5-omni-3b (#5606)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-09 10:54:42 +02:00
LocalAI [bot]
41cde5468a chore: ⬆️ Update ggml-org/llama.cpp to 247e5c6e447707bb4539bdf1913d206088a8fc69 (#5605)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-09 00:11:46 +02:00
Richard Palethorpe
d650647db9 fix(realtime): Use updated model on session update (#5604)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-06-09 00:11:05 +02:00
LocalAI [bot]
5bc7ef37a2 chore: ⬆️ Update ggml-org/llama.cpp to 5787b5da57e54dba760c2deeac1edf892e8fc450 (#5601)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-08 08:44:24 +02:00
Ettore Di Giacinto
e0a52807c8 chore(model gallery): add akhil-theerthala_kuvera-8b-v0.1.0 (#5600)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-07 08:59:20 +02:00
LocalAI [bot]
1a95a19f87 chore: ⬆️ Update ggml-org/llama.cpp to 745aa5319b9930068aff5e87cf5e9eef7227339b (#5598)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-07 08:59:05 +02:00
LocalAI [bot]
bcfc08e5bf chore: ⬆️ Update ggml-org/whisper.cpp to b175baa665bc35f97a2ca774174f07dfffb84e19 (#5597)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-07 08:57:52 +02:00
Ettore Di Giacinto
4d282ca963 chore(model gallery): add nbeerbower_qwen3-gutenberg-encore-14b (#5596)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-06 10:20:48 +02:00
Ettore Di Giacinto
525f49b69d chore(model gallery): add open-thoughts_openthinker3-7b (#5595)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-06 10:14:00 +02:00
LocalAI [bot]
786aa1de05 chore: ⬆️ Update ggml-org/llama.cpp to 1caae7fc6c77551cb1066515e0f414713eebb367 (#5593)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-06 00:10:02 +02:00
Ettore Di Giacinto
ea82deb16b chore(model gallery): add ultravox-v0_5-llama-3_1-8b (#5592)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-05 19:23:51 +02:00
Ettore Di Giacinto
b0891309ba chore(model gallery): add ultravox-v0_5-llama-3_2-1b (#5591)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-05 19:22:01 +02:00
Ettore Di Giacinto
b034cff149 feat: improve RAM estimation by using values from summary (#5525)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-05 19:16:26 +02:00
Ettore Di Giacinto
432f34f001 chore(model gallery): add goekdeniz-guelmez_josiefied-qwen3-14b-abliterated-v3 (#5590)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-05 19:16:04 +02:00
Gavin Mogan
cbd61dccd4 fix(install.sh): vulkan docker tag (#5589)
The vulkan docker tag is not prefixed with `gpu`:

```
regctl tag ls localai/localai | grep 2.29 | grep vulkan
v2.29.0-vulkan
```

Signed-off-by: Gavin Mogan <github@gavinmogan.com>
2025-06-05 08:12:16 +02:00
LocalAI [bot]
0de0817d71 chore: ⬆️ Update ggml-org/whisper.cpp to 799eacdde40b3c562cfce1508da1354b90567f8f (#5586)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-05 08:11:38 +02:00
LocalAI [bot]
bf57d6e5ac chore: ⬆️ Update ggml-org/llama.cpp to 0d3984424f2973c49c4bcabe4cc0153b4f90c601 (#5585)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-05 08:11:12 +02:00
Ettore Di Giacinto
0b9603e010 chore(model gallery): add deepseek-ai_deepseek-r1-0528-qwen3-8b (#5580)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-04 15:28:45 +02:00
Ettore Di Giacinto
8d925217f6 chore(model gallery): add e-n-v-y_legion-v2.1-llama-70b-elarablated-v0.8-hf (#5579)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-04 11:12:37 +02:00
Ettore Di Giacinto
669a1ccae6 chore(model gallery): add nvidia_nemotron-research-reasoning-qwen-1.5b (#5578)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-04 11:07:10 +02:00
Ettore Di Giacinto
7a7d36ad63 chore(model gallery): add arcee-ai_homunculus (#5577)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-04 10:02:15 +02:00
Ettore Di Giacinto
8b889955b4 chore(deps): bump pytorch to 2.7 in vllm (#5576)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-04 08:56:45 +02:00
dependabot[bot]
a226555949 chore(deps): bump GrantBirki/git-diff-action from 2.8.0 to 2.8.1 (#5564)
Bumps [GrantBirki/git-diff-action](https://github.com/grantbirki/git-diff-action) from 2.8.0 to 2.8.1.
- [Release notes](https://github.com/grantbirki/git-diff-action/releases)
- [Commits](https://github.com/grantbirki/git-diff-action/compare/v2.8.0...v2.8.1)

---
updated-dependencies:
- dependency-name: GrantBirki/git-diff-action
  dependency-version: 2.8.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-04 08:41:47 +02:00
LocalAI [bot]
f38f17865a chore: ⬆️ Update ggml-org/whisper.cpp to 82f461eaa4e6a1ba29fc0dbdaa415a9934ee8a1d (#5575)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-04 08:41:26 +02:00
LocalAI [bot]
03f380701b chore: ⬆️ Update ggml-org/llama.cpp to 7e00e60ef86645a01fda738fef85b74afa016a34 (#5574)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-04 08:26:36 +02:00
Ettore Di Giacinto
65e2866c97 fix(chatterbox): install only with cuda 12 (#5573)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-03 14:57:47 +02:00
Ettore Di Giacinto
cd3cd899ad chore(deps): bump llama.cpp to '363757628848a27a435bbf22ff9476e9aeda5f40' (#5571)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-03 12:19:16 +02:00
LocalAI [bot]
c2ae3100e7 chore: ⬆️ Update ggml-org/whisper.cpp to e05af2457b7b4134ee626dc044294a19b096e62f (#5569)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-03 11:29:18 +02:00
Ettore Di Giacinto
ec0868e691 chore(deps): bump grpcio from 1.72.0 to 1.72.1 (#5570)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-03 09:59:43 +02:00
Ettore Di Giacinto
489c289916 Revert "fix(ci): try to add different mirrors to avoid 403 issues" (#5555)
Revert "fix(ci): try to add different mirrors to avoid 403 issues (#5554)"

This reverts commit 7c9f011d91.
2025-06-02 08:46:29 +02:00
LocalAI [bot]
ac5fb50bcc chore: ⬆️ Update ggml-org/whisper.cpp to 7fd6fa809749078aa00edf945e959c898f2bd1af (#5556)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-06-02 08:45:47 +02:00
Ettore Di Giacinto
7c9f011d91 fix(ci): try to add different mirrors to avoid 403 issues (#5554)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-06-01 08:48:53 +02:00
Ettore Di Giacinto
80f7f17843 chore(deps): bump llama.cpp to 'e562eece7cb476276bfc4cbb18deb7c0369b2233' (#5552)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-31 12:46:32 +02:00
LocalAI [bot]
f0c41d6405 chore: ⬆️ Update ggml-org/whisper.cpp to 98dfe8dc264b7d0d1daccfff9a9c043bcc2ece4b (#5542)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-31 08:51:15 +02:00
Ettore Di Giacinto
8472321a81 feat(ui): display thinking tags appropriately (#5540)
* fix(streaming): stream complete runes

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat(ui): display thinking tags separately

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-31 08:50:46 +02:00
Ettore Di Giacinto
3bac4724ac fix(streaming): stream complete runes (#5539)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-31 08:48:05 +02:00
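For context on the two streaming fixes above: when completion output is flushed in arbitrary byte chunks, a multi-byte UTF-8 character can be split across chunk boundaries and render as mojibake. Below is a minimal sketch of the rune-buffering idea in Go, with illustrative names rather than LocalAI's actual code:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// runeBuffer accumulates streamed bytes and releases only complete
// UTF-8 runes, holding back a trailing partial sequence until the
// next chunk arrives.
type runeBuffer struct {
	pending []byte
}

// Push appends a chunk and returns the longest prefix ending on a
// rune boundary; any trailing incomplete rune is kept for next time.
// An incomplete rune spans at most utf8.UTFMax-1 bytes, so only the
// tail of the buffer needs inspecting.
func (b *runeBuffer) Push(chunk []byte) string {
	b.pending = append(b.pending, chunk...)
	n := len(b.pending)
	cut := n
	for i := n - 1; i >= 0 && i > n-utf8.UTFMax; i-- {
		if utf8.RuneStart(b.pending[i]) {
			if !utf8.FullRune(b.pending[i:]) {
				cut = i // hold back the incomplete trailing rune
			}
			break
		}
	}
	out := string(b.pending[:cut])
	b.pending = append(b.pending[:0], b.pending[cut:]...)
	return out
}

func main() {
	// "é" is 0xC3 0xA9; split it across two chunks.
	var b runeBuffer
	fmt.Printf("%q\n", b.Push([]byte{'h', 0xC3})) // "h": 0xC3 is held back
	fmt.Printf("%q\n", b.Push([]byte{0xA9}))      // "é"
}
```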
Ettore Di Giacinto
59db154cbc feat(ui): allow uploading PDF and text files, and add support for multiple input files (#5538)
* Support file inputs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat: support multiple files

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* show preview of files

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-31 08:47:48 +02:00
Ettore Di Giacinto
1cc4525f15 fix: adapt test to error changes
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-30 17:43:59 +02:00
Ettore Di Giacinto
45c58752e5 feat(ui): add audio upload button in chat view (#5526)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-30 16:47:31 +02:00
Ettore Di Giacinto
d5c9c717b5 feat(chatterbox): add new backend (#5524)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-30 10:52:55 +02:00
Ettore Di Giacinto
dd7fa6b9f7 chore(deps): bump llama.cpp to 'e83ba3e460651b20a594e9f2f0f0bffb998d3ce1' (#5527)
chore(deps): bump llama.cpp to 'e83ba3e460651b20a594e9f2f0f0bffb998d3ce1'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-30 10:29:01 +02:00
LocalAI [bot]
039c318607 chore: ⬆️ Update ggml-org/whisper.cpp to e5e900dd00747f747143ad30a697c8f21ddcd59e (#5522)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-30 08:34:52 +02:00
Ettore Di Giacinto
0870bf5af6 fix(input): correctly handle the case where a list of strings is passed as input (#5521)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-29 22:06:42 +02:00
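An OpenAI-style `input` field can arrive either as a single string or as a list of strings. A minimal sketch of accepting both shapes in one JSON field follows; the type name is an illustrative assumption, not LocalAI's actual type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// InputList accepts both `"input": "text"` and `"input": ["a", "b"]`.
type InputList []string

func (i *InputList) UnmarshalJSON(data []byte) error {
	// Try the single-string shape first.
	var single string
	if err := json.Unmarshal(data, &single); err == nil {
		*i = InputList{single}
		return nil
	}
	// Fall back to the list-of-strings shape.
	var many []string
	if err := json.Unmarshal(data, &many); err != nil {
		return err
	}
	*i = InputList(many)
	return nil
}

func main() {
	var a, b InputList
	_ = json.Unmarshal([]byte(`"hello"`), &a)
	_ = json.Unmarshal([]byte(`["a","b"]`), &b)
	fmt.Println(a, b) // [hello] [a b]
}
```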
Ettore Di Giacinto
6073b9944e chore(model gallery): add moondream2-20250414 (#5518)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-29 10:47:11 +02:00
LocalAI [bot]
ef0e0f3777 chore: ⬆️ Update ggml-org/whisper.cpp to 1f5fdbecb411a61b8576242e5170c5ecef24b05a (#5515)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-29 09:45:23 +02:00
LocalAI [bot]
b7de9e0aa0 chore: ⬆️ Update ggml-org/llama.cpp to d98f2a35fcf4a8d3e660ad48cd19e2a1f3d5b2ef (#5514)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-29 09:44:36 +02:00
Ettore Di Giacinto
39292407a1 chore(model gallery): add pku-ds-lab_fairyr1-32b (#5517)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-29 09:43:45 +02:00
Ettore Di Giacinto
f257bf8d14 chore(model gallery): add pku-ds-lab_fairyr1-14b-preview (#5516)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-29 09:37:08 +02:00
Ettore Di Giacinto
8ca2fb5ef1 chore(model gallery): add qwen2.5-omni-7b (#5513)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-28 18:15:09 +02:00
LocalAI [bot]
3a790fed13 chore: ⬆️ Update ggml-org/whisper.cpp to 0ed00d9d30e8c984936ff9ed9a4fcd475d6d82e5 (#5510)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-28 09:00:22 +02:00
LocalAI [bot]
a334f28a07 chore: ⬆️ Update ggml-org/llama.cpp to a3c30846e410c91c11d7bf80978795a03bb03dee (#5509)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-28 01:39:38 +00:00
Ettore Di Giacinto
dc6663d121 fix(template): we do not always have .Name (#5508)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-27 18:44:24 +02:00
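A sketch of the general pattern behind this fix: guarding an optional field with `{{if}}` in Go's text/template so rendering succeeds whether or not the field is set. This shows the generic technique, not LocalAI's actual chat template:

```go
package main

import (
	"os"
	"text/template"
)

type message struct {
	Name    string // optional: may be empty
	Content string
}

func main() {
	// {{if .Name}} guards the optional field so the template renders
	// correctly whether or not a name was provided.
	tmpl := template.Must(template.New("msg").Parse(
		"{{if .Name}}{{.Name}}: {{end}}{{.Content}}\n"))
	_ = tmpl.Execute(os.Stdout, message{Content: "hello"})             // "hello"
	_ = tmpl.Execute(os.Stdout, message{Name: "user", Content: "hi"}) // "user: hi"
}
```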
LocalAI [bot]
103caf9823 chore: ⬆️ Update ggml-org/llama.cpp to a26c4cc11ec7c6574e3691e90ecdbd67deeea35b (#5500)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-27 17:13:55 +02:00
Ettore Di Giacinto
4226d2d837 Update index.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-27 10:24:37 +02:00
Ettore Di Giacinto
7434256fc9 chore(model gallery): add ms-24b-mullein-v0 (#5506)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-27 10:14:52 +02:00
Ettore Di Giacinto
86a0563ae1 chore(model gallery): add llama3-24b-mullein-v1 (#5505)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-27 10:13:40 +02:00
Ettore Di Giacinto
c68951cbfe chore(model gallery): add mrm8488_qwen3-14b-ft-limo (#5504)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-27 10:04:16 +02:00
Ettore Di Giacinto
8408084120 chore(model gallery): add luckyrp-24b (#5503)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-27 10:02:25 +02:00
Ettore Di Giacinto
0f2f4c7e23 chore(model gallery): add allura-org_q3-30b-a3b-designant (#5502)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-27 09:59:56 +02:00
Ettore Di Giacinto
5ffad3b004 chore(deps): remove pin on transformers (#5501)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-27 09:24:27 +02:00
Ettore Di Giacinto
e5ccd97b8c Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-26 20:00:31 +02:00
LocalAI [bot]
a3b08d46ec chore: ⬆️ Update ggml-org/whisper.cpp to ea9f206f18d86c4eb357db9fdc52e4d9dc24435e (#5464)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-26 19:56:44 +02:00
Ettore Di Giacinto
090f5065fc chore(deps): bump llama.cpp to 'fef693dc6b959a8e8ba11558fbeaad0b264dd457' (#5467)
Also try to use a smaller model for integration tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-26 17:19:46 +02:00
Ettore Di Giacinto
88de2ea01a feat(llama.cpp): add support for audio input (#5466)
* feat(llama.cpp): add support for audio input

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Adapt tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-26 16:06:03 +02:00
Ettore Di Giacinto
9650d490d4 chore(model gallery): add nvidia_acereason-nemotron-14b (#5463)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-26 10:08:10 +02:00
Ettore Di Giacinto
4de1c83764 chore(model gallery): add allura-org_q3-30b-a3b-pentiment (#5462)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-26 09:46:44 +02:00
Ettore Di Giacinto
e5978dc714 chore(model gallery): add medgemma-27b-text-it (#5461)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-26 09:44:13 +02:00
Ettore Di Giacinto
f784986e19 chore(model gallery): add medgemma-4b-it (#5460)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-26 09:41:09 +02:00
Richard Palethorpe
bf6426aef2 feat: Realtime API support reboot (#5392)
* feat(realtime): Initial Realtime API implementation

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore: go mod tidy

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* feat: Implement transcription only mode for realtime API

Reduce the scope of the realtime API for the initial release and make
transcription-only mode functional.

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* chore(build): Build backends on a separate layer to speed up core only changes

Signed-off-by: Richard Palethorpe <io@richiejp.com>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: Richard Palethorpe <io@richiejp.com>
Co-authored-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-25 22:25:05 +02:00
LocalAI [bot]
4a91950848 chore: ⬆️ Update ggml-org/llama.cpp to d13d0f6135803822ec1cd7e3efb49360b88a1bdf (#5448)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-24 08:50:41 +02:00
LocalAI [bot]
4614ea1685 chore: ⬆️ Update ggml-org/whisper.cpp to 13d92d08ae26031545921243256aaaf0ee057943 (#5449)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-23 23:44:06 +00:00
Ettore Di Giacinto
f0bf59d1d9 chore(model gallery): add vulpecula-4b (#5445)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-23 09:51:21 +02:00
Ettore Di Giacinto
83dd678959 chore(model gallery): add whiterabbitneo_whiterabbitneo-v3-7b (#5444)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-23 09:46:28 +02:00
Ettore Di Giacinto
9d6c9f874a chore(model gallery): add arliai_qwq-32b-arliai-rpr-v4 (#5443)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-23 09:34:33 +02:00
LocalAI [bot]
c62f2bb336 chore: ⬆️ Update ggml-org/llama.cpp to 8a1d206f1d2b4e45918b589f3165b4be232f7ba8 (#5440)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-23 09:22:38 +02:00
LocalAI [bot]
38aeca6f9c chore: ⬆️ Update ggml-org/whisper.cpp to 78b31ca7824500e429ba026c1a9b48e0b41c50cb (#5439)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-23 06:22:41 +00:00
Ettore Di Giacinto
3b0cf52f6a feat(llama.cpp): add reranking (#5396)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-22 21:49:30 +02:00
LocalAI [bot]
bac3022044 chore: ⬆️ Update ggml-org/whisper.cpp to bd1cb0c8e3a04baa411dc12c1325b6a9f12ee7f4 (#5424)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-22 21:49:06 +02:00
LocalAI [bot]
cd41701524 chore: ⬆️ Update ggml-org/llama.cpp to 8e186ef0e764c7a620e402d1f76ebad60bf31c49 (#5423)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-22 21:48:51 +02:00
Ettore Di Giacinto
6a382a1afe fix(transformers): try to pin to working release (#5426)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-22 12:50:51 +02:00
Ettore Di Giacinto
8dcab2f9c7 chore(scripts): allow specifying quants (#5430)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-22 11:53:30 +02:00
Ettore Di Giacinto
1d1d5627f0 chore(model gallery): add delta-vector_archaeo-12b-v2 (#5429)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-22 11:38:48 +02:00
Ettore Di Giacinto
233b3369ad chore(model gallery): add mistralai_devstral-small-2505 (#5428)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-22 11:37:17 +02:00
Ettore Di Giacinto
c587ac0aef chore(model gallery): add nvidia_llama-3.1-nemotron-nano-4b-v1.1 (#5427)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-22 11:33:33 +02:00
David Thole
38c5d16b57 feat(docs): updating the documentation on fine tuning and advanced guide. (#5420)
Updating the documentation on fine-tuning and the advanced guide. This mirrors how modern versions of llama.cpp operate
2025-05-21 19:11:00 +02:00
LocalAI [bot]
ef6fc052eb chore: ⬆️ Update ggml-org/llama.cpp to b7a17463ec190aeee7b9077c606c910fb4688b84 (#5399)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-21 09:06:09 +02:00
LocalAI [bot]
7ff35c08ac chore: ⬆️ Update ggml-org/whisper.cpp to 62dc8f7d7b72ca8e75c57cd6a100712c631fa5d5 (#5398)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-21 09:00:42 +02:00
LocalAI [bot]
43f75ee7f3 chore(model-gallery): ⬆️ update checksum (#5422)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-21 03:52:39 +00:00
Ettore Di Giacinto
82811a9630 fix(transformers): pin protobuf (#5421)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 20:28:31 +02:00
Ettore Di Giacinto
04a3d8e5ac feat(ui): add error page to display errors (#5418)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 12:17:27 +02:00
Ettore Di Giacinto
9af09b3f8c chore(model gallery): fixup
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 12:17:21 +02:00
Ettore Di Giacinto
0d590a4044 chore(model gallery): add smolvlm2-256m-video-instruct (#5417)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 12:03:02 +02:00
Ettore Di Giacinto
e0a54de4f5 chore(model gallery): add smolvlm2-500m-video-instruct (#5416)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 11:42:30 +02:00
Ettore Di Giacinto
6bc2ae5467 chore(model gallery): add smolvlm2-2.2b-instruct (#5415)
chore(model gallery): add smolvlm-instruct

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 11:36:22 +02:00
Ettore Di Giacinto
8caaf49f5d chore(model gallery): add smolvlm-instruct (#5414)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 11:35:01 +02:00
Ettore Di Giacinto
1db51044bb chore(model gallery): add smolvlm-500m-instruct (#5413)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 11:25:32 +02:00
Ettore Di Giacinto
ec21b58008 chore(model gallery): add smolvlm-256m-instruct (#5412)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 11:15:09 +02:00
Ettore Di Giacinto
996259b529 chore(model gallery): add facebook_kernelllm (#5411)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 10:31:09 +02:00
Ettore Di Giacinto
f2942cc0e1 chore(model gallery): add thedrummer_valkyrie-49b-v1 (#5410)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-20 10:28:27 +02:00
Ettore Di Giacinto
f8fbfd4fa3 chore(model gallery): add a-m-team_am-thinking-v1 (#5395)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-19 17:31:38 +02:00
Ettore Di Giacinto
41e239c67e chore(model gallery): add soob3123_grayline-qwen3-8b (#5394)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-19 17:02:43 +02:00
Ettore Di Giacinto
587827e779 chore(model gallery): add soob3123_grayline-qwen3-14b (#5393)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-19 15:59:07 +02:00
LocalAI [bot]
456b4982ef chore: ⬆️ Update ggml-org/llama.cpp to 6a2bc8bfb7cd502e5ebc72e36c97a6f848c21c2c (#5390)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-19 01:25:22 +00:00
Ettore Di Giacinto
159388cce8 chore: memoize detected GPUs (#5385)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-18 08:55:44 +02:00
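Hardware probing is expensive enough to be worth caching for the process lifetime. A minimal sketch of GPU-detection memoization with `sync.Once`, the pattern the commit title names, using illustrative types rather than LocalAI's real detection code:

```go
package main

import (
	"fmt"
	"sync"
)

// GPU is a stand-in for whatever the detection layer returns.
type GPU struct{ Name string }

var (
	gpuOnce sync.Once
	gpuList []GPU
)

// detectGPUs is assumed to be expensive (probing devices, shelling
// out), which is what makes the result worth caching.
func detectGPUs() []GPU {
	fmt.Println("probing hardware...") // runs exactly once
	return []GPU{{Name: "example-gpu"}}
}

// GPUs memoizes detection: every caller after the first one gets the
// cached slice without re-probing.
func GPUs() []GPU {
	gpuOnce.Do(func() { gpuList = detectGPUs() })
	return gpuList
}

func main() {
	fmt.Println(GPUs())
	fmt.Println(GPUs()) // no second probe
}
```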
LocalAI [bot]
cfc73c7773 chore: ⬆️ Update ggml-org/llama.cpp to e3a7cf6c5bf6a0a24217f88607b06e4405a2b5d9 (#5384)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-18 01:21:13 +00:00
Ettore Di Giacinto
6d5bde860b feat(llama.cpp): upgrade and use libmtmd (#5379)
* WIP

* wip

* wip

* Make it compile

* Update json.hpp

* this shouldn't be private for now

* Add logs

* Reset auto detected template

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Re-enable grammars

* This seems to be broken - 360a9c98e1 (diff-a18a8e64e12a01167d8e98fc)[…]cccf0d4eed09d76d879L2998-L3207

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Placeholder

* Simplify image loading

* use completion type

* disable streaming

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* correctly return timings

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Remove some debug logging

* Adapt tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Keep header

* embedding: do not use oai type

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Sync from server.cpp

* Use utils and json directly from llama.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Sync with upstream

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix: copy json.hpp from the correct location

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix: add httplib

* sync llama.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Embeddings: set OAICOMPAT_TYPE_EMBEDDING

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat: sync with server.cpp by including it

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* make it darwin-compatible

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-17 16:02:53 +02:00
LocalAI [bot]
6ef383033b chore: ⬆️ Update ggml-org/whisper.cpp to d1f114da61b1ae1e70b03104fad42c9dd666feeb (#5381)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-17 00:35:17 +00:00
Richard Palethorpe
cd494089d9 fix(flux): Set CFG=1 so that prompts are followed (#5378)
The recommendation with Flux is to set CFG to 1 as shown in the
stablediffusion-cpp README.

Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-05-16 17:53:54 +02:00
LocalAI [bot]
3033845f94 chore: ⬆️ Update ggml-org/whisper.cpp to 20a20decd94badfd519a07ea91f0bba8b8fc4dea (#5374)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-16 12:46:16 +02:00
omahs
0f365ac204 fix: typos (#5376)
Signed-off-by: omahs <73983677+omahs@users.noreply.github.com>
2025-05-16 12:45:48 +02:00
Ettore Di Giacinto
525cf198be chore(model gallery): add primeintellect_intellect-2 (#5373)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-15 10:53:52 +02:00
Ettore Di Giacinto
658c2a4f55 chore(model gallery): add thedrummer_rivermind-lux-12b-v1 (#5372)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-15 10:51:55 +02:00
Ettore Di Giacinto
c987de090d chore(model gallery): add thedrummer_snowpiercer-15b-v1 (#5371)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-15 10:04:44 +02:00
Ettore Di Giacinto
04365843e6 chore(model gallery): add skywork_skywork-or1-7b (#5370)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-15 10:02:07 +02:00
Ettore Di Giacinto
1dc5781679 chore(model gallery): add skywork_skywork-or1-32b (#5369)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-15 09:58:51 +02:00
LocalAI [bot]
30704292de chore: ⬆️ Update ggml-org/whisper.cpp to f389d7e3e56bbbfec49fd333551927a0fcbb7213 (#5367)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-15 00:34:16 +00:00
Ettore Di Giacinto
e52c66c76e chore(docs/install.sh): image changes (#5354)
chore(docs): image changes

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-14 19:28:30 +02:00
LocalAI [bot]
cb28aef93b chore: ⬆️ Update ggml-org/whisper.cpp to f89056057511a1657af90bb28ef3f21e5b1f33cd (#5364)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-14 09:24:16 +02:00
LocalAI [bot]
029f97c2a2 docs: ⬆️ update docs version mudler/LocalAI (#5363)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-14 01:54:34 +00:00
Ettore Di Giacinto
3be71be696 fix(ci): tag latest against cpu-only image (#5362)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-13 22:00:41 +02:00
LocalAI [bot]
6adb019f8f chore: ⬆️ Update ggml-org/llama.cpp to de4c07f93783a1a96456a44dc16b9db538ee1618 (#5358)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-13 22:00:19 +02:00
LocalAI [bot]
fcaa0a2f01 chore: ⬆️ Update ggml-org/whisper.cpp to e41bc5c61ae66af6be2bd7011769bb821a83e8ae (#5357)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-13 21:59:50 +02:00
dependabot[bot]
fd17a3312c chore(deps): bump securego/gosec from 2.22.3 to 2.22.4 (#5356)
Bumps [securego/gosec](https://github.com/securego/gosec) from 2.22.3 to 2.22.4.
- [Release notes](https://github.com/securego/gosec/releases)
- [Changelog](https://github.com/securego/gosec/blob/master/.goreleaser.yml)
- [Commits](https://github.com/securego/gosec/compare/v2.22.3...v2.22.4)

---
updated-dependencies:
- dependency-name: securego/gosec
  dependency-version: 2.22.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-12 22:01:43 +02:00
dependabot[bot]
12d0fe610b chore(deps): bump dependabot/fetch-metadata from 2.3.0 to 2.4.0 (#5355)
Bumps [dependabot/fetch-metadata](https://github.com/dependabot/fetch-metadata) from 2.3.0 to 2.4.0.
- [Release notes](https://github.com/dependabot/fetch-metadata/releases)
- [Commits](https://github.com/dependabot/fetch-metadata/compare/v2.3.0...v2.4.0)

---
updated-dependencies:
- dependency-name: dependabot/fetch-metadata
  dependency-version: 2.4.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-12 22:01:19 +02:00
Ettore Di Giacinto
11c67d16b8 chore(ci): strip 'core' in the image suffix, identify python-based images with 'extras' (#5353)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-12 09:36:59 +02:00
LocalAI [bot]
63f7c86c4d chore: ⬆️ Update ggml-org/llama.cpp to 9a390c4829cd3058d26a2e2c09d16e3fd12bf1b1 (#5351)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-12 09:24:54 +02:00
LocalAI [bot]
ac89bf77bf chore: ⬆️ Update ggml-org/whisper.cpp to 2e310b841e0b4e7cf00890b53411dd9f8578f243 (#4785)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-12 01:30:35 +00:00
Ettore Di Giacinto
0395cc02fb chore(model gallery): add qwen_qwen2.5-vl-72b-instruct (#5349)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-11 09:46:32 +02:00
Ettore Di Giacinto
616972fca0 chore(model gallery): add qwen_qwen2.5-vl-7b-instruct (#5348)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-11 09:44:58 +02:00
Ettore Di Giacinto
942fbff62d chore(model gallery): add gryphe_pantheon-proto-rp-1.8-30b-a3b (#5347)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-11 09:39:28 +02:00
LocalAI [bot]
2612a0c910 chore: ⬆️ Update ggml-org/llama.cpp to 15e6125a397f6086c1dfdf7584acdb7c730313dc (#5345)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-11 09:21:46 +02:00
LocalAI [bot]
2dcb6d7247 chore(model-gallery): ⬆️ update checksum (#5346)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-10 22:24:04 +02:00
Ettore Di Giacinto
6978eec69f feat(whisper.cpp): gpu support (#5344)
* fix(whisper.cpp): gpu support

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Try to fix apple tests

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-10 22:02:40 +02:00
LocalAI [bot]
2fcfe54466 chore: ⬆️ Update ggml-org/llama.cpp to 33eff4024084d1f0c8441b79f7208a52fad79858 (#5343)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-10 10:07:39 +02:00
Ettore Di Giacinto
4e7506a3be fix(whisper): add vulkan flag
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-10 08:46:21 +02:00
Ettore Di Giacinto
2a46217f90 Update Makefile
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-09 23:17:18 +02:00
Ettore Di Giacinto
31ff9dbd52 chore(Makefile): small cleanups, disable openmp on whisper
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-09 22:37:18 +02:00
Ettore Di Giacinto
9483abef03 fix(whisper/sycl): disable
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-09 22:36:09 +02:00
Ettore Di Giacinto
ce3e8b3e31 fix(whisper/sycl): use icx when running go build
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-09 21:48:09 +02:00
Ettore Di Giacinto
f3bb84c9a7 feat(whisper): link vulkan, hipblas and sycl
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-09 19:25:26 +02:00
Ettore Di Giacinto
ecb1297582 fix: specify icx and icpx only on whisper.cpp
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-09 10:58:30 +02:00
Ettore Di Giacinto
73fc702b3c fix: this is not needed
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-09 10:28:53 +02:00
Ettore Di Giacinto
e3af62ae1a feat: Add sycl support for whisper.cpp (#5341)
2025-05-09 09:31:02 +02:00
Ettore Di Giacinto
dc21604741 chore(deps): bump whisper.cpp (#5338)
* chore(deps): bump whisper.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* add libggml-metal

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups macOS arm64

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* adjust cublas for whisper.cpp

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-09 08:17:45 +02:00
LocalAI [bot]
5433f1a70e chore: ⬆️ Update ggml-org/llama.cpp to f05a6d71a0f3dbf0730b56a1abbad41c0f42e63d (#5340)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-08 23:13:28 +00:00
Ettore Di Giacinto
d5e032bdcd chore(model gallery): add gemma-3-12b-fornaxv.2-qat-cot (#5337)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-08 12:07:25 +02:00
Ettore Di Giacinto
de786f6586 chore(model gallery): add symiotic-14b-i1 (#5336)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-08 12:03:35 +02:00
Ettore Di Giacinto
8b9bc4aa6e chore(model gallery): add qwen3-14b-uncensored (#5335)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-08 11:59:26 +02:00
Ettore Di Giacinto
e6cea7d28e chore(model gallery): add cognition-ai_kevin-32b (#5334)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-08 11:57:12 +02:00
Ettore Di Giacinto
7d7d56f2ce chore(model gallery): add servicenow-ai_apriel-nemotron-15b-thinker (#5333)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-08 11:55:35 +02:00
Ettore Di Giacinto
1caae91ab6 chore(model gallery): add qwen3-4b-esper3-i1 (#5332)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-08 11:52:02 +02:00
LocalAI [bot]
e90f2cb0ca chore: ⬆️ Update ggml-org/llama.cpp to 814f795e063c257f33b921eab4073484238a151a (#5331)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-08 09:25:13 +02:00
Ettore Di Giacinto
5a4291fadd docs: update README badges
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-07 22:20:06 +02:00
Ettore Di Giacinto
91ef58ee5a chore(model gallery): add qwen3-14b-griffon-i1 (#5330)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-07 11:07:38 +02:00
LocalAI [bot]
a86e8c78f1 chore: ⬆️ Update ggml-org/llama.cpp to 91a86a6f354aa73a7aab7bc3d283be410fdc93a5 (#5329)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-06 23:39:10 +00:00
Ettore Di Giacinto
adb24214c6 chore(deps): bump llama.cpp to b34c859146630dff136943abc9852ca173a7c9d6 (#5323)
chore(deps): bump llama.cpp to 'b34c859146630dff136943abc9852ca173a7c9d6'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-06 11:21:25 +02:00
Ettore Di Giacinto
f03a0430aa chore(model gallery): add claria-14b (#5326)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-06 10:48:03 +02:00
Ettore Di Giacinto
73bc12abc0 chore(model gallery): add goekdeniz-guelmez_josiefied-qwen3-8b-abliterated-v1 (#5325)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-06 10:38:20 +02:00
Ettore Di Giacinto
7fa437bbcc chore(model gallery): add huihui-ai_qwen3-14b-abliterated (#5324)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-06 10:35:55 +02:00
LocalAI [bot]
4a27c99928 chore(model-gallery): ⬆️ update checksum (#5321)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-06 10:01:28 +02:00
Ettore Di Giacinto
6ce94834b6 fix(hipblas): do not build all cpu-specific flags (#5322)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-06 10:00:50 +02:00
dependabot[bot]
84a26458dc chore(deps): bump mxschmitt/action-tmate from 3.21 to 3.22 (#5319)
Bumps [mxschmitt/action-tmate](https://github.com/mxschmitt/action-tmate) from 3.21 to 3.22.
- [Release notes](https://github.com/mxschmitt/action-tmate/releases)
- [Changelog](https://github.com/mxschmitt/action-tmate/blob/master/RELEASE.md)
- [Commits](https://github.com/mxschmitt/action-tmate/compare/v3.21...v3.22)

---
updated-dependencies:
- dependency-name: mxschmitt/action-tmate
  dependency-version: '3.22'
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-05 22:17:59 +00:00
Ettore Di Giacinto
7aa377b6a9 fix(arm64): do not build instructions which are not available (#5318)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-05 17:30:00 +02:00
Ettore Di Giacinto
64e66dda4a chore(model gallery): add allura-org_remnant-qwen3-8b (#5317)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-05 11:09:07 +02:00
LocalAI [bot]
a085f61fdc chore: ⬆️ Update ggml-org/llama.cpp to 9fdfcdaeddd1ef57c6d041b89cd8fb7048a0f028 (#5316)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-04 23:00:25 +00:00
Ettore Di Giacinto
21bdfe5fa4 fix: use rice when embedding large binaries (#5309)
* fix(embed): use go-rice for large backend assets

Golang embed FS has a hard limit that we might exceed when providing
many binary alternatives.

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* simplify golang deps

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* chore(tests): switch to testcontainers and print logs

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(tests): do not build a test binary

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* small fixup

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-04 16:42:42 +02:00
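Go's native `embed` compiles assets into the object file, and as the commit notes it has a hard limit that many large binary alternatives can exceed; go.rice instead attaches assets to an already-built executable. A minimal sketch of the runtime side follows; the box name and asset path are illustrative, not LocalAI's actual layout:

```go
package main

import (
	"log"
	"os"

	rice "github.com/GeertJohan/go.rice"
)

func main() {
	// At build time the assets are attached with `rice embed-go` or
	// `rice append --exec`; at runtime FindBox locates them, falling
	// back to the on-disk folder during development.
	box, err := rice.FindBox("backend-assets")
	if err != nil {
		log.Fatalf("locating asset box: %v", err)
	}
	data, err := box.Bytes("grpc-server") // one of several binary alternatives
	if err != nil {
		log.Fatalf("reading asset: %v", err)
	}
	if err := os.WriteFile("/tmp/grpc-server", data, 0o755); err != nil {
		log.Fatal(err)
	}
}
```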
Ettore Di Giacinto
7ebd7b2454 chore(model gallery): add rei-v3-kto-12b (#5313)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-04 09:41:35 +02:00
Ettore Di Giacinto
6984749ea1 chore(model gallery): add kalomaze_qwen3-16b-a3b (#5312)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-04 09:39:38 +02:00
Ettore Di Giacinto
c0a206bc7a chore(model gallery): add qwen3-30b-a1.5b-high-speed (#5311)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-04 09:38:01 +02:00
LocalAI [bot]
01bbb31fb3 chore: ⬆️ Update ggml-org/llama.cpp to 36667c8edcded08063ed51c7d57e9e086bbfc903 (#5300)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-04 09:23:01 +02:00
Ettore Di Giacinto
72111c597d fix(gpu): do not assume the gpu being returned has node and mem (#5310)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 19:00:24 +02:00
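A sketch of the defensive pattern this fix describes: treating node and memory information on a detected GPU as optional before using it for VRAM accounting. The types here are illustrative assumptions, not the actual detection library:

```go
package main

import "fmt"

type memInfo struct{ Total uint64 }
type node struct{ Memory *memInfo }
type gpu struct{ Node *node }

// totalVRAM sums memory only for entries that actually carry node and
// memory data, instead of assuming every detected GPU has both.
func totalVRAM(gpus []gpu) uint64 {
	var total uint64
	for _, g := range gpus {
		if g.Node == nil || g.Node.Memory == nil {
			continue // incomplete entry: skip rather than crash
		}
		total += g.Node.Memory.Total
	}
	return total
}

func main() {
	gpus := []gpu{
		{}, // detected, but no node/memory info
		{Node: &node{Memory: &memInfo{Total: 8 << 30}}},
	}
	fmt.Println(totalVRAM(gpus)) // 8589934592
}
```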
Ettore Di Giacinto
b2f9fc870b chore(defaults): enlarge defaults, drop gpu layers, which are now inferred (#5308)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 18:44:51 +02:00
Ettore Di Giacinto
1fc6d469ac chore(deps): bump llama.cpp to '1d36b3670b285e69e58b9d687c770a2a0a192194' (#5307)
chore(deps): bump llama.cpp to '1d36b3670b285e69e58b9d687c770a2a0a192194'

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 18:44:40 +02:00
Ettore Di Giacinto
05848b2027 chore(model gallery): add smoothie-qwen3-8b (#5306)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 10:35:20 +02:00
Ettore Di Giacinto
1da0644aa3 chore(model gallery): add qwen-3-32b-medical-reasoning-i1 (#5305)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 10:24:07 +02:00
Ettore Di Giacinto
c087cd1377 chore(model gallery): add amoral-qwen3-14b (#5304)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 10:21:48 +02:00
Ettore Di Giacinto
c621412f6a chore(model gallery): add comet_12b_v.5-i1 (#5303)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 10:20:03 +02:00
Ettore Di Giacinto
5a8b1892cd chore(model gallery): add genericrpv3-4b (#5302)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 10:18:31 +02:00
Ettore Di Giacinto
5b20426863 chore(model gallery): add planetoid_27b_v.2 (#5301)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-03 10:14:33 +02:00
Ettore Di Giacinto
5c6cd50ed6 feat(llama.cpp): estimate vram usage (#5299)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-02 17:40:26 +02:00
Ettore Di Giacinto
bace6516f1 chore(model gallery): add webthinker-qwq-32b-i1 (#5298)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-02 09:57:49 +02:00
Ettore Di Giacinto
3baadf6f27 chore(model gallery): add shuttleai_shuttle-3.5 (#5297)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-02 09:48:11 +02:00
Ettore Di Giacinto
8804c701b8 chore(model gallery): add microsoft_phi-4-reasoning (#5296)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-02 09:46:20 +02:00
Ettore Di Giacinto
7b3ceb19bb chore(model gallery): add microsoft_phi-4-reasoning-plus (#5295)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-02 09:43:38 +02:00
Ettore Di Giacinto
e7f3effea1 chore(model gallery): add furina-8b (#5294)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-02 09:39:22 +02:00
Ettore Di Giacinto
61694a2ffb chore(model gallery): add josiefied-qwen3-8b-abliterated-v1 (#5293)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-02 09:36:35 +02:00
LocalAI [bot]
573a3f104c chore: ⬆️ Update ggml-org/llama.cpp to d7a14c42a1883a34a6553cbfe30da1e1b84dfd6a (#5292)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-02 09:21:38 +02:00
Ettore Di Giacinto
0e8af53a5b chore: update quickstart
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-01 22:36:33 +02:00
Ettore Di Giacinto
960ffa808c chore(model gallery): add microsoft_phi-4-mini-reasoning (#5288)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-01 10:17:58 +02:00
Ettore Di Giacinto
92719568e5 chore(model gallery): add fast-math-qwen3-14b (#5287)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-01 10:14:51 +02:00
Ettore Di Giacinto
163939af71 chore(model gallery): add qwen3-8b-jailbroken (#5286)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-01 10:13:01 +02:00
Ettore Di Giacinto
399f1241dc chore(model gallery): add qwen3-30b-a3b-abliterated (#5285)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-05-01 10:07:42 +02:00
LocalAI [bot]
58c9ade2e8 chore: ⬆️ Update ggml-org/llama.cpp to 3e168bede4d27b35656ab8026015b87659ecbec2 (#5284)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-05-01 10:01:39 +02:00
Ettore Di Giacinto
6e1c93d84f fix(ci): comment out vllm tests
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-05-01 10:01:22 +02:00
Wyatt Neal
4076ea0494 fix: vllm missing logprobs (#5279)
* working to address missing items

Referencing #3436 and #2930 - if I could test it, this might show that the
output from the vllm backend is processed and returned to the user

Signed-off-by: Wyatt Neal <wyatt.neal+git@gmail.com>

* adding in vllm tests to test-extras

Signed-off-by: Wyatt Neal <wyatt.neal+git@gmail.com>

* adding in tests to pipeline for execution

Signed-off-by: Wyatt Neal <wyatt.neal+git@gmail.com>

* removing todo block, test via pipeline

Signed-off-by: Wyatt Neal <wyatt.neal+git@gmail.com>

---------

Signed-off-by: Wyatt Neal <wyatt.neal+git@gmail.com>
2025-04-30 12:55:07 +00:00
Ettore Di Giacinto
26cbf77c0d chore(model gallery): add mlabonne_qwen3-4b-abliterated (#5283)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-30 11:09:58 +02:00
Ettore Di Giacinto
640790d628 chore(model gallery): add mlabonne_qwen3-8b-abliterated (#5282)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-30 11:08:26 +02:00
Ettore Di Giacinto
4132adea2f chore(model gallery): add mlabonne_qwen3-14b-abliterated (#5281)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-30 11:04:49 +02:00
LocalAI [bot]
2b2d907a3a chore: ⬆️ Update ggml-org/llama.cpp to e2e1ddb93a01ce282e304431b37e60b3cddb6114 (#5278)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-29 21:46:08 +00:00
Ettore Di Giacinto
6e8f4f584b fix(diffusers): consider options only in form of key/value (#5277)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 17:08:55 +02:00
Richard Palethorpe
662cfc2b48 fix(aio): Fix copypasta in download files for gpt-4 model (#5276)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-04-29 17:08:16 +02:00
Ettore Di Giacinto
a25d355d66 chore(model gallery): add qwen3-0.6b (#5275)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 10:10:16 +02:00
Ettore Di Giacinto
6d1cfdbefc chore(model gallery): add qwen3-1.7b (#5274)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 10:06:03 +02:00
Ettore Di Giacinto
5ecc478968 chore(model gallery): add qwen3-4b (#5273)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 10:01:22 +02:00
Ettore Di Giacinto
aef5c4291b chore(model gallery): add qwen3-8b (#5272)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 09:59:17 +02:00
Ettore Di Giacinto
c059f912b9 chore(model gallery): add qwen3-14b (#5271)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 09:56:50 +02:00
LocalAI [bot]
bc1e059259 chore: ⬆️ Update ggml-org/llama.cpp to 5f5e39e1ba5dbea814e41f2a15e035d749a520bc (#5267)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-29 09:49:42 +02:00
LocalAI [bot]
38dc07793a chore(model-gallery): ⬆️ update checksum (#5268)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-29 09:49:23 +02:00
Ettore Di Giacinto
da6ef0967d chore(model gallery): add qwen3-32b (#5270)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 09:48:28 +02:00
Ettore Di Giacinto
7a011e60bd chore(model gallery): add qwen3-30b-a3b (#5269)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-29 09:44:44 +02:00
dependabot[bot]
e13dd5b09f chore(deps): bump appleboy/scp-action from 0.1.7 to 1.0.0 (#5265)
Bumps [appleboy/scp-action](https://github.com/appleboy/scp-action) from 0.1.7 to 1.0.0.
- [Release notes](https://github.com/appleboy/scp-action/releases)
- [Changelog](https://github.com/appleboy/scp-action/blob/master/.goreleaser.yaml)
- [Commits](https://github.com/appleboy/scp-action/compare/v0.1.7...v1.0.0)

---
updated-dependencies:
- dependency-name: appleboy/scp-action
  dependency-version: 1.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-28 22:36:30 +00:00
Ettore Di Giacinto
86ee303bd6 chore(model gallery): add nvidia_openmath-nemotron-14b-kaggle (#5264)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-28 19:52:36 +02:00
Ettore Di Giacinto
978ee96fd3 chore(model gallery): add nvidia_openmath-nemotron-14b (#5263)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-28 19:43:49 +02:00
Ettore Di Giacinto
3ad5691db6 chore(model gallery): add nvidia_openmath-nemotron-7b (#5262)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-28 19:41:59 +02:00
Ettore Di Giacinto
0027681090 chore(model gallery): add nvidia_openmath-nemotron-1.5b (#5261)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-28 19:40:09 +02:00
Ettore Di Giacinto
8cba990edc chore(model gallery): add nvidia_openmath-nemotron-32b (#5260)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-28 19:36:57 +02:00
Simon Redman
88857696d4 fix(CUDA): Add note for how to run CUDA with SELinux (#5259)
* Add note to help run nvidia containers with SELinux

* Use correct CUDA container references as noted in the dockerhub overview

* Clean trailing whitespaces
2025-04-28 09:00:52 +02:00
LocalAI [bot]
23f347e687 chore: ⬆️ Update ggml-org/llama.cpp to ced44be34290fab450f8344efa047d8a08e723b4 (#5258)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-27 21:59:35 +00:00
Mohit Gaur
b6e3dc5f02 docs: update docs for DisableWebUI flag (#5256)
Signed-off-by: Mohit Gaur <56885276+Mohit-Gaur@users.noreply.github.com>
2025-04-27 16:02:02 +02:00
Alessandro Pirastru
69667521e2 fix(install/gpu): fix Docker not being able to leverage the GPU on systems that have SELinux enforced (#5252)
* Update installation script for improved compatibility and clarity

- Renamed VERSION to LOCALAI_VERSION to avoid conflicts with system variables.
- Enhanced NVIDIA and CUDA repository installation for DNF5 compatibility.
- Adjusted default Fedora version handling for CUDA installation.
- Updated Docker image tag handling to use LOCALAI_VERSION consistently.
- Improved logging messages for repository and LocalAI binary downloads.
- Added a temporary bypass for nvidia-smi installation on Fedora Cloud Edition.

* feat: Add SELinux configuration for NVIDIA GPU support in containers

- Introduced `enable_selinux_container_booleans` function to handle SELinux configuration changes for GPU access.
- Included user confirmation prompt to enable SELinux `container_use_devices` boolean due to security implications.
- Added NVIDIA Container Runtime to Docker runtimes and restarted Docker to ensure proper GPU support.
- Applied SELinux adjustments conditionally for Fedora, RHEL, CentOS, Rocky, and openSUSE distributions.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* fix: Correct SELinux boolean parsing and add loop break

- Fixed incorrect parsing of `container_use_devices` boolean by changing the awk field from `$2` to `$3` to retrieve the correct value.
- Added a `break` statement after enabling the SELinux boolean to prevent unnecessary loop iterations after user prompt.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* fix: typo in install.sh

Signed-off-by: Alessandro Pirastru <57262788+Bloodis94@users.noreply.github.com>

---------

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>
Signed-off-by: Alessandro Pirastru <57262788+Bloodis94@users.noreply.github.com>
2025-04-27 16:01:29 +02:00
LocalAI [bot]
2a92effc5d chore: ⬆️ Update ggml-org/llama.cpp to 77d5e9a76a7b4a8a7c5bf9cf6ebef91860123cba (#5254)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-27 09:21:02 +02:00
Simon Redman
a65e012aa2 docs(Vulkan): Add GPU docker documentation for Vulkan (#5255)
Add GPU docker documentation for Vulkan
2025-04-27 09:20:26 +02:00
Ettore Di Giacinto
8e9b41d05f chore(ci): build only images with ffmpeg included, simplify tags (#5251)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-27 08:23:25 +02:00
LocalAI [bot]
078da5c2f0 feat(swagger): update swagger (#5253)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-26 22:40:35 +00:00
Ettore Di Giacinto
c5af5d139c Update index.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-04-26 18:42:22 +02:00
Ettore Di Giacinto
2c9279a542 feat(video-gen): add endpoint for video generation (#5247)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-26 18:05:01 +02:00
Ettore Di Giacinto
a67d22f5f2 chore(model gallery): add mmproj to gemma3 models (now working)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-26 17:31:40 +02:00
Ettore Di Giacinto
dc7c51dcc7 chore(model gallery): fix correct filename for gemma-3-27b-it-qat
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-26 17:27:50 +02:00
Ettore Di Giacinto
98df65c7aa chore(model gallery): add l3.3-genetic-lemonade-sunset-70b (#5250)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-26 17:19:20 +02:00
Ettore Di Giacinto
1559b6b522 chore(model gallery): add l3.3-geneticlemonade-unleashed-v2-70b (#5249)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-26 17:17:18 +02:00
Alessandro Pirastru
a0244e3fb4 feat(install): added a complete process for installing NVIDIA drivers on Fedora without pulling in X11 (#5246)
* Update installation script for improved compatibility and clarity

- Renamed VERSION to LOCALAI_VERSION to avoid conflicts with system variables.
- Enhanced NVIDIA and CUDA repository installation for DNF5 compatibility.
- Adjusted default Fedora version handling for CUDA installation.
- Updated Docker image tag handling to use LOCALAI_VERSION consistently.
- Improved logging messages for repository and LocalAI binary downloads.
- Added a temporary bypass for nvidia-smi installation on Fedora Cloud Edition.

* Enhance log functions with ANSI color formatting

- Added ANSI escape codes for improved log styling: light blue for info, orange for warnings, and red for errors.
- Updated all log functions (`info`, `warn`, `fatal`) to include bold and colored output.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* feat: Enhance log functions with ANSI color formatting

- Added ANSI escape codes for improved log styling: light blue for info, orange for warnings, and red for errors.
- Updated all log functions (`info`, `warn`, `fatal`) to include bold and colored output.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* chore: ⬆️ Update ggml-org/llama.cpp to `ecda2ec4b347031a9b8a89ee2efc664ce63f599c` (#5238)

⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>

* fix(stablediffusion-ggml): Build with DSD CUDA, HIP and Metal flags (#5236)

Signed-off-by: Richard Palethorpe <io@richiejp.com>

* feat(install): enhance script with choice functions and logs

- Added custom `choice_info`, `choice_warn`, and `choice_fatal` functions for interactive input logging.
- Adjusted Docker volume creation message for better clarity.
- Included NVIDIA driver check log for improved feedback to users.
- Added consistent logging before starting LocalAI Docker containers across configurations.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* feat(install): add Fedora NVIDIA driver installation option

- Introduced a new function to install NVIDIA kernel drivers on Fedora using akmod packages.
- Added user prompt to choose between installing drivers automatically or exiting for manual setup.
- Integrated the new function into the existing Fedora-specific CUDA toolkit installation workflow.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* fix(install): correct repository ID for DNF5 configuration

- Update repository ID from 'nome-repo' to 'nvidia-cuda' for DNF5.
- Ensures the correct repository name matches expected configuration.
- Fix prevents potential misconfiguration during installation process.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* feat(install): enhance NVIDIA driver handling on Fedora

- fixed `install_cuda_driver_yum` function call in `install_fedora_nvidia_kernel_drivers`
- Added `cuda-toolkit` for Fedora installations, as recommended by RPM Fusion.
- Adjusted driver repository commands for compatibility with DNF5.
- Improved URL and version handling for package manager installations.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>
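For orientation, the RPM Fusion route these commits describe usually reduces to something like the following — a sketch of the standard akmod flow, not the script's actual commands:

```sh
# Assumes Fedora; the release-package URLs are the standard RPM Fusion ones,
# not copied from install.sh.
sudo dnf install -y \
  "https://mirrors.rpmfusion.org/free/fedora/rpmfusion-free-release-$(rpm -E %fedora).noarch.rpm" \
  "https://mirrors.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-$(rpm -E %fedora).noarch.rpm"
sudo dnf install -y akmod-nvidia           # NVIDIA kernel modules via akmods
sudo akmods --force && sudo dracut --force # build the modules, refresh the initramfs
```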

* Refactor NVIDIA driver installation process in install.sh

- Removed redundant empty lines for cleaner formatting.
- Standardized URL formatting by removing unnecessary quotes around URLs.
- Reverted logic by removing Fedora-specific exclusions for cuda-toolkit and using `cuda-drivers` universally.
- Refined repository addition for `dnf` by explicitly setting `id` and `name` parameters for clarity and accuracy.
- Fixed minor formatting inconsistencies in parameter passing.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* feat: Update NVIDIA module installation warning in install script

- Clarified that Akmod installation may inhibit the reboot command.
- Added a cautionary note to the warning to inform users of potential risks.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

* Update NVIDIA driver installation warning message

- Clarify prerequisites by noting the need for rpmfusion free/nonfree repos.
- Improve formatting of the warning box for better readability.
- Inform users that the script will install missing repos if necessary.

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>

---------

Signed-off-by: Alessandro Pirastru <alessandro.pirastru.94@gmail.com>
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Richard Palethorpe <io@richiejp.com>
Co-authored-by: LocalAI [bot] <139863280+localai-bot@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
Co-authored-by: Richard Palethorpe <io@richiejp.com>
2025-04-26 09:44:40 +02:00
LocalAI [bot]
d66396201a chore: ⬆️ Update ggml-org/llama.cpp to 295354ea6848a77bdee204ee1c971d9b92ffcca9 (#5245)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-26 00:05:16 +02:00
Ettore Di Giacinto
9628860c0e feat(llama.cpp/clip): inject gpu options if we detect GPUs (#5243)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-26 00:04:47 +02:00
Ettore Di Giacinto
cae9bf1308 chore(deps): bump grpcio to 1.72.0 (#5244)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-25 21:32:37 +02:00
Ettore Di Giacinto
5bb5da0760 fix(ci): add clang (#5242)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-25 16:20:05 +02:00
Ettore Di Giacinto
867973a850 chore(model gallery): add soob3123_veritas-12b (#5241)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-25 09:20:01 +02:00
LocalAI [bot]
701cd6b6d5 chore: ⬆️ Update ggml-org/llama.cpp to 226251ed56b85190e18a1cca963c45b888f4953c (#5240)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-25 08:42:22 +02:00
Richard Palethorpe
7f61d397d5 fix(stablediffusion-ggml): Build with DSD CUDA, HIP and Metal flags (#5236)
Signed-off-by: Richard Palethorpe <io@richiejp.com>
2025-04-24 10:27:17 +02:00
Alessandro Pirastru
1ae0b896fa fix: installation script compatibility with fedora 41 and later, fedora headless unclear errors (#5239)
Update installation script for improved compatibility and clarity

- Renamed VERSION to LOCALAI_VERSION to avoid conflicts with system variables.
- Enhanced NVIDIA and CUDA repository installation for DNF5 compatibility.
- Adjusted default Fedora version handling for CUDA installation.
- Updated Docker image tag handling to use LOCALAI_VERSION consistently.
- Improved logging messages for repository and LocalAI binary downloads.
- Added a temporary bypass for nvidia-smi installation on Fedora Cloud Edition.
2025-04-24 09:34:25 +02:00
LocalAI [bot]
3937407cb3 chore: ⬆️ Update ggml-org/llama.cpp to ecda2ec4b347031a9b8a89ee2efc664ce63f599c (#5238)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-24 09:32:08 +02:00
LocalAI [bot]
0e34ae4f3f chore: ⬆️ Update ggml-org/llama.cpp to 658987cfc9d752dca7758987390d5fb1a7a0a54a (#5234)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-23 09:13:49 +02:00
dependabot[bot]
a38b99ecb6 chore(deps): bump mxschmitt/action-tmate from 3.19 to 3.21 (#5231)
Bumps [mxschmitt/action-tmate](https://github.com/mxschmitt/action-tmate) from 3.19 to 3.21.
- [Release notes](https://github.com/mxschmitt/action-tmate/releases)
- [Changelog](https://github.com/mxschmitt/action-tmate/blob/master/RELEASE.md)
- [Commits](https://github.com/mxschmitt/action-tmate/compare/v3.19...v3.21)

---
updated-dependencies:
- dependency-name: mxschmitt/action-tmate
  dependency-version: '3.21'
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-22 10:27:10 +02:00
LocalAI [bot]
a4a4358182 chore: ⬆️ Update ggml-org/llama.cpp to 1d735c0b4fa0551c51c2f4ac888dd9a01f447985 (#5233)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-22 10:25:54 +02:00
Ettore Di Giacinto
4bc39c2db3 fix: typo on README link
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-04-21 22:13:14 +02:00
Ettore Di Giacinto
cc3df759f8 chore(docs): improve installer.sh docs (#5232)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-21 22:11:43 +02:00
LocalAI [bot]
378161060c chore: ⬆️ Update ggml-org/llama.cpp to 6602304814e679cc8c162bb760a034aceb4f8965 (#5228)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-20 21:44:33 +00:00
Ettore Di Giacinto
f2f788fe60 chore(model gallery): add starrysky-12b-i1 (#5224)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-20 10:26:30 +02:00
Ettore Di Giacinto
9fa8ed6b1e chore(model gallery) add amoral-gemma3-1b-v2 (#5223)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-20 10:23:24 +02:00
Ettore Di Giacinto
7fc37c5e29 chore(model gallery) add llama_3.3_70b_darkhorse-i1 (#5222)
chore(model gallery): add llama_3.3_70b_darkhorse-i1

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-20 10:20:58 +02:00
Ettore Di Giacinto
4bc4b1e8bc chore(model gallery) update gemma3 qat models
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-04-20 10:11:12 +02:00
LocalAI [bot]
e495b89f18 chore: ⬆️ Update ggml-org/llama.cpp to 00137157fca3d17b90380762b4d7cc158d385bd3 (#5218)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-19 23:50:35 +00:00
LocalAI [bot]
ba09eaea1b feat(swagger): update swagger (#5217)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-04-19 22:06:30 +02:00
548 changed files with 37111 additions and 38986 deletions

View File

@@ -2,9 +2,6 @@
cd /workspace
# Grab the pre-stashed backend assets to avoid build issues
cp -r /build/backend-assets /workspace/backend-assets
# Ensures generated source files are present upon load
make prepare

View File

@@ -4,10 +4,6 @@ services:
context: ..
dockerfile: Dockerfile
target: devcontainer
-      args:
-        - FFMPEG=true
-        - IMAGE_TYPE=extras
-        - GO_TAGS=p2p tts
env_file:
- ../.env
ports:
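The dropped compose arguments previously reached the Dockerfile as build arguments; the pre-change behaviour was roughly equivalent to the following (illustrative only, image tag assumed):

```sh
docker build \
  --build-arg FFMPEG=true \
  --build-arg IMAGE_TYPE=extras \
  --build-arg GO_TAGS="p2p tts" \
  --target devcontainer \
  -t localai-devcontainer .
```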

View File

@@ -3,7 +3,13 @@
.vscode
.devcontainer
models
backends
examples/chatbot-ui/models
backend/go/image/stablediffusion-ggml/build/
backend/go/*/build
backend/go/*/.cache
backend/go/*/sources
backend/go/*/package
examples/rwkv/models
examples/**/models
Dockerfile*
@@ -14,4 +20,4 @@ __pycache__
# backend virtual environments
**/venv
backend/python/**/source
backend/python/**/source

.env (9 changed lines)
View File

@@ -41,13 +41,6 @@
## Uncomment and set to true to enable rebuilding from source
# REBUILD=true
-## Enable go tags, available: p2p, tts
-## p2p: enable distributed inferencing
-## tts: enables text-to-speech with go-piper
-## (requires REBUILD=true)
-#
-# GO_TAGS=p2p
## Path where to store generated images
# LOCALAI_IMAGE_PATH=/tmp/generated/images
@@ -76,7 +69,7 @@
### Define a list of GRPC Servers for llama-cpp workers to distribute the load
# https://github.com/ggerganov/llama.cpp/pull/6829
-# https://github.com/ggerganov/llama.cpp/blob/master/examples/rpc/README.md
+# https://github.com/ggerganov/llama.cpp/blob/master/tools/rpc/README.md
# LLAMACPP_GRPC_SERVERS=""
### Enable to run parallel requests
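The corrected link sits next to the LLAMACPP_GRPC_SERVERS knob; judging from the linked llama.cpp RPC docs, a value is a comma-separated list of worker endpoints. Format assumed here, addresses illustrative:

```sh
# host:port pairs of llama.cpp rpc-server workers (assumed format).
export LLAMACPP_GRPC_SERVERS="192.168.1.10:50052,192.168.1.11:50052"
```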

View File

@@ -3,15 +3,20 @@ set -xe
REPO=$1
BRANCH=$2
VAR=$3
+FILE=$4
+if [ -z "$FILE" ]; then
+  FILE="Makefile"
+fi
LAST_COMMIT=$(curl -s -H "Accept: application/vnd.github.VERSION.sha" "https://api.github.com/repos/$REPO/commits/$BRANCH")
# Read $VAR from Makefile (only first match)
set +e
-CURRENT_COMMIT="$(grep -m1 "^$VAR?=" Makefile | cut -d'=' -f2)"
+CURRENT_COMMIT="$(grep -m1 "^$VAR?=" $FILE | cut -d'=' -f2)"
set -e
-sed -i Makefile -e "s/$VAR?=.*/$VAR?=$LAST_COMMIT/"
+sed -i $FILE -e "s/$VAR?=.*/$VAR?=$LAST_COMMIT/"
if [ -z "$CURRENT_COMMIT" ]; then
echo "Could not find $VAR in Makefile."

View File

@@ -61,10 +61,6 @@ updates:
directory: "/backend/python/openvoice"
schedule:
interval: "weekly"
- package-ecosystem: "pip"
directory: "/backend/python/parler-tts"
schedule:
interval: "weekly"
- package-ecosystem: "pip"
directory: "/backend/python/rerankers"
schedule:

.github/workflows/backend.yml (new file, 1354 lines)
View File

File diff suppressed because it is too large.

.github/workflows/backend_build.yml (new file, 243 lines)
View File

@@ -0,0 +1,243 @@
---
name: 'build python backend container images (reusable)'
on:
workflow_call:
inputs:
base-image:
description: 'Base image'
required: true
type: string
build-type:
description: 'Build type'
default: ''
type: string
cuda-major-version:
description: 'CUDA major version'
default: "12"
type: string
cuda-minor-version:
description: 'CUDA minor version'
default: "1"
type: string
platforms:
description: 'Platforms'
default: ''
type: string
tag-latest:
description: 'Tag latest'
default: ''
type: string
tag-suffix:
description: 'Tag suffix'
default: ''
type: string
runs-on:
description: 'Runs on'
required: true
default: ''
type: string
backend:
description: 'Backend to build'
required: true
type: string
context:
description: 'Build context'
required: true
type: string
dockerfile:
description: 'Build Dockerfile'
required: true
type: string
skip-drivers:
description: 'Skip drivers'
default: 'false'
type: string
secrets:
dockerUsername:
required: false
dockerPassword:
required: false
quayUsername:
required: true
quayPassword:
required: true
jobs:
backend-build:
runs-on: ${{ inputs.runs-on }}
env:
quay_username: ${{ secrets.quayUsername }}
steps:
- name: Free Disk Space (Ubuntu)
if: inputs.runs-on == 'ubuntu-latest'
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Force Install GIT latest
run: |
sudo apt-get update \
&& sudo apt-get install -y software-properties-common \
&& sudo apt-get update \
&& sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \
&& sudo apt-get install -y git
- name: Checkout
uses: actions/checkout@v5
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'
run: |
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
df -h
echo
sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
sudo apt-get remove --auto-remove android-sdk-platform-tools snapd || true
sudo apt-get purge --auto-remove android-sdk-platform-tools snapd || true
sudo rm -rf /usr/local/lib/android
sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
sudo rm -rf /usr/share/dotnet
sudo apt-get remove -y '^mono-.*' || true
sudo apt-get remove -y '^ghc-.*' || true
sudo apt-get remove -y '.*jdk.*|.*jre.*' || true
sudo apt-get remove -y 'php.*' || true
sudo apt-get remove -y hhvm powershell firefox monodoc-manual msbuild || true
sudo apt-get remove -y '^google-.*' || true
sudo apt-get remove -y azure-cli || true
sudo apt-get remove -y '^mongo.*-.*|^postgresql-.*|^mysql-.*|^mssql-.*' || true
sudo apt-get remove -y '^gfortran-.*' || true
sudo apt-get remove -y microsoft-edge-stable || true
sudo apt-get remove -y firefox || true
sudo apt-get remove -y powershell || true
sudo apt-get remove -y r-base-core || true
sudo apt-get autoremove -y
sudo apt-get clean
echo
echo "Listing top largest packages"
pkgs=$(dpkg-query -Wf '${Installed-Size}\t${Package}\t${Status}\n' | awk '$NF == "installed"{print $1 "\t" $2}' | sort -nr)
head -n 30 <<< "${pkgs}"
echo
sudo rm -rfv build || true
sudo rm -rf /usr/share/dotnet || true
sudo rm -rf /opt/ghc || true
sudo rm -rf "/usr/local/share/boost" || true
sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
df -h
- name: Docker meta
id: meta
if: github.event_name != 'pull_request'
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/local-ai-backends
localai/localai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=${{ inputs.tag-latest }}
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta for PR
id: meta_pull_request
if: github.event_name == 'pull_request'
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/ci-tests
tags: |
type=ref,event=branch,suffix=${{ github.event.number }}-${{ inputs.backend }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
type=semver,pattern={{raw}},suffix=${{ github.event.number }}-${{ inputs.backend }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
type=sha,suffix=${{ github.event.number }}-${{ inputs.backend }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
flavor: |
latest=${{ inputs.tag-latest }}
suffix=${{ inputs.tag-suffix }},onlatest=true
## End testing image
- name: Set up QEMU
uses: docker/setup-qemu-action@master
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@master
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.dockerUsername }}
password: ${{ secrets.dockerPassword }}
- name: Login to Quay.io
if: ${{ env.quay_username != '' }}
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ secrets.quayUsername }}
password: ${{ secrets.quayPassword }}
- name: Build and push
uses: docker/build-push-action@v6
if: github.event_name != 'pull_request'
with:
builder: ${{ steps.buildx.outputs.name }}
build-args: |
BUILD_TYPE=${{ inputs.build-type }}
SKIP_DRIVERS=${{ inputs.skip-drivers }}
CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
BASE_IMAGE=${{ inputs.base-image }}
BACKEND=${{ inputs.backend }}
context: ${{ inputs.context }}
file: ${{ inputs.dockerfile }}
cache-from: type=gha
platforms: ${{ inputs.platforms }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
- name: Build and push (PR)
uses: docker/build-push-action@v6
if: github.event_name == 'pull_request'
with:
builder: ${{ steps.buildx.outputs.name }}
build-args: |
BUILD_TYPE=${{ inputs.build-type }}
SKIP_DRIVERS=${{ inputs.skip-drivers }}
CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
BASE_IMAGE=${{ inputs.base-image }}
BACKEND=${{ inputs.backend }}
context: ${{ inputs.context }}
file: ${{ inputs.dockerfile }}
cache-from: type=gha
platforms: ${{ inputs.platforms }}
push: ${{ env.quay_username != '' }}
tags: ${{ steps.meta_pull_request.outputs.tags }}
labels: ${{ steps.meta_pull_request.outputs.labels }}
- name: job summary
run: |
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -0,0 +1,144 @@
---
name: 'build darwin python backend container images (reusable)'
on:
workflow_call:
inputs:
backend:
description: 'Backend to build'
required: true
type: string
build-type:
description: 'Build type (e.g., mps)'
default: ''
type: string
use-pip:
description: 'Use pip to install dependencies'
default: false
type: boolean
lang:
description: 'Programming language (e.g. go)'
default: 'python'
type: string
go-version:
description: 'Go version to use'
default: '1.24.x'
type: string
tag-suffix:
description: 'Tag suffix for the built image'
required: true
type: string
runs-on:
description: 'Runner to use'
default: 'macOS-14'
type: string
secrets:
dockerUsername:
required: false
dockerPassword:
required: false
quayUsername:
required: true
quayPassword:
required: true
jobs:
darwin-backend-build:
runs-on: ${{ inputs.runs-on }}
strategy:
matrix:
go-version: ['${{ inputs.go-version }}']
steps:
- name: Clone
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
# You can test your matrix by printing the current Go version
- name: Display Go version
run: go version
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
- name: Build ${{ inputs.backend }}-darwin
run: |
make protogen-go
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend
- name: Upload ${{ inputs.backend }}.tar
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.backend }}-tar
path: backend-images/${{ inputs.backend }}.tar
darwin-backend-publish:
needs: darwin-backend-build
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download ${{ inputs.backend }}.tar
uses: actions/download-artifact@v5
with:
name: ${{ inputs.backend }}-tar
path: .
- name: Install crane
run: |
curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
sudo mv crane /usr/local/bin/
- name: Log in to DockerHub
run: |
echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin
- name: Log in to quay.io
run: |
echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
localai/localai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta
id: quaymeta
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/local-ai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Push Docker image (DockerHub)
run: |
for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
- name: Push Docker image (Quay)
run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
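Publishing here never loads the image into a local daemon: crane pushes the saved tarball straight to each registry tag. Outside CI the same flow looks roughly like this (tarball and tag names are illustrative, not taken from the workflow):

```sh
echo "$QUAY_PASSWORD" | crane auth login quay.io -u "$QUAY_USERNAME" --password-stdin
# crane push <tarball> <reference> uploads an image tarball without a Docker daemon.
crane push diffusers.tar quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers
```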

.github/workflows/backend_pr.yml (new file, 78 lines)
View File

@@ -0,0 +1,78 @@
name: 'build backend container images (PR-filtered)'
on:
pull_request:
concurrency:
group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
matrix-darwin: ${{ steps.set-matrix.outputs.matrix-darwin }}
has-backends: ${{ steps.set-matrix.outputs.has-backends }}
has-backends-darwin: ${{ steps.set-matrix.outputs.has-backends-darwin }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Setup Bun
uses: oven-sh/setup-bun@v2
- name: Install dependencies
run: |
bun add js-yaml
bun add @octokit/core
# filters the matrix in backend.yml
- name: Filter matrix for changed backends
id: set-matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_EVENT_PATH: ${{ github.event_path }}
run: bun run scripts/changed-backends.js
backend-jobs:
needs: generate-matrix
uses: ./.github/workflows/backend_build.yml
if: needs.generate-matrix.outputs.has-backends == 'true'
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
backend: ${{ matrix.backend }}
dockerfile: ${{ matrix.dockerfile }}
skip-drivers: ${{ matrix.skip-drivers }}
context: ${{ matrix.context }}
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
backend-jobs-darwin:
needs: generate-matrix
uses: ./.github/workflows/backend_build_darwin.yml
if: needs.generate-matrix.outputs.has-backends-darwin == 'true'
with:
backend: ${{ matrix.backend }}
build-type: ${{ matrix.build-type }}
go-version: "1.24.x"
tag-suffix: ${{ matrix.tag-suffix }}
lang: ${{ matrix.lang || 'python' }}
use-pip: ${{ matrix.backend == 'diffusers' }}
runs-on: "macOS-14"
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix-darwin) }}
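Since the filter script only needs a token and the PR event payload, it can be dry-run locally along these lines (the saved-payload file name is an assumption):

```sh
export GITHUB_TOKEN="<personal access token>"
export GITHUB_EVENT_PATH="./pull_request_event.json"  # saved copy of the PR webhook payload
bun add js-yaml @octokit/core
bun run scripts/changed-backends.js
```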

.github/workflows/build-test.yaml (new file, 67 lines)
View File

@@ -0,0 +1,67 @@
name: Build test
on:
push:
branches:
- master
pull_request:
jobs:
build-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Run GoReleaser
run: |
make dev-dist
launcher-build-darwin:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for macOS ARM64
run: |
make build-launcher-darwin
ls -liah dist
- name: Upload macOS launcher artifacts
uses: actions/upload-artifact@v4
with:
name: launcher-macos
path: dist/
retention-days: 30
launcher-build-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for Linux
run: |
sudo apt-get update
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: actions/upload-artifact@v4
with:
name: launcher-linux
path: local-ai-launcher-linux.tar.xz
retention-days: 30

View File

@@ -10,30 +10,32 @@ jobs:
matrix:
include:
- repository: "ggml-org/llama.cpp"
variable: "CPPLLAMA_VERSION"
variable: "LLAMA_VERSION"
branch: "master"
- repository: "ggerganov/whisper.cpp"
file: "backend/cpp/llama-cpp/Makefile"
- repository: "ggml-org/whisper.cpp"
variable: "WHISPER_CPP_VERSION"
branch: "master"
file: "backend/go/whisper/Makefile"
- repository: "PABannier/bark.cpp"
variable: "BARKCPP_VERSION"
branch: "main"
file: "Makefile"
- repository: "leejet/stable-diffusion.cpp"
variable: "STABLEDIFFUSION_GGML_VERSION"
branch: "master"
- repository: "mudler/go-stable-diffusion"
variable: "STABLEDIFFUSION_VERSION"
branch: "master"
file: "backend/go/stablediffusion-ggml/Makefile"
- repository: "mudler/go-piper"
variable: "PIPER_VERSION"
branch: "master"
file: "backend/go/piper/Makefile"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- name: Bump dependencies 🔧
id: bump
run: |
bash .github/bump_deps.sh ${{ matrix.repository }} ${{ matrix.branch }} ${{ matrix.variable }}
bash .github/bump_deps.sh ${{ matrix.repository }} ${{ matrix.branch }} ${{ matrix.variable }} ${{ matrix.file }}
{
echo 'message<<EOF'
cat "${{ matrix.variable }}_message.txt"

View File

@@ -12,7 +12,7 @@ jobs:
- repository: "mudler/LocalAI"
runs-on: ubuntu-latest
steps:
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v5
- name: Bump dependencies 🔧
run: |
bash .github/bump_docs.sh ${{ matrix.repository }}

View File

@@ -5,7 +5,7 @@ on:
workflow_dispatch:
jobs:
checksum_check:
-   runs-on: arc-runner-set
+   runs-on: ubuntu-latest
steps:
- name: Force Install GIT latest
run: |
@@ -15,12 +15,11 @@ jobs:
&& sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \
&& sudo apt-get install -y git
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v5
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y pip wget
-         sudo pip install --upgrade pip
pip install huggingface_hub
- name: 'Setup yq'
uses: dcarbone/install-yq-action@v1.3.1

View File

@@ -14,13 +14,13 @@ jobs:
steps:
- name: Dependabot metadata
id: metadata
-       uses: dependabot/fetch-metadata@v2.3.0
+       uses: dependabot/fetch-metadata@v2.4.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
skip-commit-verification: true
- name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
- name: Approve a PR if not already approved
run: |

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
with:
submodules: true
- uses: actions/setup-go@v5
@@ -31,7 +31,7 @@ jobs:
make protogen-go
- name: Build api
run: |
-         CGO_ENABLED=0 make build-api
+         CGO_ENABLED=0 make build
- name: rm
uses: appleboy/ssh-action@v1.2.2
with:
@@ -42,7 +42,7 @@ jobs:
script: |
sudo rm -rf local-ai/ || true
- name: copy file via ssh
-       uses: appleboy/scp-action@v0.1.7
+       uses: appleboy/scp-action@v1.0.0
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}

View File

@@ -17,7 +17,7 @@ jobs:
matrix:
include:
- grpc-base-image: ubuntu:22.04
-         runs-on: 'arc-runner-set'
+         runs-on: 'ubuntu-latest'
platforms: 'linux/amd64,linux/arm64'
runs-on: ${{matrix.runs-on}}
steps:
@@ -73,7 +73,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
- name: Cache GRPC
uses: docker/build-push-action@v6

View File

@@ -15,7 +15,7 @@ jobs:
strategy:
matrix:
include:
-       - base-image: intel/oneapi-basekit:2025.1.0-0-devel-ubuntu22.04
+       - base-image: intel/oneapi-basekit:2025.2.0-0-devel-ubuntu22.04
runs-on: 'ubuntu-latest'
platforms: 'linux/amd64'
runs-on: ${{matrix.runs-on}}
@@ -43,7 +43,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
- name: Cache Intel images
uses: docker/build-push-action@v6

View File

@@ -9,13 +9,11 @@ concurrency:
cancel-in-progress: true
jobs:
- extras-image-build:
+ image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
-     ffmpeg: ${{ matrix.ffmpeg }}
-     image-type: ${{ matrix.image-type }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
@@ -33,108 +31,47 @@ jobs:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
fail-fast: false
matrix:
include:
# This is basically covered by the AIO test
# - build-type: ''
# platforms: 'linux/amd64'
# tag-latest: 'false'
# tag-suffix: '-ffmpeg'
# ffmpeg: 'true'
# image-type: 'extras'
# runs-on: 'arc-runner-set'
# base-image: "ubuntu:22.04"
# makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-ffmpeg'
ffmpeg: 'true'
image-type: 'extras'
runs-on: 'arc-runner-set'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
# - build-type: 'hipblas'
# platforms: 'linux/amd64'
# tag-latest: 'false'
# tag-suffix: '-hipblas'
# ffmpeg: 'false'
# image-type: 'extras'
# base-image: "rocm/dev-ubuntu-22.04:6.1"
# grpc-base-image: "ubuntu:22.04"
# runs-on: 'arc-runner-set'
# makeflags: "--jobs=3 --output-sync=target"
# - build-type: 'sycl_f16'
# platforms: 'linux/amd64'
# tag-latest: 'false'
# base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
# grpc-base-image: "ubuntu:22.04"
# tag-suffix: 'sycl-f16-ffmpeg'
# ffmpeg: 'true'
# image-type: 'extras'
# runs-on: 'arc-runner-set'
# makeflags: "--jobs=3 --output-sync=target"
# core-image-build:
# uses: ./.github/workflows/image_build.yml
# with:
# tag-latest: ${{ matrix.tag-latest }}
# tag-suffix: ${{ matrix.tag-suffix }}
# ffmpeg: ${{ matrix.ffmpeg }}
# image-type: ${{ matrix.image-type }}
# build-type: ${{ matrix.build-type }}
# cuda-major-version: ${{ matrix.cuda-major-version }}
# cuda-minor-version: ${{ matrix.cuda-minor-version }}
# platforms: ${{ matrix.platforms }}
# runs-on: ${{ matrix.runs-on }}
# base-image: ${{ matrix.base-image }}
# grpc-base-image: ${{ matrix.grpc-base-image }}
# makeflags: ${{ matrix.makeflags }}
# secrets:
# dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
# dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
# quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
# quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
# strategy:
# matrix:
# include:
# - build-type: ''
# platforms: 'linux/amd64'
# tag-latest: 'false'
# tag-suffix: '-ffmpeg-core'
# ffmpeg: 'true'
# image-type: 'core'
# runs-on: 'ubuntu-latest'
# base-image: "ubuntu:22.04"
# makeflags: "--jobs=4 --output-sync=target"
# - build-type: 'sycl_f16'
# platforms: 'linux/amd64'
# tag-latest: 'false'
# base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
# grpc-base-image: "ubuntu:22.04"
# tag-suffix: 'sycl-f16-ffmpeg-core'
# ffmpeg: 'true'
# image-type: 'core'
# runs-on: 'arc-runner-set'
# makeflags: "--jobs=3 --output-sync=target"
# - build-type: 'cublas'
# cuda-major-version: "12"
# cuda-minor-version: "0"
# platforms: 'linux/amd64'
# tag-latest: 'false'
# tag-suffix: '-cublas-cuda12-ffmpeg-core'
# ffmpeg: 'true'
# image-type: 'core'
# runs-on: 'ubuntu-latest'
# base-image: "ubuntu:22.04"
# makeflags: "--jobs=4 --output-sync=target"
# - build-type: 'vulkan'
# platforms: 'linux/amd64'
# tag-latest: 'false'
# tag-suffix: '-vulkan-ffmpeg-core'
# ffmpeg: 'true'
# image-type: 'core'
# runs-on: 'ubuntu-latest'
# base-image: "ubuntu:22.04"
# makeflags: "--jobs=4 --output-sync=target"
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: 'sycl'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-vulkan-core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"

View File

@@ -18,8 +18,6 @@ jobs:
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
-     ffmpeg: ${{ matrix.ffmpeg }}
-     image-type: ${{ matrix.image-type }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
@@ -29,249 +27,29 @@ jobs:
grpc-base-image: ${{ matrix.grpc-base-image }}
aio: ${{ matrix.aio }}
makeflags: ${{ matrix.makeflags }}
latest-image: ${{ matrix.latest-image }}
latest-image-aio: ${{ matrix.latest-image-aio }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: 2
matrix:
include:
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-hipblas-ffmpeg'
ffmpeg: 'true'
image-type: 'extras'
tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-hipblas"
base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
latest-image: 'latest-gpu-hipblas'
latest-image-aio: 'latest-aio-gpu-hipblas'
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
ffmpeg: 'false'
image-type: 'extras'
base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
latest-image: 'latest-gpu-hipblas-core'
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas-core'
ffmpeg: 'false'
image-type: 'core'
base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
self-hosted-jobs:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
ffmpeg: ${{ matrix.ffmpeg }}
image-type: ${{ matrix.image-type }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
aio: ${{ matrix.aio }}
makeflags: ${{ matrix.makeflags }}
latest-image: ${{ matrix.latest-image }}
latest-image-aio: ${{ matrix.latest-image-aio }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 5 || 8 }}
matrix:
include:
# Extra images
- build-type: ''
#platforms: 'linux/amd64,linux/arm64'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: ''
ffmpeg: ''
image-type: 'extras'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
- build-type: ''
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-ffmpeg'
ffmpeg: 'true'
image-type: 'extras'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda11'
ffmpeg: ''
image-type: 'extras'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12'
ffmpeg: ''
image-type: 'extras'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cublas-cuda11-ffmpeg'
ffmpeg: 'true'
image-type: 'extras'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
aio: "-aio-gpu-nvidia-cuda-11"
latest-image: 'latest-gpu-nvidia-cuda-11'
latest-image-aio: 'latest-aio-gpu-nvidia-cuda-11'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cublas-cuda12-ffmpeg'
ffmpeg: 'true'
image-type: 'extras'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
aio: "-aio-gpu-nvidia-cuda-12"
latest-image: 'latest-gpu-nvidia-cuda-12'
latest-image-aio: 'latest-aio-gpu-nvidia-cuda-12'
makeflags: "--jobs=3 --output-sync=target"
- build-type: ''
#platforms: 'linux/amd64,linux/arm64'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: ''
ffmpeg: ''
image-type: 'extras'
base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl_f16'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-sycl-f16-ffmpeg'
ffmpeg: 'true'
image-type: 'extras'
runs-on: 'arc-runner-set'
aio: "-aio-gpu-intel-f16"
latest-image: 'latest-gpu-intel-f16'
latest-image-aio: 'latest-aio-gpu-intel-f16'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl_f32'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-sycl-f32-ffmpeg'
ffmpeg: 'true'
image-type: 'extras'
runs-on: 'arc-runner-set'
aio: "-aio-gpu-intel-f32"
latest-image: 'latest-gpu-intel-f32'
latest-image-aio: 'latest-aio-gpu-intel-f32'
makeflags: "--jobs=3 --output-sync=target"
# Core images
- build-type: 'sycl_f16'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-sycl-f16-core'
ffmpeg: 'false'
image-type: 'core'
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl_f32'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-sycl-f32-core'
ffmpeg: 'false'
image-type: 'core'
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl_f16'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-sycl-f16-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
latest-image: 'latest-gpu-intel-f16-core'
- build-type: 'sycl_f32'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-sycl-f32-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
latest-image: 'latest-gpu-intel-f32-core'
core-image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
ffmpeg: ${{ matrix.ffmpeg }}
image-type: ${{ matrix.image-type }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
@@ -281,8 +59,6 @@ jobs:
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
-     latest-image: ${{ matrix.latest-image }}
-     latest-image-aio: ${{ matrix.latest-image-aio }}
skip-drivers: ${{ matrix.skip-drivers }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -290,90 +66,75 @@ jobs:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
-     max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
+     #max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
matrix:
include:
- build-type: ''
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
tag-suffix: ''
base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
runs-on: 'ubuntu-latest'
aio: "-aio-cpu"
latest-image: 'latest-cpu'
latest-image-aio: 'latest-aio-cpu'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda11-core'
ffmpeg: ''
image-type: 'core'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
aio: "-aio-gpu-nvidia-cuda-11"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-core'
ffmpeg: ''
image-type: 'core'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda11-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
latest-image: 'latest-gpu-nvidia-cuda-12-core'
aio: "-aio-gpu-nvidia-cuda-12"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'arc-runner-set'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
latest-image: 'latest-gpu-nvidia-cuda-12-core'
aio: "-aio-gpu-nvidia-cuda-13"
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-vulkan-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'arc-runner-set'
tag-latest: 'auto'
tag-suffix: '-gpu-vulkan'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
latest-image: 'latest-gpu-vulkan-core'
aio: "-aio-gpu-vulkan"
- build-type: 'intel'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel"
gh-runner:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
ffmpeg: ${{ matrix.ffmpeg }}
image-type: ${{ matrix.image-type }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
@@ -383,8 +144,6 @@ jobs:
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
-     latest-image: ${{ matrix.latest-image }}
-     latest-image-aio: ${{ matrix.latest-image-aio }}
skip-drivers: ${{ matrix.skip-drivers }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
@@ -398,12 +157,9 @@ jobs:
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
-       tag-latest: 'false'
-       tag-suffix: '-nvidia-l4t-arm64-core'
-       latest-image: 'latest-nvidia-l4t-arm64-core'
-       ffmpeg: 'true'
-       image-type: 'core'
+       tag-latest: 'auto'
+       tag-suffix: '-nvidia-l4t-arm64'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'true'
skip-drivers: 'true'
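Net effect of the renames above: the separate ffmpeg/core variants collapse into a single image per accelerator, so pulling a master build would look like this (tag names inferred from the new tag-suffix values, not confirmed against the registries):

```sh
docker pull localai/localai:master-gpu-nvidia-cuda-12
docker pull localai/localai:master-gpu-hipblas
docker pull quay.io/go-skynet/local-ai:master-gpu-vulkan
```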

View File

@@ -33,30 +33,14 @@ on:
description: 'Tag latest'
default: ''
type: string
-   latest-image:
-     description: 'Tag latest'
-     default: ''
-     type: string
-   latest-image-aio:
-     description: 'Tag latest'
-     default: ''
-     type: string
tag-suffix:
description: 'Tag suffix'
default: ''
type: string
-   ffmpeg:
-     description: 'FFMPEG'
-     default: ''
-     type: string
skip-drivers:
description: 'Skip drivers by default'
default: 'false'
type: string
-   image-type:
-     description: 'Image type'
-     default: ''
-     type: string
runs-on:
description: 'Runs on'
required: true
@@ -85,6 +69,22 @@ jobs:
reusable_image-build:
runs-on: ${{ inputs.runs-on }}
steps:
+     - name: Free Disk Space (Ubuntu)
+       if: inputs.runs-on == 'ubuntu-latest'
+       uses: jlumbroso/free-disk-space@main
+       with:
+         # this might remove tools that are actually needed,
+         # if set to "true" but frees about 6 GB
+         tool-cache: true
+         # all of these default to true, but feel free to set to
+         # "false" if necessary for your workflow
+         android: true
+         dotnet: true
+         haskell: true
+         large-packages: true
+         docker-images: true
+         swap-storage: true
- name: Force Install GIT latest
run: |
sudo apt-get update \
@@ -94,7 +94,7 @@ jobs:
&& sudo apt-get update \
&& sudo apt-get install -y git
- name: Checkout
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'
@@ -106,8 +106,8 @@ jobs:
df -h
echo
sudo apt-get remove -y '^llvm-.*|^libllvm.*' || true
-         sudo apt-get remove --auto-remove android-sdk-platform-tools || true
-         sudo apt-get purge --auto-remove android-sdk-platform-tools || true
+         sudo apt-get remove --auto-remove android-sdk-platform-tools snapd || true
+         sudo apt-get purge --auto-remove android-sdk-platform-tools snapd || true
sudo rm -rf /usr/local/lib/android
sudo apt-get remove -y '^dotnet-.*|^aspnetcore-.*' || true
sudo rm -rf /usr/share/dotnet
@@ -152,18 +152,18 @@ jobs:
type=sha
flavor: |
latest=${{ inputs.tag-latest }}
-         suffix=${{ inputs.tag-suffix }}
+         suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta for PR
id: meta_pull_request
if: github.event_name == 'pull_request'
uses: docker/metadata-action@v5
with:
images: |
-         ttl.sh/localai-ci-pr-${{ github.event.number }}
+         quay.io/go-skynet/ci-tests
tags: |
-         type=ref,event=branch
-         type=semver,pattern={{raw}}
-         type=sha
+         type=ref,event=branch,suffix=localai${{ github.event.number }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
+         type=semver,pattern={{raw}},suffix=localai${{ github.event.number }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
+         type=sha,suffix=localai${{ github.event.number }}-${{ inputs.build-type }}-${{ inputs.cuda-major-version }}-${{ inputs.cuda-minor-version }}
flavor: |
latest=${{ inputs.tag-latest }}
suffix=${{ inputs.tag-suffix }}
@@ -179,7 +179,7 @@ jobs:
type=semver,pattern={{raw}}
flavor: |
latest=${{ inputs.tag-latest }}
-         suffix=${{ inputs.aio }}
+         suffix=${{ inputs.aio }},onlatest=true
- name: Docker meta AIO (dockerhub)
if: inputs.aio != ''
@@ -192,7 +192,8 @@ jobs:
type=ref,event=branch
type=semver,pattern={{raw}}
flavor: |
-         suffix=${{ inputs.aio }}
+         latest=${{ inputs.tag-latest }}
+         suffix=${{ inputs.aio }},onlatest=true
- name: Set up QEMU
uses: docker/setup-qemu-action@master
@@ -231,8 +232,6 @@ jobs:
BUILD_TYPE=${{ inputs.build-type }}
CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
-           FFMPEG=${{ inputs.ffmpeg }}
-           IMAGE_TYPE=${{ inputs.image-type }}
BASE_IMAGE=${{ inputs.base-image }}
GRPC_BASE_IMAGE=${{ inputs.grpc-base-image || inputs.base-image }}
GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
@@ -260,8 +259,6 @@ jobs:
BUILD_TYPE=${{ inputs.build-type }}
CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
-           FFMPEG=${{ inputs.ffmpeg }}
-           IMAGE_TYPE=${{ inputs.image-type }}
BASE_IMAGE=${{ inputs.base-image }}
GRPC_BASE_IMAGE=${{ inputs.grpc-base-image || inputs.base-image }}
GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
@@ -272,13 +269,9 @@ jobs:
file: ./Dockerfile
cache-from: type=gha
platforms: ${{ inputs.platforms }}
-         push: true
+         #push: true
tags: ${{ steps.meta_pull_request.outputs.tags }}
labels: ${{ steps.meta_pull_request.outputs.labels }}
-     - name: Testing image
-       if: github.event_name == 'pull_request'
-       run: |
-         echo "Image is available at ttl.sh/localai-ci-pr-${{ github.event.number }}:${{ steps.meta_pull_request.outputs.version }}" >> $GITHUB_STEP_SUMMARY
## End testing image
- name: Build and push AIO image
if: inputs.aio != ''
@@ -310,32 +303,6 @@ jobs:
tags: ${{ steps.meta_aio_dockerhub.outputs.tags }}
labels: ${{ steps.meta_aio_dockerhub.outputs.labels }}
- name: Cleanup
run: |
docker builder prune -f
docker system prune --force --volumes --all
-     - name: Latest tag
-       # run this on branches, when it is a tag and there is a latest-image defined
-       if: github.event_name != 'pull_request' && inputs.latest-image != '' && github.ref_type == 'tag'
-       run: |
-         docker pull localai/localai:${{ steps.meta.outputs.version }}
-         docker tag localai/localai:${{ steps.meta.outputs.version }} localai/localai:${{ inputs.latest-image }}
-         docker push localai/localai:${{ inputs.latest-image }}
-         docker pull quay.io/go-skynet/local-ai:${{ steps.meta.outputs.version }}
-         docker tag quay.io/go-skynet/local-ai:${{ steps.meta.outputs.version }} quay.io/go-skynet/local-ai:${{ inputs.latest-image }}
-         docker push quay.io/go-skynet/local-ai:${{ inputs.latest-image }}
-     - name: Latest AIO tag
-       # run this on branches, when it is a tag and there is a latest-image defined
-       if: github.event_name != 'pull_request' && inputs.latest-image-aio != '' && github.ref_type == 'tag'
-       run: |
-         docker pull localai/localai:${{ steps.meta_aio_dockerhub.outputs.version }}
-         docker tag localai/localai:${{ steps.meta_aio_dockerhub.outputs.version }} localai/localai:${{ inputs.latest-image-aio }}
-         docker push localai/localai:${{ inputs.latest-image-aio }}
-         docker pull quay.io/go-skynet/local-ai:${{ steps.meta_aio.outputs.version }}
-         docker tag quay.io/go-skynet/local-ai:${{ steps.meta_aio.outputs.version }} quay.io/go-skynet/local-ai:${{ inputs.latest-image-aio }}
-         docker push quay.io/go-skynet/local-ai:${{ inputs.latest-image-aio }}
- name: job summary
run: |
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -9,4 +9,4 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
-     - uses: actions/labeler@v5
+     - uses: actions/labeler@v6

View File

@@ -13,7 +13,7 @@ jobs:
if: ${{ github.actor == 'localai-bot' }}
steps:
- name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
- name: Approve a PR if not already approved
run: |

View File

@@ -11,14 +11,14 @@ jobs:
MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest
steps:
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v5
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
- uses: mudler/localai-github-action@v1
with:
model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
# Check the PR diff using the current branch and the base branch of the PR
-     - uses: GrantBirki/git-diff-action@v2.8.0
+     - uses: GrantBirki/git-diff-action@v2.8.1
id: git-diff-action
with:
json_diff_file_output: diff.json
@@ -79,7 +79,7 @@ jobs:
args: ${{ steps.summarize.outputs.message }}
- name: Setup tmate session if fails
if: ${{ failure() }}
-       uses: mxschmitt/action-tmate@v3.19
+       uses: mxschmitt/action-tmate@v3.22
with:
detached: true
connect-timeout-seconds: 180
@@ -90,16 +90,16 @@ jobs:
MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest
steps:
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v5
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
- name: Start LocalAI
run: |
echo "Starting LocalAI..."
-         docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
+         docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master run --debug $MODEL_NAME
until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
# Check the PR diff using the current branch and the base branch of the PR
-     - uses: GrantBirki/git-diff-action@v2.8.0
+     - uses: GrantBirki/git-diff-action@v2.8.1
id: git-diff-action
with:
json_diff_file_output: diff.json
@@ -161,7 +161,7 @@ jobs:
TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
- name: Setup tmate session if fails
if: ${{ failure() }}
-       uses: mxschmitt/action-tmate@v3.19
+       uses: mxschmitt/action-tmate@v3.22
with:
detached: true
connect-timeout-seconds: 180
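The readiness loop in the diff above polls the container's Docker health status. An equivalent check against the HTTP API would look roughly like this (a sketch; the /readyz route and flags are assumptions, not taken from the workflow):

```sh
docker run -d --name local-ai -p 8080:8080 localai/localai:master run --debug gemma-3-12b-it
until curl -sf http://localhost:8080/readyz > /dev/null; do
  echo "Waiting for LocalAI to become ready"
  docker logs --tail 5 local-ai
  sleep 2
done
```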

View File

@@ -1,324 +1,64 @@
-name: Build and Release
+name: goreleaser
on:
push:
branches:
- master
tags:
- 'v*'
pull_request:
env:
GRPC_VERSION: v1.65.0
permissions:
contents: write
concurrency:
group: ci-releases-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
- build-linux-arm:
+ goreleaser:
runs-on: ubuntu-latest
steps:
-     - name: Clone
-       uses: actions/checkout@v4
+     - name: Checkout
+       uses: actions/checkout@v5
with:
submodules: true
- uses: actions/setup-go@v5
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.21.x'
cache: false
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg protobuf-compiler ccache upx-ucl gawk
sudo apt-get install -qy binutils-aarch64-linux-gnu gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libgmock-dev
- name: Install CUDA Dependencies
run: |
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/cross-linux-aarch64/cuda-keyring_1.1-1_all.deb
sudo dpkg -i cuda-keyring_1.1-1_all.deb
sudo apt-get update
sudo apt-get install -y cuda-cross-aarch64 cuda-nvcc-cross-aarch64-${CUDA_VERSION} libcublas-cross-aarch64-${CUDA_VERSION}
go-version: 1.23
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
version: v2.11.0
args: release --clean
env:
CUDA_VERSION: 12-4
- name: Cache grpc
id: cache-grpc
uses: actions/cache@v4
with:
path: grpc
key: ${{ runner.os }}-arm-grpc-${{ env.GRPC_VERSION }}
- name: Build grpc
if: steps.cache-grpc.outputs.cache-hit != 'true'
run: |
git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
cd grpc && sed -i "216i\ TESTONLY" "third_party/abseil-cpp/absl/container/CMakeLists.txt" && mkdir -p cmake/build && \
cd cmake/build && cmake -DgRPC_INSTALL=ON \
-DgRPC_BUILD_TESTS=OFF \
../.. && sudo make --jobs 5 --output-sync=target
- name: Install gRPC
run: |
GNU_HOST=aarch64-linux-gnu
C_COMPILER_ARM_LINUX=$GNU_HOST-gcc
CXX_COMPILER_ARM_LINUX=$GNU_HOST-g++
CROSS_TOOLCHAIN=/usr/$GNU_HOST
CROSS_STAGING_PREFIX=$CROSS_TOOLCHAIN/stage
CMAKE_CROSS_TOOLCHAIN=/tmp/arm.toolchain.cmake
# https://cmake.org/cmake/help/v3.13/manual/cmake-toolchains.7.html#cross-compiling-for-linux
echo "set(CMAKE_SYSTEM_NAME Linux)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_SYSTEM_PROCESSOR arm)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_STAGING_PREFIX $CROSS_STAGING_PREFIX)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_SYSROOT ${CROSS_TOOLCHAIN}/sysroot)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_C_COMPILER /usr/bin/$C_COMPILER_ARM_LINUX)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_CXX_COMPILER /usr/bin/$CXX_COMPILER_ARM_LINUX)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)" >> $CMAKE_CROSS_TOOLCHAIN && \
echo "set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)" >> $CMAKE_CROSS_TOOLCHAIN
GRPC_DIR=$PWD/grpc
cd grpc && cd cmake/build && sudo make --jobs 5 --output-sync=target install && \
GRPC_CROSS_BUILD_DIR=$GRPC_DIR/cmake/cross_build && \
mkdir -p $GRPC_CROSS_BUILD_DIR && \
cd $GRPC_CROSS_BUILD_DIR && \
cmake -DCMAKE_TOOLCHAIN_FILE=$CMAKE_CROSS_TOOLCHAIN \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=$CROSS_TOOLCHAIN/grpc_install \
../.. && \
sudo make -j`nproc` install
- name: Build
id: build
run: |
GNU_HOST=aarch64-linux-gnu
C_COMPILER_ARM_LINUX=$GNU_HOST-gcc
CXX_COMPILER_ARM_LINUX=$GNU_HOST-g++
CROSS_TOOLCHAIN=/usr/$GNU_HOST
CROSS_STAGING_PREFIX=$CROSS_TOOLCHAIN/stage
CMAKE_CROSS_TOOLCHAIN=/tmp/arm.toolchain.cmake
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
export PATH=$PATH:$GOPATH/bin
export PATH=/usr/local/cuda/bin:$PATH
sudo rm -rf /usr/aarch64-linux-gnu/lib/libstdc++.so.6
sudo cp -rf /usr/aarch64-linux-gnu/lib/libstdc++.so* /usr/aarch64-linux-gnu/lib/libstdc++.so.6
sudo cp /usr/aarch64-linux-gnu/lib/ld-linux-aarch64.so.1 ld.so
BACKEND_LIBS="./grpc/cmake/cross_build/third_party/re2/libre2.a ./grpc/cmake/cross_build/libgrpc.a ./grpc/cmake/cross_build/libgrpc++.a ./grpc/cmake/cross_build/third_party/protobuf/libprotobuf.a /usr/aarch64-linux-gnu/lib/libc.so.6 /usr/aarch64-linux-gnu/lib/libstdc++.so.6 /usr/aarch64-linux-gnu/lib/libgomp.so.1 /usr/aarch64-linux-gnu/lib/libm.so.6 /usr/aarch64-linux-gnu/lib/libgcc_s.so.1 /usr/aarch64-linux-gnu/lib/libdl.so.2 /usr/aarch64-linux-gnu/lib/libpthread.so.0 ./ld.so" \
GOOS=linux \
GOARCH=arm64 \
CMAKE_ARGS="-DProtobuf_INCLUDE_DIRS=$CROSS_STAGING_PREFIX/include -DProtobuf_DIR=$CROSS_STAGING_PREFIX/lib/cmake/protobuf -DgRPC_DIR=$CROSS_STAGING_PREFIX/lib/cmake/grpc -DCMAKE_TOOLCHAIN_FILE=$CMAKE_CROSS_TOOLCHAIN -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++" make dist-cross-linux-arm64
- uses: actions/upload-artifact@v4
with:
name: LocalAI-linux-arm64
path: release/
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: |
release/*
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.19
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
  build-linux:
    runs-on: arc-runner-set
    steps:
      - name: Force Install GIT latest
        run: |
          sudo apt-get update \
          && sudo apt-get install -y software-properties-common \
          && sudo apt-get update \
          && sudo add-apt-repository -y ppa:git-core/ppa \
          && sudo apt-get update \
          && sudo apt-get install -y git
      - name: Clone
        uses: actions/checkout@v4
        with:
          submodules: true
      - uses: actions/setup-go@v5
        with:
          go-version: '1.21.x'
          cache: false
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y wget curl build-essential ffmpeg protobuf-compiler ccache upx-ucl gawk cmake libgmock-dev
- name: Intel Dependencies
run: |
wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null
echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list
sudo apt update
sudo apt install -y intel-basekit
- name: Install CUDA Dependencies
run: |
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
sudo dpkg -i cuda-keyring_1.1-1_all.deb
sudo apt-get update
sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION}
env:
CUDA_VERSION: 12-5
- name: "Install Hipblas"
env:
ROCM_VERSION: "6.1"
AMDGPU_VERSION: "6.1"
run: |
set -ex
sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates curl libnuma-dev gnupg
curl -sL https://repo.radeon.com/rocm/rocm.gpg.key | sudo apt-key add -
printf "deb [arch=amd64] https://repo.radeon.com/rocm/apt/$ROCM_VERSION/ jammy main" | sudo tee /etc/apt/sources.list.d/rocm.list
printf "deb [arch=amd64] https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
printf 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
hipblas-dev rocm-dev \
rocblas-dev
sudo apt-get clean
sudo rm -rf /var/lib/apt/lists/*
sudo ldconfig
- name: Cache grpc
id: cache-grpc
uses: actions/cache@v4
with:
path: grpc
key: ${{ runner.os }}-grpc-${{ env.GRPC_VERSION }}
- name: Build grpc
if: steps.cache-grpc.outputs.cache-hit != 'true'
run: |
git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
cd grpc && sed -i "216i\ TESTONLY" "third_party/abseil-cpp/absl/container/CMakeLists.txt" && mkdir -p cmake/build && \
cd cmake/build && cmake -DgRPC_INSTALL=ON \
-DgRPC_BUILD_TESTS=OFF \
../.. && sudo make --jobs 5 --output-sync=target
- name: Install gRPC
run: |
cd grpc && cd cmake/build && sudo make --jobs 5 --output-sync=target install
# BACKEND_LIBS needed for gpu-workload: /opt/intel/oneapi/*/lib/libiomp5.so /opt/intel/oneapi/*/lib/libmkl_core.so /opt/intel/oneapi/*/lib/libmkl_core.so.2 /opt/intel/oneapi/*/lib/libmkl_intel_ilp64.so /opt/intel/oneapi/*/lib/libmkl_intel_ilp64.so.2 /opt/intel/oneapi/*/lib/libmkl_sycl_blas.so /opt/intel/oneapi/*/lib/libmkl_sycl_blas.so.4 /opt/intel/oneapi/*/lib/libmkl_tbb_thread.so /opt/intel/oneapi/*/lib/libmkl_tbb_thread.so.2 /opt/intel/oneapi/*/lib/libsycl.so /opt/intel/oneapi/*/lib/libsycl.so.7 /opt/intel/oneapi/*/lib/libsycl.so.7.1.0 /opt/rocm-*/lib/libamdhip64.so /opt/rocm-*/lib/libamdhip64.so.5 /opt/rocm-*/lib/libamdhip64.so.6 /opt/rocm-*/lib/libamdhip64.so.6.1.60100 /opt/rocm-*/lib/libhipblas.so /opt/rocm-*/lib/libhipblas.so.2 /opt/rocm-*/lib/libhipblas.so.2.1.60100 /opt/rocm-*/lib/librocblas.so /opt/rocm-*/lib/librocblas.so.4 /opt/rocm-*/lib/librocblas.so.4.1.60100 /usr/lib/x86_64-linux-gnu/libstdc++.so.6 /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/x86_64-linux-gnu/libm.so.6 /usr/lib/x86_64-linux-gnu/libgcc_s.so.1 /usr/lib/x86_64-linux-gnu/libc.so.6 /usr/lib/x86_64-linux-gnu/librt.so.1 /usr/local/cuda-*/targets/x86_64-linux/lib/libcublas.so /usr/local/cuda-*/targets/x86_64-linux/lib/libcublasLt.so /usr/local/cuda-*/targets/x86_64-linux/lib/libcudart.so /usr/local/cuda-*/targets/x86_64-linux/lib/stubs/libcuda.so
- name: Build
id: build
run: |
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
export PATH=$PATH:$GOPATH/bin
export PATH=/usr/local/cuda/bin:$PATH
export PATH=/opt/rocm/bin:$PATH
source /opt/intel/oneapi/setvars.sh
sudo cp /lib64/ld-linux-x86-64.so.2 ld.so
BACKEND_LIBS="./ld.so ./sources/go-piper/piper/build/fi/lib/libfmt.a ./sources/go-piper/piper-phonemize/pi/lib/libonnxruntime.so.1.14.1 ./sources/go-piper/piper-phonemize/pi/src/libespeak-ng/libespeak-ng.so /usr/lib/x86_64-linux-gnu/libdl.so.2 /usr/lib/x86_64-linux-gnu/librt.so.1 /usr/lib/x86_64-linux-gnu/libpthread.so.0 ./sources/go-piper/piper-phonemize/pi/lib/libpiper_phonemize.so.1 ./sources/go-piper/piper/build/si/lib/libspdlog.a ./sources/go-piper/espeak/ei/lib/libucd.so" \
make -j4 dist
- uses: actions/upload-artifact@v4
with:
name: LocalAI-linux
path: release/
      - name: Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*
      - name: Setup tmate session if tests fail
        if: ${{ failure() }}
        uses: mxschmitt/action-tmate@v3.19
        with:
          detached: true
          connect-timeout-seconds: 180
          limit-access-to-actor: true
  launcher-build-darwin:
    runs-on: macos-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.23
      - name: Build launcher for macOS ARM64
        run: |
          make build-launcher-darwin
      - name: Upload DMG to Release
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: ./dist/LocalAI.dmg
  launcher-build-linux:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: 1.23
      - name: Build launcher for Linux
        run: |
          sudo apt-get update
          sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
          make build-launcher-linux
      - name: Upload Linux launcher artifacts
        uses: softprops/action-gh-release@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: ./local-ai-launcher-linux.tar.xz
build-macOS-x86_64:
runs-on: macos-13
steps:
- name: Clone
uses: actions/checkout@v4
with:
submodules: true
- uses: actions/setup-go@v5
with:
go-version: '1.21.x'
cache: false
- name: Dependencies
run: |
brew install protobuf grpc
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@8ba23be9613c672d40ae261d2a1335d639bdd59b
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.0
- name: Build
id: build
run: |
export C_INCLUDE_PATH=/usr/local/include
export CPLUS_INCLUDE_PATH=/usr/local/include
export PATH=$PATH:$GOPATH/bin
export SKIP_GRPC_BACKEND=backend-assets/grpc/whisper
make dist
- uses: actions/upload-artifact@v4
with:
name: LocalAI-MacOS-x86_64
path: release/
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: |
release/*
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.19
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
build-macOS-arm64:
runs-on: macos-14
steps:
- name: Clone
uses: actions/checkout@v4
with:
submodules: true
- uses: actions/setup-go@v5
with:
go-version: '1.21.x'
cache: false
- name: Dependencies
run: |
brew install protobuf grpc libomp llvm
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
- name: Build
id: build
run: |
export C_INCLUDE_PATH=/usr/local/include
export CPLUS_INCLUDE_PATH=/usr/local/include
export PATH=$PATH:$GOPATH/bin
export CC=/opt/homebrew/opt/llvm/bin/clang
make dist
- uses: actions/upload-artifact@v4
with:
name: LocalAI-MacOS-arm64
path: release/
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: |
release/*
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.19
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true


@@ -14,11 +14,11 @@ jobs:
GO111MODULE: on
steps:
- name: Checkout Source
uses: actions/checkout@v4
uses: actions/checkout@v5
if: ${{ github.actor != 'dependabot[bot]' }}
- name: Run Gosec Security Scanner
if: ${{ github.actor != 'dependabot[bot]' }}
uses: securego/gosec@v2.22.3
uses: securego/gosec@v2.22.8
with:
# we let the report content trigger a failure using the GitHub Security features.
args: '-no-fail -fmt sarif -out results.sarif ./...'

.github/workflows/stalebot.yml vendored Normal file

@@ -0,0 +1,24 @@
name: 'Close stale issues and PRs'
permissions:
issues: write
pull-requests: write
on:
schedule:
- cron: '30 1 * * *'
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v9
with:
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.'
days-before-issue-stale: 90
days-before-pr-stale: 90
days-before-issue-close: 5
days-before-pr-close: 10
exempt-issue-labels: 'roadmap'
exempt-pr-labels: 'roadmap'


@@ -14,11 +14,33 @@ concurrency:
cancel-in-progress: true
jobs:
# Requires CUDA
# tests-chatterbox-tts:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
# run: |
# sudo apt-get update
# sudo apt-get install build-essential ffmpeg
# # Install UV
# curl -LsSf https://astral.sh/uv/install.sh | sh
# sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# sudo apt-get install -y libopencv-dev
# pip install --user --no-cache-dir grpcio-tools==1.64.1
# - name: Test chatterbox-tts
# run: |
# make --jobs=5 --output-sync=target -C backend/python/chatterbox
# make --jobs=5 --output-sync=target -C backend/python/chatterbox test
tests-transformers:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -39,7 +61,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -61,7 +83,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -78,11 +100,31 @@ jobs:
make --jobs=5 --output-sync=target -C backend/python/diffusers
make --jobs=5 --output-sync=target -C backend/python/diffusers test
#tests-vllm:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
# run: |
# sudo apt-get update
# sudo apt-get install -y build-essential ffmpeg
# sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# sudo apt-get install -y libopencv-dev
# # Install UV
# curl -LsSf https://astral.sh/uv/install.sh | sh
# pip install --user --no-cache-dir grpcio-tools==1.64.1
# - name: Test vllm backend
# run: |
# make --jobs=5 --output-sync=target -C backend/python/vllm
# make --jobs=5 --output-sync=target -C backend/python/vllm test
# tests-transformers-musicgen:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -144,7 +186,7 @@ jobs:
# sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
# df -h
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -169,7 +211,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v4
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -190,7 +232,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies


@@ -23,6 +23,20 @@ jobs:
matrix:
go-version: ['1.21.x']
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Release space from worker
run: |
echo "Listing top largest packages"
@@ -56,7 +70,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -67,18 +81,20 @@ jobs:
# You can test your matrix by printing the current Go version
- name: Display Go version
run: go version
- name: Proto Dependencies
run: |
# Install protoc
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
PATH="$PATH:$HOME/go/bin" make protogen-go
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ccache upx-ucl curl ffmpeg
sudo apt-get install -y libgmock-dev
curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg && \
sudo install -o root -g root -m 644 conda.gpg /usr/share/keyrings/conda-archive-keyring.gpg && \
gpg --keyring /usr/share/keyrings/conda-archive-keyring.gpg --no-default-keyring --fingerprint 34161F5BF5EB1D4BFBBB8F0A8AEB4F8B29D82806 && \
sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" > /etc/apt/sources.list.d/conda.list' && \
sudo /bin/bash -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" | tee -a /etc/apt/sources.list.d/conda.list' && \
sudo apt-get update && \
sudo apt-get install -y conda
sudo apt-get install -y libgmock-dev clang
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
sudo apt-get install -y ca-certificates cmake patch python3-pip unzip
@@ -94,43 +110,21 @@ jobs:
sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION}
export CUDACXX=/usr/local/cuda/bin/nvcc
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
# The python3-grpc-tools package in 22.04 is too old
pip install --user grpcio-tools
pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
make -C backend/python/transformers
# Pre-build piper before we start tests in order to have shared libraries in place
make sources/go-piper && \
GO_TAGS="tts" make -C sources/go-piper piper.o && \
sudo cp -rfv sources/go-piper/piper-phonemize/pi/lib/. /usr/lib/
make backends/huggingface backends/llama-cpp backends/local-store backends/silero-vad backends/piper backends/whisper backends/stablediffusion-ggml
env:
CUDA_VERSION: 12-4
- name: Cache grpc
id: cache-grpc
uses: actions/cache@v4
with:
path: grpc
key: ${{ runner.os }}-grpc-${{ env.GRPC_VERSION }}
- name: Build grpc
if: steps.cache-grpc.outputs.cache-hit != 'true'
run: |
git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --jobs 5 --shallow-submodules https://github.com/grpc/grpc && \
cd grpc && sed -i "216i\ TESTONLY" "third_party/abseil-cpp/absl/container/CMakeLists.txt" && mkdir -p cmake/build && cd cmake/build && \
cmake -DgRPC_INSTALL=ON \
-DgRPC_BUILD_TESTS=OFF \
../.. && sudo make --jobs 5
- name: Install gRPC
run: |
cd grpc && cd cmake/build && sudo make --jobs 5 install
- name: Test
run: |
PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.19
uses: mxschmitt/action-tmate@v3.22
with:
detached: true
connect-timeout-seconds: 180
@@ -172,7 +166,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -184,17 +178,12 @@ jobs:
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
PATH="$PATH:$HOME/go/bin" make protogen-go
- name: Build images
run: |
docker build --build-arg FFMPEG=true --build-arg IMAGE_TYPE=extras --build-arg EXTRA_BACKENDS=rerankers --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile .
BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test make docker-aio
- name: Test
run: |
PATH="$PATH:$HOME/go/bin" LOCALAI_MODELS_DIR=$PWD/models LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio \
make run-e2e-aio
PATH="$PATH:$HOME/go/bin" make backends/local-store backends/silero-vad backends/llama-cpp backends/whisper backends/piper backends/stablediffusion-ggml docker-build-aio e2e-aio
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.19
uses: mxschmitt/action-tmate@v3.22
with:
detached: true
connect-timeout-seconds: 180
@@ -207,7 +196,7 @@ jobs:
go-version: ['1.21.x']
steps:
- name: Clone
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -221,7 +210,11 @@ jobs:
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
pip install --user --no-cache-dir grpcio-tools
pip install --user --no-cache-dir grpcio-tools==1.71.0 grpcio==1.71.0
- name: Build llama-cpp-darwin
run: |
make protogen-go
make backends/llama-cpp-darwin
- name: Test
run: |
export C_INCLUDE_PATH=/usr/local/include
@@ -229,10 +222,11 @@ jobs:
export CC=/opt/homebrew/opt/llvm/bin/clang
# Used to run the newer GNUMake version from brew that supports --output-sync
export PATH="/opt/homebrew/opt/make/libexec/gnubin:$PATH"
BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
PATH="$PATH:$HOME/go/bin" make protogen-go
PATH="$PATH:$HOME/go/bin" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.19
uses: mxschmitt/action-tmate@v3.22
with:
detached: true
connect-timeout-seconds: 180


@@ -9,7 +9,7 @@ jobs:
fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5
- uses: actions/setup-go@v5
with:
go-version: 'stable'


@@ -8,7 +8,7 @@ jobs:
steps:
- name: 'Checkout'
uses: actions/checkout@master
- name: 'Yamllint'
- name: 'Yamllint model gallery'
uses: karancode/yamllint-github-action@master
with:
yamllint_file_or_dir: 'gallery'
@@ -16,3 +16,11 @@ jobs:
yamllint_comment: true
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: 'Yamllint Backend gallery'
uses: karancode/yamllint-github-action@master
with:
yamllint_file_or_dir: 'backend'
yamllint_strict: false
yamllint_comment: true
env:
GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.gitignore vendored

@@ -5,9 +5,14 @@ __pycache__/
*.o
get-sources
prepare-sources
/backend/cpp/llama/grpc-server
/backend/cpp/llama/llama.cpp
/backend/cpp/llama-cpp/grpc-server
/backend/cpp/llama-cpp/llama.cpp
/backend/cpp/llama-*
!backend/cpp/llama-cpp
/backends
/backend-images
/result.yaml
protoc
*.log
@@ -19,7 +24,7 @@ go-bert
# LocalAI build binary
LocalAI
local-ai
/local-ai
# prevent above rules from omitting the helm chart
!charts/*
# prevent above rules from omitting the api/localai folder
@@ -56,4 +61,4 @@ docs/static/gallery.html
**/venv
# per-developer customization files for the development container
.devcontainer/customization/*
.devcontainer/customization/*

.goreleaser.yaml Normal file

@@ -0,0 +1,33 @@
version: 2
before:
hooks:
- make protogen-go
- go mod tidy
dist: release
source:
enabled: true
name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
builds:
- main: ./cmd/local-ai
env:
- CGO_ENABLED=0
ldflags:
- -s -w
- -X "github.com/mudler/LocalAI/internal.Version={{ .Tag }}"
- -X "github.com/mudler/LocalAI/internal.Commit={{ .FullCommit }}"
goos:
- linux
- darwin
#- windows
goarch:
- amd64
- arm64
archives:
- formats: [ 'binary' ] # this removes the tar of the archives, leaving the binaries alone
name_template: local-ai-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
checksum:
name_template: '{{ .ProjectName }}-{{ .Tag }}-checksums.txt'
snapshot:
version_template: "{{ .Tag }}-next"
changelog:
use: github-native
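A quick local dry run of this configuration can be sketched as follows; this assumes GoReleaser v2 is installed and only builds a snapshot without tagging or publishing anything:

```bash
# Validate the configuration for syntax errors and deprecated fields
goreleaser check

# Build a snapshot release locally; artifacts land in the `release`
# directory configured by `dist: release` above
goreleaser release --snapshot --clean
```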

.vscode/launch.json vendored

@@ -26,7 +26,7 @@
"LOCALAI_P2P": "true",
"LOCALAI_FEDERATED": "true"
},
"buildFlags": ["-tags", "p2p tts", "-v"],
"buildFlags": ["-tags", "", "-v"],
"envFile": "${workspaceFolder}/.env",
"cwd": "${workspaceRoot}"
}


@@ -1,120 +1,32 @@
ARG IMAGE_TYPE=extras
ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
ARG INTEL_BASE_IMAGE=${BASE_IMAGE}
# The requirements-core target is common to all images. Nothing should be placed in it unless every single build will use it.
FROM ${BASE_IMAGE} AS requirements-core
USER root
ARG GO_VERSION=1.22.6
ARG CMAKE_VERSION=3.26.4
ARG CMAKE_FROM_SOURCE=false
ARG TARGETARCH
ARG TARGETVARIANT
FROM ${BASE_IMAGE} AS requirements
ENV DEBIAN_FRONTEND=noninteractive
ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,exllama2:/build/backend/python/exllama2/run.sh"
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
ccache \
ca-certificates \
curl libssl-dev \
git \
git-lfs \
unzip upx-ucl && \
ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas-base libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else
apt-get update && \
apt-get install -y \
cmake && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin
# Install grpc compilers
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
RUN update-ca-certificates
RUN test -n "$TARGETARCH" \
|| (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')
# Use the variables in subsequent instructions
RUN echo "Target Architecture: $TARGETARCH"
RUN echo "Target Variant: $TARGETVARIANT"
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# OpenBLAS requirements and stable diffusion
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
WORKDIR /build
###################################
###################################
# The requirements-extras target is for any build with IMAGE_TYPE=extras. Nothing should be placed in this target unless every IMAGE_TYPE=extras build will use it
FROM requirements-core AS requirements-extras
# Install uv as a system package
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
ENV PATH="/root/.cargo/bin:${PATH}"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
RUN apt-get update && \
apt-get install -y --no-install-recommends \
espeak-ng \
espeak \
python3-pip \
python-is-python3 \
python3-dev llvm \
python3-venv && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
pip install --upgrade pip
# Install grpcio-tools (the version in 22.04 is too old)
RUN pip install --user grpcio-tools
###################################
###################################
# The requirements-drivers target is for BUILD_TYPE specific items. If you need to install something specific to CUDA, or specific to ROCM, it goes here.
# This target will be built on top of requirements-core or requirements-extras as determined by the IMAGE_TYPE build-arg
FROM requirements-${IMAGE_TYPE} AS requirements-drivers
FROM requirements AS requirements-drivers
ARG BUILD_TYPE
ARG CUDA_MAJOR_VERSION=12
ARG CUDA_MINOR_VERSION=0
ARG SKIP_DRIVERS=false
ARG TARGETARCH
ARG TARGETVARIANT
ENV BUILD_TYPE=${BUILD_TYPE}
RUN mkdir -p /run/localai
RUN echo "default" > /run/localai/capability
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
@@ -127,7 +39,8 @@ RUN <<EOT bash
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
rm -rf /var/lib/apt/lists/* && \
echo "vulkan" > /run/localai/capability
fi
EOT
@@ -154,7 +67,14 @@ RUN <<EOT bash
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
rm -rf /var/lib/apt/lists/* && \
echo "nvidia" > /run/localai/capability
fi
EOT
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
echo "nvidia-l4t" > /run/localai/capability
fi
EOT
@@ -174,11 +94,94 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
rocblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
echo "amd" > /run/localai/capability && \
# I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
; fi
RUN expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
###################################
###################################
# The build-requirements target is common to all images. Nothing should be placed in it unless every single build will use it.
FROM requirements-drivers AS build-requirements
ARG GO_VERSION=1.22.6
ARG CMAKE_VERSION=3.26.4
ARG CMAKE_FROM_SOURCE=false
ARG TARGETARCH
ARG TARGETVARIANT
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
ccache \
ca-certificates espeak-ng \
curl libssl-dev \
git \
git-lfs \
unzip upx-ucl python3 python-is-python3 && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else
apt-get update && \
apt-get install -y \
cmake && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin
# Install grpc compilers
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
RUN update-ca-certificates
# OpenBLAS requirements and stable diffusion
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN test -n "$TARGETARCH" \
|| (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')
# Use the variables in subsequent instructions
RUN echo "Target Architecture: $TARGETARCH"
RUN echo "Target Variant: $TARGETVARIANT"
WORKDIR /build
###################################
###################################
@@ -189,69 +192,25 @@ FROM ${INTEL_BASE_IMAGE} AS intel
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
###################################
###################################
# The grpc target does one thing, it builds and installs GRPC. This is in its own layer so that it can be effectively cached by CI.
# You probably don't need to change anything here, and if you do, make sure that CI is adjusted so that the cache continues to work.
FROM ${GRPC_BASE_IMAGE} AS grpc
# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0
ARG CMAKE_FROM_SOURCE=false
ARG CMAKE_VERSION=3.26.4
ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
WORKDIR /build
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates \
build-essential curl libssl-dev \
git && \
intel-oneapi-runtime-libs && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else
apt-get update && \
apt-get install -y \
cmake && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# We install GRPC to a different prefix here so that we can copy in only the build artifacts later
# saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
# and running make install in the target container
RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
mkdir -p /build/grpc/cmake/build && \
cd /build/grpc/cmake/build && \
sed -i "216i\ TESTONLY" "../../third_party/abseil-cpp/absl/container/CMakeLists.txt" && \
cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX:PATH=/opt/grpc ../.. && \
make && \
make install && \
rm -rf /build
###################################
###################################
# The builder-base target has the arguments, variables, and copies shared between full builder images and the uncompiled devcontainer
FROM requirements-drivers AS builder-base
FROM build-requirements AS builder-base
ARG GO_TAGS="tts p2p"
ARG GO_TAGS=""
ARG GRPC_BACKENDS
ARG MAKEFLAGS
ARG LD_FLAGS="-s -w"
ARG TARGETARCH
ARG TARGETVARIANT
ENV GRPC_BACKENDS=${GRPC_BACKENDS}
ENV GO_TAGS=${GO_TAGS}
ENV MAKEFLAGS=${MAKEFLAGS}
@@ -265,9 +224,7 @@ RUN echo "GO_TAGS: $GO_TAGS" && echo "TARGETARCH: $TARGETARCH"
WORKDIR /build
# We need protoc installed, and the version in 22.04 is too old. We will create one as part of installing the GRPC build below
# but that will also bring in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
# We need protoc installed, and the version in 22.04 is too old.
RUN <<EOT bash
if [ "amd64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
@@ -284,35 +241,39 @@ EOT
###################################
###################################
# Compile backends first in a separate stage
FROM builder-base AS builder-backends
ARG TARGETARCH
ARG TARGETVARIANT
WORKDIR /build
COPY ./Makefile .
COPY ./backend ./backend
COPY ./go.mod .
COPY ./go.sum .
COPY ./.git ./.git
# Some of the Go backends use libs from the main src, we could further optimize the caching by building the CPP backends before here
COPY ./pkg/grpc ./pkg/grpc
COPY ./pkg/utils ./pkg/utils
COPY ./pkg/langchain ./pkg/langchain
RUN ls -l ./
RUN make protogen-go
# The builder target compiles LocalAI. This target is not the target that will be uploaded to the registry.
# Adjustments to the build process should likely be made here.
FROM builder-base AS builder
FROM builder-backends AS builder
# Install the pre-built GRPC
COPY --from=grpc /opt/grpc /usr/local
# Rebuild with defaults backends
WORKDIR /build
COPY . .
COPY .git .
RUN make prepare
## Build the binary
## If it's CUDA or hipblas, we want to skip some of the llama-compat backends to save space
## We only leave the most CPU-optimized variant and the fallback for the cublas/hipblas build
## (both will use CUDA or hipblas for the actual computation)
RUN if [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx512 backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build; \
else \
make build; \
fi
RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \
mkdir -p /build/sources/go-piper/piper-phonemize/pi/lib/ && \
touch /build/sources/go-piper/piper-phonemize/pi/lib/keep \
; fi
## If we're on arm64 AND using cublas/hipblas, skip some of the llama-compat backends to save space
## Otherwise just run the normal build
RUN make build
###################################
###################################
@@ -322,24 +283,11 @@ RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \
FROM builder-base AS devcontainer
ARG FFMPEG
COPY --from=grpc /opt/grpc /usr/local
COPY .devcontainer-scripts /.devcontainer-scripts
# Add FFmpeg
RUN if [ "${FFMPEG}" = "true" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
ffmpeg && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
; fi
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ssh less wget
ssh less
# For the devcontainer, leave apt functional in case additional devtools are needed at runtime.
RUN go install github.com/go-delve/delve/cmd/dlv@latest
@@ -353,98 +301,27 @@ RUN go install github.com/mikefarah/yq/v4@latest
# If you cannot find a more suitable place for an addition, this layer is a suitable place for it.
FROM requirements-drivers
ARG FFMPEG
ARG BUILD_TYPE
ARG TARGETARCH
ARG IMAGE_TYPE=extras
ARG EXTRA_BACKENDS
ARG MAKEFLAGS
ENV BUILD_TYPE=${BUILD_TYPE}
ENV REBUILD=false
ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
ENV MAKEFLAGS=${MAKEFLAGS}
ARG CUDA_MAJOR_VERSION=12
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
ENV NVIDIA_VISIBLE_DEVICES=all
# Add FFmpeg
RUN if [ "${FFMPEG}" = "true" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
ffmpeg && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
; fi
WORKDIR /
WORKDIR /build
# we start fresh & re-copy all assets because `make build` does not clean up nicely after itself
# so when `entrypoint.sh` runs `make build` again (which it does by default), the build would fail
# see https://github.com/go-skynet/LocalAI/pull/658#discussion_r1241971626 and
# https://github.com/go-skynet/LocalAI/pull/434
COPY . .
COPY --from=builder /build/sources ./sources/
COPY --from=grpc /opt/grpc /usr/local
RUN make prepare-sources
COPY ./entrypoint.sh .
# Copy the binary
COPY --from=builder /build/local-ai ./
# Copy shared libraries for piper
COPY --from=builder /build/sources/go-piper/piper-phonemize/pi/lib/* /usr/lib/
# Change the shell to bash so we can use [[ tests below
SHELL ["/bin/bash", "-c"]
# We try to strike a balance between individual layer size (as that affects total push time) and total image size
# Splitting the backends into more groups with fewer items results in a larger image, but a smaller size for the largest layer
# Splitting the backends into fewer groups with more items results in a smaller image, but a larger size for the largest layer
RUN if [[ ( "${IMAGE_TYPE}" == "extras ")]]; then \
apt-get -qq -y install espeak-ng \
; fi
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/coqui \
; fi && \
if [[ ( "${EXTRA_BACKENDS}" =~ "faster-whisper" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/faster-whisper \
; fi && \
if [[ ( "${EXTRA_BACKENDS}" =~ "diffusers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/diffusers \
; fi
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/kokoro \
; fi && \
if [[ ( "${EXTRA_BACKENDS}" =~ "exllama2" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/exllama2 \
; fi && \
if [[ ( "${EXTRA_BACKENDS}" =~ "transformers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/transformers \
; fi
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vllm" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/vllm \
; fi && \
if [[ ( "${EXTRA_BACKENDS}" =~ "bark" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/bark \
; fi && \
if [[ ( "${EXTRA_BACKENDS}" =~ "rerankers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
make -C backend/python/rerankers \
; fi
# Make sure the models directory exists
RUN mkdir -p /build/models
RUN mkdir -p /models /backends
# Define the health check command
HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
CMD curl -f ${HEALTHCHECK_ENDPOINT} || exit 1
VOLUME /build/models
VOLUME /models /backends
EXPOSE 8080
ENTRYPOINT [ "/build/entrypoint.sh" ]
ENTRYPOINT [ "/entrypoint.sh" ]


@@ -1,5 +0,0 @@
VERSION 0.7
build:
FROM DOCKERFILE -f Dockerfile .
SAVE ARTIFACT /usr/bin/local-ai AS LOCAL local-ai

Makefile

File diff suppressed because it is too large

README.md

@@ -1,6 +1,6 @@
<h1 align="center">
<br>
<img height="300" src="./core/http/static/logo.png"> <br>
<img width="300" src="./core/http/static/logo.png"> <br>
<br>
</h1>
@@ -30,7 +30,7 @@
<p align="center">
<a href="https://twitter.com/LocalAI_API" target="blank">
<img src="https://img.shields.io/twitter/follow/LocalAI_API?label=Follow: LocalAI_API&style=social" alt="Follow LocalAI_API"/>
<img src="https://img.shields.io/badge/X-%23000000.svg?style=for-the-badge&logo=X&logoColor=white&label=LocalAI_API" alt="Follow LocalAI_API"/>
</a>
<a href="https://discord.gg/uJAeKSAGDy" target="blank">
<img src="https://dcbadge.vercel.app/api/server/uJAeKSAGDy?style=flat-square&theme=default-inverted" alt="Join LocalAI Discord Community"/>
@@ -43,7 +43,8 @@
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
>
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples)
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
[![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/localaiofficial_bot)
[![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai)
@@ -103,28 +104,78 @@
Run the installer script:
```bash
# Basic installation
curl https://localai.io/install.sh | sh
```
For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/).
### macOS Download:
<a href="https://github.com/mudler/LocalAI/releases/latest/download/LocalAI.dmg">
<img src="https://img.shields.io/badge/Download-macOS-blue?style=for-the-badge&logo=apple&logoColor=white" alt="Download LocalAI for macOS"/>
</a>
Or run with docker:
### CPU only image:
```bash
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-cpu
```
### Nvidia GPU:
```bash
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
```
### CPU and GPU image (bigger size):
```bash
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
```
### AIO images (it will pre-download a set of models ready for use, see https://localai.io/basics/container/)
```bash
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
```
### NVIDIA GPU Images:
```bash
# CUDA 12.0
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
# CUDA 11.7
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-11
# NVIDIA Jetson (L4T) ARM64
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
```
### AMD GPU Images (ROCm):
```bash
docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-gpu-hipblas
```
### Intel GPU Images (oneAPI):
```bash
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel
```
### Vulkan GPU Images:
```bash
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-gpu-vulkan
```
### AIO Images (pre-downloaded models):
```bash
# CPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
# NVIDIA CUDA 12 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
# NVIDIA CUDA 11 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
# Intel GPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel
# AMD GPU version
docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas
```
For more information about the AIO images and pre-downloaded models, see [Container Documentation](https://localai.io/basics/container/).
To load models:
```bash
@@ -140,10 +191,19 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
local-ai run oci://localai/phi-2:latest
```
> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
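As a rough illustration of what the images know about their own acceleration support: the Dockerfile in this change set writes the detected capability marker to `/run/localai/capability` at build time, so a quick check against a running container (container name taken from the examples above) might look like:

```bash
# Prints one of: default, nvidia, nvidia-l4t, amd, intel, vulkan
docker exec local-ai cat /run/localai/capability
```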
## 📰 Latest project news
- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
- May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery).
- May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0)
- Apr 2025: Rebrand, WebUI enhancements
- Apr 2025: [LocalAGI](https://github.com/mudler/LocalAGI) and [LocalRecall](https://github.com/mudler/LocalRecall) join the LocalAI family stack.
- Apr 2025: WebUI overhaul, AIO images updates
- Feb 2025: Backend cleanup, Breaking changes, new backends (kokoro, OutelTTS, faster-whisper), Nvidia L4T images
@@ -162,6 +222,7 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
## 🚀 [Features](https://localai.io/features/)
- 🧩 [Backend Gallery](https://localai.io/backends/): Install/remove backends on the fly, powered by OCI images — fully customizable and API-driven.
- 📖 [Text generation with GPTs](https://localai.io/features/text-generation/) (`llama.cpp`, `transformers`, `vllm` ... [:book: and more](https://localai.io/model-compatibility/index.html#model-compatibility-table))
- 🗣 [Text to Audio](https://localai.io/features/text-to-audio/)
- 🔈 [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
@@ -171,12 +232,67 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
- 🥽 [Vision API](https://localai.io/features/gpt-vision/)
- 🔍 [Object Detection](https://localai.io/features/object-detection/)
- 📈 [Reranker API](https://localai.io/features/reranker/)
- 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
- [Agentic capabilities](https://github.com/mudler/LocalAGI)
- 🔊 Voice activity detection (Silero-VAD support)
- 🌍 Integrated WebUI!
## 🧩 Supported Backends & Acceleration
LocalAI supports a comprehensive range of AI backends with multiple acceleration options:
### Text Generation & Language Models
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 11/12, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
### Audio & Speech Processing
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 11/12, CPU |
| **piper** | Fast neural TTS system | CPU |
| **kitten-tts** | Kitten TTS models | CPU |
| **silero-vad** | Voice Activity Detection | CPU |
### Image & Video Generation
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12, Intel SYCL, Vulkan, CPU |
| **diffusers** | HuggingFace diffusion models | CUDA 11/12, ROCm, Intel, Metal, CPU |
### Specialized AI Tasks
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **rfdetr** | Real-time object detection | CUDA 12, Intel, CPU |
| **rerankers** | Document reranking API | CUDA 11/12, ROCm, Intel, CPU |
| **local-store** | Vector database | CPU |
| **huggingface** | HuggingFace API integration | API-based |
### Hardware Acceleration Matrix
| Acceleration Type | Supported Backends | Hardware Support |
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI |
| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
### 🔗 Community and integrations
@@ -191,6 +307,9 @@ WebUIs:
Model galleries
- https://github.com/go-skynet/model-gallery
Voice:
- https://github.com/richiejp/VoxInput
Other:
- Helm chart https://github.com/go-skynet/helm-charts
- VSCode extension https://github.com/badgooooor/localai-vscode-plugin


@@ -1,5 +1,6 @@
embeddings: true
name: text-embedding-ada-002
backend: llama-cpp
parameters:
model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf
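Not shown in the hunk: once LocalAI is running with this config, the model can be exercised through the OpenAI-compatible embeddings endpoint. A minimal sketch, assuming the default port from the examples in this change set (the same applies to the identical per-profile copies of this file further below):

```bash
curl http://localhost:8080/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{
    "model": "text-embedding-ada-002",
    "input": "A long time ago in a galaxy far, far away"
  }'
```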


@@ -1,7 +1,13 @@
name: jina-reranker-v1-base-en
backend: rerankers
reranking: true
f16: true
parameters:
model: cross-encoder
model: jina-reranker-v1-tiny-en.f16.gguf
backend: llama-cpp
download_files:
- filename: jina-reranker-v1-tiny-en.f16.gguf
sha256: 5f696cf0d0f3d347c4a279eee8270e5918554cdac0ed1f632f2619e4e8341407
uri: huggingface://mradermacher/jina-reranker-v1-tiny-en-GGUF/jina-reranker-v1-tiny-en.f16.gguf
usage: |
You can test this model with curl like this:
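The hunk ends before the curl example itself. A plausible sketch against LocalAI's rerank endpoint (request fields assumed from the Jina-style rerank API; the same applies to the identical per-profile copies of this file further below):

```bash
curl http://localhost:8080/v1/rerank \
  -H "Content-Type: application/json" \
  -d '{
    "model": "jina-reranker-v1-base-en",
    "query": "Organic skincare products for sensitive skin",
    "documents": [
      "Eco-friendly kitchenware for modern homes",
      "Organic skincare range for sensitive skin"
    ],
    "top_n": 1
  }'
```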


@@ -2,7 +2,7 @@ name: tts-1
download_files:
- filename: voice-en-us-amy-low.tar.gz
uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-amy-low.tar.gz
backend: piper
parameters:
model: en-us-amy-low.onnx
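A minimal sketch for exercising this piper-backed voice through LocalAI's TTS endpoint (output filename assumed; the same applies to the identical per-profile copies of this file further below):

```bash
curl http://localhost:8080/tts \
  -H "Content-Type: application/json" \
  -d '{"model": "tts-1", "input": "Hello from LocalAI"}' \
  --output hello.wav
```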


@@ -1,5 +1,6 @@
context_size: 8192
f16: true
backend: llama-cpp
function:
grammar:
no_mixed_free_string: true


@@ -1,10 +1,11 @@
context_size: 4096
f16: true
backend: llama-cpp
mmap: true
mmproj: minicpm-v-2_6-mmproj-f16.gguf
mmproj: minicpm-v-4_5-mmproj-f16.gguf
name: gpt-4o
parameters:
model: minicpm-v-2_6-Q4_K_M.gguf
model: minicpm-v-4_5-Q4_K_M.gguf
stopwords:
- <|im_end|>
- <dummy32000>
@@ -41,9 +42,9 @@ template:
<|im_start|>assistant
download_files:
- filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
- filename: minicpm-v-4_5-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8
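Since this config wires up a vision model (`mmproj` plus the Q4_K_M weights), here is a hedged sketch of a multimodal request through the OpenAI-compatible chat endpoint (the image URL is a placeholder):

```bash
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o",
    "messages": [{
      "role": "user",
      "content": [
        {"type": "text", "text": "What is in this image?"},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}}
      ]
    }]
  }'
```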


@@ -135,4 +135,4 @@ check_vars
echo "===> Starting LocalAI[$PROFILE] with the following models: $MODELS"
exec /build/entrypoint.sh "$@"
exec /entrypoint.sh "$@"


@@ -1,5 +1,6 @@
embeddings: true
name: text-embedding-ada-002
backend: llama-cpp
parameters:
model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf


@@ -1,7 +1,13 @@
name: jina-reranker-v1-base-en
backend: rerankers
reranking: true
f16: true
parameters:
model: cross-encoder
model: jina-reranker-v1-tiny-en.f16.gguf
backend: llama-cpp
download_files:
- filename: jina-reranker-v1-tiny-en.f16.gguf
sha256: 5f696cf0d0f3d347c4a279eee8270e5918554cdac0ed1f632f2619e4e8341407
uri: huggingface://mradermacher/jina-reranker-v1-tiny-en-GGUF/jina-reranker-v1-tiny-en.f16.gguf
usage: |
You can test this model with curl like this:


@@ -2,7 +2,7 @@ name: tts-1
download_files:
- filename: voice-en-us-amy-low.tar.gz
uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-amy-low.tar.gz
backend: piper
parameters:
model: en-us-amy-low.onnx


@@ -1,5 +1,6 @@
context_size: 4096
f16: true
backend: llama-cpp
function:
capture_llm_results:
- (?s)<Thought>(.*?)</Thought>
@@ -48,6 +49,6 @@ template:
<|im_start|>assistant
download_files:
- filename: localai-functioncall-phi-4-v0.3-q4_k_m.gguf
sha256: 23fee048ded2a6e2e1a7b6bbefa6cbf83068f194caa9552aecbaa00fec8a16d5
uri: huggingface://mudler/LocalAI-functioncall-phi-4-v0.3-Q4_K_M-GGUF/localai-functioncall-phi-4-v0.3-q4_k_m.gguf
- filename: localai-functioncall-qwen2.5-7b-v0.5-q4_k_m.gguf
sha256: 4e7b7fe1d54b881f1ef90799219dc6cc285d29db24f559c8998d1addb35713d4
uri: huggingface://mudler/LocalAI-functioncall-qwen2.5-7b-v0.5-Q4_K_M-GGUF/localai-functioncall-qwen2.5-7b-v0.5-q4_k_m.gguf

View File

@@ -1,10 +1,11 @@
context_size: 4096
backend: llama-cpp
f16: true
mmap: true
mmproj: minicpm-v-2_6-mmproj-f16.gguf
mmproj: minicpm-v-4_5-mmproj-f16.gguf
name: gpt-4o
parameters:
model: minicpm-v-2_6-Q4_K_M.gguf
model: minicpm-v-4_5-Q4_K_M.gguf
stopwords:
- <|im_end|>
- <dummy32000>
@@ -41,9 +42,9 @@ template:
<|im_start|>assistant
download_files:
- filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
- filename: minicpm-v-4_5-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8

View File

@@ -1,5 +1,6 @@
embeddings: true
name: text-embedding-ada-002
backend: llama-cpp
parameters:
model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf

View File

@@ -1,7 +1,13 @@
name: jina-reranker-v1-base-en
backend: rerankers
reranking: true
f16: true
parameters:
model: cross-encoder
model: jina-reranker-v1-tiny-en.f16.gguf
backend: llama-cpp
download_files:
- filename: jina-reranker-v1-tiny-en.f16.gguf
sha256: 5f696cf0d0f3d347c4a279eee8270e5918554cdac0ed1f632f2619e4e8341407
uri: huggingface://mradermacher/jina-reranker-v1-tiny-en-GGUF/jina-reranker-v1-tiny-en.f16.gguf
usage: |
You can test this model with curl like this:

View File

@@ -2,7 +2,7 @@ name: tts-1
download_files:
- filename: voice-en-us-amy-low.tar.gz
uri: https://github.com/rhasspy/piper/releases/download/v0.0.2/voice-en-us-amy-low.tar.gz
backend: piper
parameters:
model: en-us-amy-low.onnx

View File

@@ -1,5 +1,6 @@
context_size: 4096
f16: true
backend: llama-cpp
function:
capture_llm_results:
- (?s)<Thought>(.*?)</Thought>

View File

@@ -1,10 +1,11 @@
context_size: 4096
backend: llama-cpp
f16: true
mmap: true
mmproj: minicpm-v-2_6-mmproj-f16.gguf
mmproj: minicpm-v-4_5-mmproj-f16.gguf
name: gpt-4o
parameters:
model: minicpm-v-2_6-Q4_K_M.gguf
model: minicpm-v-4_5-Q4_K_M.gguf
stopwords:
- <|im_end|>
- <dummy32000>
@@ -42,9 +43,9 @@ template:
download_files:
- filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
- filename: minicpm-v-4_5-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8

View File

@@ -1,6 +0,0 @@
package main
import "embed"
//go:embed backend-assets/*
var backendAssets embed.FS

backend/Dockerfile.golang (new file, 131 lines)
View File

@@ -0,0 +1,131 @@
ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
ARG CUDA_MAJOR_VERSION
ARG CUDA_MINOR_VERSION
ARG SKIP_DRIVERS=false
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG GO_VERSION=1.22.6
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
git ccache \
ca-certificates \
make cmake \
curl unzip \
libssl-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# CuBLAS requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
apt-get update && \
apt-get install -y --no-install-recommends \
cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
libclblast-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
hipblas-dev \
rocblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
# I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \
; fi
# Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin
# Install grpc compilers
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
RUN echo "TARGETARCH: $TARGETARCH"
# We need protoc installed, and the version in 22.04 is too old. We will create one as part of installing the GRPC build below,
# but that will also bring in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
RUN <<EOT bash
if [ "amd64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-aarch_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
EOT
COPY . /LocalAI
RUN cd /LocalAI && make protogen-go && make -C /LocalAI/backend/go/${BACKEND} build
FROM scratch
ARG BACKEND=rerankers
COPY --from=builder /LocalAI/backend/go/${BACKEND}/package/. ./

View File

@@ -0,0 +1,207 @@
ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
# The grpc target does one thing: it builds and installs GRPC. This is in its own layer so that it can be effectively cached by CI.
# You probably don't need to change anything here, and if you do, make sure that CI is adjusted so that the cache continues to work.
FROM ${GRPC_BASE_IMAGE} AS grpc
# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0
ARG CMAKE_FROM_SOURCE=false
ARG CMAKE_VERSION=3.26.4
ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
WORKDIR /build
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates \
build-essential curl libssl-dev \
git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else
apt-get update && \
apt-get install -y \
cmake && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# We install GRPC to a different prefix here so that we can copy in only the build artifacts later;
# this saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
# and running make install in the target container
RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
mkdir -p /build/grpc/cmake/build && \
cd /build/grpc/cmake/build && \
sed -i "216i\ TESTONLY" "../../third_party/abseil-cpp/absl/container/CMakeLists.txt" && \
cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX:PATH=/opt/grpc ../.. && \
make && \
make install && \
rm -rf /build
FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
ARG CUDA_MAJOR_VERSION
ARG CUDA_MINOR_VERSION
ARG SKIP_DRIVERS=false
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG GO_VERSION=1.22.6
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
ccache git \
ca-certificates \
make \
curl unzip \
libssl-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# CuBLAS requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
apt-get update && \
apt-get install -y --no-install-recommends \
cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
libclblast-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
hipblas-dev \
rocblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
# I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \
; fi
RUN echo "TARGETARCH: $TARGETARCH"
# We need protoc installed, and the version in 22.04 is too old. We will create one as part of installing the GRPC build below,
# but that will also bring in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
RUN <<EOT bash
if [ "amd64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-aarch_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
EOT
# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else
apt-get update && \
apt-get install -y \
cmake && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
COPY --from=grpc /opt/grpc /usr/local
COPY . /LocalAI
## Build the llama-cpp grpc-server variants appropriate for the target architecture
RUN <<EOT bash
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-fallback && \
make llama-cpp-grpc && make llama-cpp-rpc-server; \
else \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-avx && \
make llama-cpp-avx2 && \
make llama-cpp-avx512 && \
make llama-cpp-fallback && \
make llama-cpp-grpc && \
make llama-cpp-rpc-server; \
fi
EOT
# Copy libraries using a script to handle architecture differences
RUN make -C /LocalAI/backend/cpp/llama-cpp package
FROM scratch
# Copy all available binaries (the build process only creates the appropriate ones for the target architecture)
COPY --from=builder /LocalAI/backend/cpp/llama-cpp/package/. ./

backend/Dockerfile.python (new file, 123 lines)
View File

@@ -0,0 +1,123 @@
ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
ARG CUDA_MAJOR_VERSION
ARG CUDA_MINOR_VERSION
ARG SKIP_DRIVERS=false
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
ccache \
ca-certificates \
espeak-ng \
curl \
libssl-dev \
git \
git-lfs \
unzip clang \
upx-ucl \
curl python3-pip \
python-is-python3 \
python3-dev llvm \
python3-venv make && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
pip install --upgrade pip
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# CuBLAS requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
apt-get update && \
apt-get install -y --no-install-recommends \
cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
libclblast-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
hipblas-dev \
rocblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
# I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \
; fi
# Install uv as a system package
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
ENV PATH="/root/.cargo/bin:${PATH}"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# Install grpcio-tools (the version in 22.04 is too old)
RUN pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
COPY python/${BACKEND} /${BACKEND}
COPY backend.proto /${BACKEND}/backend.proto
COPY python/common/ /${BACKEND}/common
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make
FROM scratch
ARG BACKEND=rerankers
COPY --from=builder /${BACKEND}/ /

backend/README.md (new file, 213 lines)
View File

@@ -0,0 +1,213 @@
# LocalAI Backend Architecture
This directory contains the core backend infrastructure for LocalAI, including the gRPC protocol definition, multi-language Dockerfiles, and language-specific backend implementations.
## Overview
LocalAI uses a unified gRPC-based architecture that allows different programming languages to implement AI backends while maintaining consistent interfaces and capabilities. The backend system supports multiple hardware acceleration targets and provides a standardized way to integrate various AI models and frameworks.
## Architecture Components
### 1. Protocol Definition (`backend.proto`)
The `backend.proto` file defines the gRPC service interface that all backends must implement. This ensures consistency across different language implementations and provides a contract for communication between LocalAI core and backend services. A client-side Go sketch follows the service and message lists below.
#### Core Services
- **Text Generation**: `Predict`, `PredictStream` for LLM inference
- **Embeddings**: `Embedding` for text vectorization
- **Image Generation**: `GenerateImage` for stable diffusion and image models
- **Audio Processing**: `AudioTranscription`, `TTS`, `SoundGeneration`
- **Video Generation**: `GenerateVideo` for video synthesis
- **Object Detection**: `Detect` for computer vision tasks
- **Vector Storage**: `StoresSet`, `StoresGet`, `StoresFind` for RAG operations
- **Reranking**: `Rerank` for document relevance scoring
- **Voice Activity Detection**: `VAD` for audio segmentation
#### Key Message Types
- **`PredictOptions`**: Comprehensive configuration for text generation
- **`ModelOptions`**: Model loading and configuration parameters
- **`Result`**: Standardized response format
- **`StatusResponse`**: Backend health and memory usage information
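To make the contract concrete, here is a minimal client-side sketch in Go. It assumes Go stubs generated from `backend.proto` with `protoc-gen-go`/`protoc-gen-go-grpc`; the import path, the listen address, and the `Prompt` field name are illustrative assumptions rather than part of the protocol.
```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "example.com/localai/backend/pb" // hypothetical path for the generated stubs
)

func main() {
	// Connect to a backend service; the address is an assumption for illustration.
	conn, err := grpc.NewClient("127.0.0.1:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Query backend health and memory usage via Status.
	status, err := client.Status(ctx, &pb.HealthMessage{})
	if err != nil {
		log.Fatalf("status: %v", err)
	}
	log.Printf("backend status: %v", status)

	// Run a single text-generation request; the Prompt field name is an assumption.
	reply, err := client.Predict(ctx, &pb.PredictOptions{Prompt: "Hello"})
	if err != nil {
		log.Fatalf("predict: %v", err)
	}
	log.Printf("reply: %v", reply)
}
```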
### 2. Multi-Language Dockerfiles
The backend system provides language-specific Dockerfiles that handle the build environment and dependencies for different programming languages:
- `Dockerfile.python`
- `Dockerfile.golang`
- `Dockerfile.llama-cpp`
### 3. Language-Specific Implementations
#### Python Backends (`python/`)
- **transformers**: Hugging Face Transformers framework
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro
#### Go Backends (`go/`)
- **whisper**: OpenAI Whisper speech recognition in Go, backed by the GGML C++ implementation (whisper.cpp)
- **stablediffusion-ggml**: Stable Diffusion in Go, backed by the GGML C++ implementation
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Go with C bindings to rhasspy/piper
- **bark-cpp**: Bark TTS models in Go with C++ bindings
- **local-store**: Vector storage backend
#### C++ Backends (`cpp/`)
- **llama-cpp**: Llama.cpp integration
- **grpc**: GRPC utilities and helpers
## Hardware Acceleration Support
### CUDA (NVIDIA)
- **Versions**: CUDA 11.x, 12.x
- **Features**: cuBLAS, cuDNN, TensorRT optimization
- **Targets**: x86_64, ARM64 (Jetson)
### ROCm (AMD)
- **Features**: HIP, rocBLAS, MIOpen
- **Targets**: AMD GPUs with ROCm support
### Intel
- **Features**: oneAPI, Intel Extension for PyTorch
- **Targets**: Intel GPUs, XPUs, CPUs
### Vulkan
- **Features**: Cross-platform GPU acceleration
- **Targets**: Windows, Linux, Android, macOS
### Apple Silicon
- **Features**: MLX framework, Metal Performance Shaders
- **Targets**: M1/M2/M3 Macs
## Backend Registry (`index.yaml`)
The `index.yaml` file serves as a central registry for all available backends, providing:
- **Metadata**: Name, description, license, icons
- **Capabilities**: Hardware targets and optimization profiles
- **Tags**: Categorization for discovery
- **URLs**: Source code and documentation links
## Building Backends
### Prerequisites
- Docker with multi-architecture support
- Appropriate hardware drivers (CUDA, ROCm, etc.)
- Build tools (make, cmake, compilers)
### Build Commands
Example build commands with Docker:
```bash
# Build Python backend
docker build -f backend/Dockerfile.python \
--build-arg BACKEND=transformers \
--build-arg BUILD_TYPE=cublas12 \
--build-arg CUDA_MAJOR_VERSION=12 \
--build-arg CUDA_MINOR_VERSION=0 \
-t localai-backend-transformers .
# Build Go backend
docker build -f backend/Dockerfile.golang \
--build-arg BACKEND=whisper \
--build-arg BUILD_TYPE=cpu \
-t localai-backend-whisper .
# Build C++ backend
docker build -f backend/Dockerfile.llama-cpp \
--build-arg BACKEND=llama-cpp \
--build-arg BUILD_TYPE=cublas12 \
-t localai-backend-llama-cpp .
```
For ARM64/macOS builds, Docker cannot be used; build with the Makefile in the respective backend instead.
### Build Types
- **`cpu`**: CPU-only optimization
- **`cublas11`**: CUDA 11.x with cuBLAS
- **`cublas12`**: CUDA 12.x with cuBLAS
- **`hipblas`**: ROCm with rocBLAS
- **`intel`**: Intel oneAPI optimization
- **`vulkan`**: Vulkan-based acceleration
- **`metal`**: Apple Metal optimization
## Backend Development
### Creating a New Backend
1. **Choose Language**: Select Python, Go, or C++ based on requirements
2. **Implement Interface**: Implement the gRPC service defined in `backend.proto`
3. **Add Dependencies**: Create appropriate requirements files
4. **Configure Build**: Set up Dockerfile and build scripts
5. **Register Backend**: Add entry to `index.yaml`
6. **Test Integration**: Verify gRPC communication and functionality
### Backend Structure
```
backend-name/
├── backend.py/go/cpp # Main implementation
├── requirements.txt # Dependencies
├── Dockerfile # Build configuration
├── install.sh # Installation script
├── run.sh # Execution script
├── test.sh # Test script
└── README.md # Backend documentation
```
### Required gRPC Methods
At minimum, backends must implement the following (a minimal Go skeleton is sketched after this list):
- `Health()` - Service health check
- `LoadModel()` - Model loading and initialization
- `Predict()` - Main inference endpoint
- `Status()` - Backend status and metrics
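A minimal Go skeleton satisfying this contract might look like the sketch below. It assumes stubs generated from `backend.proto` under a hypothetical import path; the exact `Reply`/`Result` payloads and the `Health` return type are assumptions, and a real backend would call into its inference framework where indicated.
```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	pb "example.com/localai/backend/pb" // hypothetical path for the generated stubs
)

// backendServer embeds the generated unimplemented server so that any RPC we
// do not override cleanly returns codes.Unimplemented.
type backendServer struct {
	pb.UnimplementedBackendServer
}

// Health reports that the service is alive; the payload is an assumption.
func (s *backendServer) Health(ctx context.Context, in *pb.HealthMessage) (*pb.Reply, error) {
	return &pb.Reply{}, nil
}

// LoadModel loads and initializes the model described by ModelOptions.
func (s *backendServer) LoadModel(ctx context.Context, in *pb.ModelOptions) (*pb.Result, error) {
	// A real backend would load weights and allocate device memory here.
	return &pb.Result{}, nil
}

// Predict is the main inference endpoint.
func (s *backendServer) Predict(ctx context.Context, in *pb.PredictOptions) (*pb.Reply, error) {
	// A real backend would run inference through its framework here.
	return &pb.Reply{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:8080") // address is an assumption
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	pb.RegisterBackendServer(s, &backendServer{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```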
## Integration with LocalAI Core
Backends communicate with LocalAI core through gRPC:
1. **Service Discovery**: Core discovers available backends
2. **Model Loading**: Core requests model loading via `LoadModel`
3. **Inference**: Core sends requests via `Predict` or specialized endpoints
4. **Streaming**: Core handles streaming responses for real-time generation (see the consumer sketch after this list)
5. **Monitoring**: Core tracks backend health and performance
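For step 4 above, a client consumes `PredictStream` by reading from the server-side stream until `io.EOF`; a minimal consumer sketch, under the same stub assumptions as earlier:
```go
package client

import (
	"context"
	"io"

	pb "example.com/localai/backend/pb" // hypothetical path for the generated stubs
)

// ConsumeStream drains a PredictStream response, handing each partial Reply
// to the caller as it arrives, and returns once generation is complete.
func ConsumeStream(ctx context.Context, client pb.BackendClient, opts *pb.PredictOptions, onReply func(*pb.Reply)) error {
	stream, err := client.PredictStream(ctx, opts)
	if err != nil {
		return err
	}
	for {
		reply, err := stream.Recv()
		if err == io.EOF {
			return nil // the server closed the stream: generation finished
		}
		if err != nil {
			return err
		}
		onReply(reply)
	}
}
```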
## Performance Optimization
### Memory Management
- **Model Caching**: Efficient model loading and caching
- **Batch Processing**: Optimize for multiple concurrent requests
- **Memory Pinning**: GPU memory optimization for CUDA/ROCm
### Hardware Utilization
- **Multi-GPU**: Support for tensor parallelism
- **Mixed Precision**: FP16/BF16 for memory efficiency
- **Kernel Fusion**: Optimized CUDA/ROCm kernels
## Troubleshooting
### Common Issues
1. **GRPC Connection**: Verify backend service is running and accessible
2. **Model Loading**: Check model paths and dependencies
3. **Hardware Detection**: Ensure appropriate drivers and libraries
4. **Memory Issues**: Monitor GPU memory usage and model sizes
## Contributing
When contributing to the backend system:
1. **Follow Protocol**: Implement the exact gRPC interface
2. **Add Tests**: Include comprehensive test coverage
3. **Document**: Provide clear usage examples
4. **Optimize**: Consider performance and resource usage
5. **Validate**: Test across different hardware targets

View File

@@ -14,11 +14,13 @@ service Backend {
rpc PredictStream(PredictOptions) returns (stream Reply) {}
rpc Embedding(PredictOptions) returns (EmbeddingResult) {}
rpc GenerateImage(GenerateImageRequest) returns (Result) {}
rpc GenerateVideo(GenerateVideoRequest) returns (Result) {}
rpc AudioTranscription(TranscriptRequest) returns (TranscriptResult) {}
rpc TTS(TTSRequest) returns (Result) {}
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {}
rpc Detect(DetectOptions) returns (DetectResponse) {}
rpc StoresSet(StoresSetOptions) returns (Result) {}
rpc StoresDelete(StoresDeleteOptions) returns (Result) {}
@@ -161,6 +163,7 @@ message Reply {
int32 prompt_tokens = 3;
double timing_prompt_processing = 4;
double timing_token_generation = 5;
bytes audio = 6;
}
message GrammarTrigger {
@@ -183,7 +186,6 @@ message ModelOptions {
string MainGPU = 13;
string TensorSplit = 14;
int32 Threads = 15;
string LibrarySearchPath = 16;
float RopeFreqBase = 17;
float RopeFreqScale = 18;
float RMSNormEps = 19;
@@ -240,7 +242,7 @@ message ModelOptions {
string Type = 49;
bool FlashAttention = 56;
string FlashAttention = 56;
bool NoKVOffload = 57;
string ModelPath = 59;
@@ -254,6 +256,10 @@ message ModelOptions {
string CacheTypeValue = 64;
repeated GrammarTrigger GrammarTriggers = 65;
bool Reranking = 71;
repeated string Overrides = 72;
}
message Result {
@@ -270,6 +276,7 @@ message TranscriptRequest {
string language = 3;
uint32 threads = 4;
bool translate = 5;
bool diarize = 6;
}
message TranscriptResult {
@@ -299,6 +306,24 @@ message GenerateImageRequest {
// Diffusers
string EnableParameters = 10;
int32 CLIPSkip = 11;
// Reference images for models that support them (e.g., Flux Kontext)
repeated string ref_images = 12;
}
message GenerateVideoRequest {
string prompt = 1;
string negative_prompt = 2; // Negative prompt for video generation
string start_image = 3; // Path or base64 encoded image for the start frame
string end_image = 4; // Path or base64 encoded image for the end frame
int32 width = 5;
int32 height = 6;
int32 num_frames = 7; // Number of frames to generate
int32 fps = 8; // Frames per second
int32 seed = 9;
float cfg_scale = 10; // Classifier-free guidance scale
int32 step = 11; // Number of inference steps
string dst = 12; // Output path for the generated video
}
message TTSRequest {
@@ -358,3 +383,20 @@ message Message {
string role = 1;
string content = 2;
}
message DetectOptions {
string src = 1;
}
message Detection {
float x = 1;
float y = 2;
float width = 3;
float height = 4;
float confidence = 5;
string class_name = 6;
}
message DetectResponse {
repeated Detection Detections = 1;
}

View File

@@ -1,20 +1,3 @@
## XXX: In some versions of CMake clip wasn't being built before llama.
## This is a hack for now, but it should be fixed in the future.
set(TARGET myclip)
add_library(${TARGET} clip.cpp clip.h clip-impl.h llava.cpp llava.h)
install(TARGETS ${TARGET} LIBRARY)
target_include_directories(myclip PUBLIC .)
target_include_directories(myclip PUBLIC ../..)
target_include_directories(myclip PUBLIC ../../common)
target_link_libraries(${TARGET} PRIVATE common ggml llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
if (NOT MSVC)
target_compile_options(${TARGET} PRIVATE -Wno-cast-qual) # stb_image.h
endif()
# END CLIP hack
set(TARGET grpc-server)
set(CMAKE_CXX_STANDARD 17)
cmake_minimum_required(VERSION 3.15)
@@ -74,8 +57,12 @@ add_library(hw_grpc_proto
${hw_proto_srcs}
${hw_proto_hdrs} )
add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp)
target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp httplib.h)
target_include_directories(${TARGET} PRIVATE ../llava)
target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})
target_link_libraries(${TARGET} PRIVATE common llama mtmd ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
absl::flags_parse
gRPC::${_REFLECTION}
gRPC::${_GRPC_GRPCPP}

View File

@@ -0,0 +1,166 @@
LLAMA_VERSION?=8ff206097c2bf3ca1c7aa95f9d6db779fc7bdd68
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?=
BUILD_TYPE?=
NATIVE?=false
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server
JOBS?=$(shell nproc)
# Disable shared libs as we are linking against static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DGGML_CUDA=ON
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm
export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=1
else ifeq ($(OS),Darwin)
ifeq ($(BUILD_TYPE),)
BUILD_TYPE=metal
endif
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DGGML_METAL=OFF
else
CMAKE_ARGS+=-DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
CMAKE_ARGS+=-DGGML_METAL_USE_BF16=ON
CMAKE_ARGS+=-DGGML_OPENMP=OFF
endif
TARGET+=--target ggml-metal
endif
ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DCMAKE_CXX_FLAGS="-fsycl" \
-DGGML_SYCL_F16=ON
endif
ifeq ($(BUILD_TYPE),sycl_f32)
CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DCMAKE_CXX_FLAGS="-fsycl"
endif
INSTALLED_PACKAGES=$(CURDIR)/../grpc/installed_packages
INSTALLED_LIB_CMAKE=$(INSTALLED_PACKAGES)/lib/cmake
ADDED_CMAKE_ARGS=-Dabsl_DIR=${INSTALLED_LIB_CMAKE}/absl \
-DProtobuf_DIR=${INSTALLED_LIB_CMAKE}/protobuf \
-Dutf8_range_DIR=${INSTALLED_LIB_CMAKE}/utf8_range \
-DgRPC_DIR=${INSTALLED_LIB_CMAKE}/grpc \
-DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=${INSTALLED_PACKAGES}/include
build-llama-cpp-grpc-server:
# Conditionally build grpc for the llama backend to use if needed
ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
$(MAKE) -C ../../grpc build
_PROTOBUF_PROTOC=${INSTALLED_PACKAGES}/bin/proto \
_GRPC_CPP_PLUGIN_EXECUTABLE=${INSTALLED_PACKAGES}/bin/grpc_cpp_plugin \
PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
LLAMA_VERSION=$(LLAMA_VERSION) \
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
else
echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
endif
llama-cpp-avx2: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build purge
$(info ${GREEN}I llama-cpp build info:avx2${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx2-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build/grpc-server llama-cpp-avx2
llama-cpp-avx512: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build purge
$(info ${GREEN}I llama-cpp build info:avx512${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx512-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build/grpc-server llama-cpp-avx512
llama-cpp-avx: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
$(info ${GREEN}I llama-cpp build info:avx${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
llama-cpp-fallback: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
llama-cpp-grpc: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
$(info ${GREEN}I llama-cpp build info:grpc${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc
llama-cpp-rpc-server: llama-cpp-grpc
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/llama.cpp/build/bin/rpc-server llama-cpp-rpc-server
llama.cpp:
mkdir -p llama.cpp
cd llama.cpp && \
git init && \
git remote add origin $(LLAMA_REPO) && \
git fetch origin && \
git checkout -b build $(LLAMA_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
llama.cpp/tools/grpc-server: llama.cpp
mkdir -p llama.cpp/tools/grpc-server
bash prepare.sh
rebuild:
bash prepare.sh
rm -rf grpc-server
$(MAKE) grpc-server
package:
bash package.sh
purge:
rm -rf llama.cpp/build
rm -rf llama.cpp/tools/grpc-server
rm -rf grpc-server
clean: purge
rm -rf llama.cpp
grpc-server: llama.cpp llama.cpp/tools/grpc-server
@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)"
else
+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)
endif
cp llama.cpp/build/bin/grpc-server .

View File

File diff suppressed because it is too large.

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile
set -e
CURDIR=$(dirname "$(realpath $0)")
# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avrf $CURDIR/llama-cpp-* $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
# x86_64 architecture
echo "Detected x86_64 architecture, copying x86_64 libraries..."
cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
# ARM64 architecture
echo "Detected ARM64 architecture, copying ARM64 libraries..."
cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
else
echo "Error: Could not detect architecture"
exit 1
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

View File

@@ -1,7 +1,7 @@
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 3cd0d2fa..6c5e811a 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -2608,7 +2608,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
int* patches_data = (int*)malloc(ggml_nbytes(patches));

View File

@@ -0,0 +1,52 @@
#!/bin/bash
## Patches
## Apply patches from the `patches` directory
for patch in $(ls patches); do
echo "Applying patch $patch"
patch -d llama.cpp/ -p1 < patches/$patch
done
set -e
cp -r CMakeLists.txt llama.cpp/tools/grpc-server/
cp -r grpc-server.cpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/nlohmann/json.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/tools/server/utils.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/cpp-httplib/httplib.h llama.cpp/tools/grpc-server/
set +e
if grep -q "grpc-server" llama.cpp/tools/CMakeLists.txt; then
echo "grpc-server already added"
else
echo "add_subdirectory(grpc-server)" >> llama.cpp/tools/CMakeLists.txt
fi
set -e
# Now to keep maximum compatibility with the original server.cpp, we need to remove the index.html.gz.hpp and loading.html.hpp includes
# and remove the main function
# TODO: upstream this to the original server.cpp by extracting the upstream main function to a separate file
awk '
/int[ \t]+main[ \t]*\(/ { # If the line starts the main function
in_main=1; # Set a flag
open_braces=0; # Track number of open braces
}
in_main {
open_braces += gsub(/\{/, "{"); # Count opening braces
open_braces -= gsub(/\}/, "}"); # Count closing braces
if (open_braces == 0) { # If all braces are closed
in_main=0; # End skipping
}
next; # Skip lines inside main
}
!in_main # Print lines not inside main
' "llama.cpp/tools/server/server.cpp" > llama.cpp/tools/grpc-server/server.cpp
# remove index.html.gz.hpp and loading.html.hpp includes
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS
sed -i '' '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
else
# Linux and others
sed -i '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
fi

backend/cpp/llama-cpp/run.sh (new executable file, 62 lines)
View File

@@ -0,0 +1,62 @@
#!/bin/bash
set -ex
# Get the absolute current dir where the script is located
CURDIR=$(dirname "$(realpath $0)")
cd /
echo "CPU info:"
grep -e "model\sname" /proc/cpuinfo | head -1
grep -e "flags" /proc/cpuinfo | head -1
BINARY=llama-cpp-fallback
if grep -q -e "\savx\s" /proc/cpuinfo ; then
echo "CPU: AVX found OK"
if [ -e $CURDIR/llama-cpp-avx ]; then
BINARY=llama-cpp-avx
fi
fi
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
echo "CPU: AVX2 found OK"
if [ -e $CURDIR/llama-cpp-avx2 ]; then
BINARY=llama-cpp-avx2
fi
fi
# Check avx 512
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
echo "CPU: AVX512F found OK"
if [ -e $CURDIR/llama-cpp-avx512 ]; then
BINARY=llama-cpp-avx512
fi
fi
if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then
if [ -e $CURDIR/llama-cpp-grpc ]; then
BINARY=llama-cpp-grpc
fi
fi
# Extend ld library path with the dir where this script is located/lib
if [ "$(uname)" == "Darwin" ]; then
export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH
#export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
else
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
fi
# If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then
echo "Using lib/ld.so"
echo "Using binary: $BINARY"
exec $CURDIR/lib/ld.so $CURDIR/$BINARY "$@"
fi
echo "Using binary: $BINARY"
exec $CURDIR/$BINARY "$@"
# We should never reach this point; just in case we do, run the fallback
exec $CURDIR/llama-cpp-fallback "$@"

View File

@@ -1,87 +0,0 @@
LLAMA_VERSION?=
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?=
BUILD_TYPE?=
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server
# Disable shared libs as we are linking against static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DGGML_CUDA=ON
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DGGML_HIP=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DGGML_METAL=OFF
else
CMAKE_ARGS+=-DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
TARGET+=--target ggml-metal
endif
endif
ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DCMAKE_CXX_FLAGS="-fsycl" \
-DGGML_SYCL_F16=ON
endif
ifeq ($(BUILD_TYPE),sycl_f32)
CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DCMAKE_CXX_FLAGS="-fsycl"
endif
llama.cpp:
mkdir -p llama.cpp
cd llama.cpp && \
git init && \
git remote add origin $(LLAMA_REPO) && \
git fetch origin && \
git checkout -b build $(LLAMA_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
llama.cpp/examples/grpc-server: llama.cpp
mkdir -p llama.cpp/examples/grpc-server
bash prepare.sh
rebuild:
bash prepare.sh
rm -rf grpc-server
$(MAKE) grpc-server
purge:
rm -rf llama.cpp/build
rm -rf llama.cpp/examples/grpc-server
rm -rf grpc-server
clean: purge
rm -rf llama.cpp
grpc-server: llama.cpp llama.cpp/examples/grpc-server
@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)"
else
+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
endif
cp llama.cpp/build/bin/grpc-server .

View File

File diff suppressed because it is too large.

View File

File diff suppressed because it is too large.

View File

@@ -1,28 +0,0 @@
#!/bin/bash
## Patches
## Apply patches from the `patches` directory
for patch in $(ls patches); do
echo "Applying patch $patch"
patch -d llama.cpp/ -p1 < patches/$patch
done
cp -r CMakeLists.txt llama.cpp/examples/grpc-server/
cp -r grpc-server.cpp llama.cpp/examples/grpc-server/
cp -rfv json.hpp llama.cpp/examples/grpc-server/
cp -rfv utils.hpp llama.cpp/examples/grpc-server/
if grep -q "grpc-server" llama.cpp/examples/CMakeLists.txt; then
echo "grpc-server already added"
else
echo "add_subdirectory(grpc-server)" >> llama.cpp/examples/CMakeLists.txt
fi
## XXX: In some versions of CMake clip wasn't being built before llama.
## This is a hack for now, but it should be fixed in the future.
cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
cp -rfv llama.cpp/examples/llava/clip-impl.h llama.cpp/examples/grpc-server/clip-impl.h
cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp

View File

@@ -1,483 +0,0 @@
// https://github.com/ggerganov/llama.cpp/blob/master/examples/server/utils.hpp
#pragma once
#include <string>
#include <vector>
#include <set>
#include <mutex>
#include <condition_variable>
#include <unordered_map>
#include "json.hpp"
#include "../llava/clip.h"
using json = nlohmann::json;
extern bool server_verbose;
#ifndef SERVER_VERBOSE
#define SERVER_VERBOSE 1
#endif
#if SERVER_VERBOSE != 1
#define LOG_VERBOSE(MSG, ...)
#else
#define LOG_VERBOSE(MSG, ...) \
do \
{ \
if (server_verbose) \
{ \
server_log("VERBOSE", __func__, __LINE__, MSG, __VA_ARGS__); \
} \
} while (0)
#endif
#define LOG_ERROR( MSG, ...) server_log("ERROR", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_WARNING(MSG, ...) server_log("WARNING", __func__, __LINE__, MSG, __VA_ARGS__)
#define LOG_INFO( MSG, ...) server_log("INFO", __func__, __LINE__, MSG, __VA_ARGS__)
//
// parallel
//
enum server_state {
SERVER_STATE_LOADING_MODEL, // Server is starting up, model not fully loaded yet
SERVER_STATE_READY, // Server is ready and model is loaded
SERVER_STATE_ERROR // An error occurred, load_model failed
};
enum task_type {
TASK_TYPE_COMPLETION,
TASK_TYPE_CANCEL,
TASK_TYPE_NEXT_RESPONSE
};
struct task_server {
int id = -1; // to be filled by llama_server_queue
int target_id;
task_type type;
json data;
bool infill_mode = false;
bool embedding_mode = false;
int multitask_id = -1;
};
struct task_result {
int id;
int multitask_id = -1;
bool stop;
bool error;
json result_json;
};
struct task_multi {
int id;
std::set<int> subtasks_remaining{};
std::vector<task_result> results{};
};
// TODO: can become bool if we can't find use of more states
enum slot_state
{
IDLE,
PROCESSING,
};
enum slot_command
{
NONE,
LOAD_PROMPT,
RELEASE,
};
struct slot_params
{
bool stream = true;
bool cache_prompt = false; // remember the prompt to avoid reprocessing the whole prompt
uint32_t seed = -1; // RNG seed
int32_t n_keep = 0; // number of tokens to keep from initial prompt
int32_t n_predict = -1; // new tokens to predict
std::vector<std::string> antiprompt;
json input_prefix;
json input_suffix;
};
struct slot_image
{
int32_t id;
bool request_encode_image = false;
float * image_embedding = nullptr;
int32_t image_tokens = 0;
clip_image_u8 * img_data;
std::string prefix_prompt; // prompt that comes before this image
};
// completion token output with probabilities
struct completion_token_output
{
struct token_prob
{
llama_token tok;
float prob;
};
std::vector<token_prob> probs;
llama_token tok;
std::string text_to_send;
};
static inline void server_log(const char *level, const char *function, int line,
const char *message, const nlohmann::ordered_json &extra)
{
nlohmann::ordered_json log
{
{"timestamp", time(nullptr)},
{"level", level},
{"function", function},
{"line", line},
{"message", message},
};
if (!extra.empty())
{
log.merge_patch(extra);
}
const std::string str = log.dump(-1, ' ', false, json::error_handler_t::replace);
printf("%.*s\n", (int)str.size(), str.data());
fflush(stdout);
}
//
// server utils
//
template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value)
{
// Fallback null to default value
return body.contains(key) && !body.at(key).is_null()
? body.value(key, default_value)
: default_value;
}
inline std::string format_chatml(std::vector<json> messages)
{
std::ostringstream chatml_msgs;
for (auto it = messages.begin(); it != messages.end(); ++it) {
chatml_msgs << "<|im_start|>"
<< json_value(*it, "role", std::string("user")) << '\n';
chatml_msgs << json_value(*it, "content", std::string(""))
<< "<|im_end|>\n";
}
chatml_msgs << "<|im_start|>assistant" << '\n';
return chatml_msgs.str();
}
//
// work queue utils
//
struct llama_server_queue {
int id = 0;
std::mutex mutex_tasks;
// queues
std::vector<task_server> queue_tasks;
std::vector<task_server> queue_tasks_deferred;
std::vector<task_multi> queue_multitasks;
std::condition_variable condition_tasks;
// callback functions
std::function<void(task_server&)> callback_new_task;
std::function<void(task_multi&)> callback_finish_multitask;
std::function<void(void)> callback_all_task_finished;
// Add a new task to the end of the queue
int post(task_server task) {
std::unique_lock<std::mutex> lock(mutex_tasks);
if (task.id == -1) {
task.id = id++;
}
queue_tasks.push_back(std::move(task));
condition_tasks.notify_one();
return task.id;
}
// Add a new task, but defer until one slot is available
void defer(task_server task) {
std::unique_lock<std::mutex> lock(mutex_tasks);
queue_tasks_deferred.push_back(std::move(task));
}
// Get the next id for creating a new task
int get_new_id() {
std::unique_lock<std::mutex> lock(mutex_tasks);
return id++;
}
// Register function to process a new task
void on_new_task(std::function<void(task_server&)> callback) {
callback_new_task = callback;
}
// Register function to process a multitask
void on_finish_multitask(std::function<void(task_multi&)> callback) {
callback_finish_multitask = callback;
}
// Register the function to be called when the batch of tasks is finished
void on_all_tasks_finished(std::function<void(void)> callback) {
callback_all_task_finished = callback;
}
// Call when the state of one slot is changed
void notify_slot_changed() {
// move deferred tasks back to main loop
std::unique_lock<std::mutex> lock(mutex_tasks);
for (auto & task : queue_tasks_deferred) {
queue_tasks.push_back(std::move(task));
}
queue_tasks_deferred.clear();
}
// Start the main loop. This call is blocking
[[noreturn]]
void start_loop() {
while (true) {
// new task arrived
LOG_VERBOSE("have new task", {});
{
while (true)
{
std::unique_lock<std::mutex> lock(mutex_tasks);
if (queue_tasks.empty()) {
lock.unlock();
break;
}
task_server task = queue_tasks.front();
queue_tasks.erase(queue_tasks.begin());
lock.unlock();
LOG_VERBOSE("callback_new_task", {});
callback_new_task(task);
}
LOG_VERBOSE("callback_all_task_finished", {});
// process and update all the multitasks
auto queue_iterator = queue_multitasks.begin();
while (queue_iterator != queue_multitasks.end())
{
if (queue_iterator->subtasks_remaining.empty())
{
// all subtasks done == multitask is done
task_multi current_multitask = *queue_iterator;
callback_finish_multitask(current_multitask);
// remove this multitask
queue_iterator = queue_multitasks.erase(queue_iterator);
}
else
{
++queue_iterator;
}
}
// all tasks in the current loop are finished
callback_all_task_finished();
}
LOG_VERBOSE("wait for new task", {});
// wait for new task
{
std::unique_lock<std::mutex> lock(mutex_tasks);
if (queue_tasks.empty()) {
condition_tasks.wait(lock, [&]{
return !queue_tasks.empty();
});
}
}
}
}
//
// functions to manage multitasks
//
// add a multitask by specifying the ids of all its subtasks (a subtask is a task_server)
void add_multitask(int multitask_id, std::vector<int>& sub_ids)
{
std::lock_guard<std::mutex> lock(mutex_tasks);
task_multi multi;
multi.id = multitask_id;
std::copy(sub_ids.begin(), sub_ids.end(), std::inserter(multi.subtasks_remaining, multi.subtasks_remaining.end()));
queue_multitasks.push_back(multi);
}
// update the remaining subtasks, while appending results to the multitask
void update_multitask(int multitask_id, int subtask_id, task_result& result)
{
std::lock_guard<std::mutex> lock(mutex_tasks);
for (auto& multitask : queue_multitasks)
{
if (multitask.id == multitask_id)
{
multitask.subtasks_remaining.erase(subtask_id);
multitask.results.push_back(result);
}
}
}
};
struct llama_server_response {
typedef std::function<void(int, int, task_result&)> callback_multitask_t;
callback_multitask_t callback_update_multitask;
// for keeping track of all tasks waiting for the result
std::set<int> waiting_task_ids;
// the main result queue
std::vector<task_result> queue_results;
std::mutex mutex_results;
std::condition_variable condition_results;
void add_waiting_task_id(int task_id) {
std::unique_lock<std::mutex> lock(mutex_results);
waiting_task_ids.insert(task_id);
}
void remove_waiting_task_id(int task_id) {
std::unique_lock<std::mutex> lock(mutex_results);
waiting_task_ids.erase(task_id);
}
// This function blocks the thread until there is a response for this task_id
task_result recv(int task_id) {
while (true)
{
std::unique_lock<std::mutex> lock(mutex_results);
condition_results.wait(lock, [&]{
return !queue_results.empty();
});
LOG_VERBOSE("condition_results unblock", {});
for (int i = 0; i < (int) queue_results.size(); i++)
{
if (queue_results[i].id == task_id)
{
assert(queue_results[i].multitask_id == -1);
task_result res = queue_results[i];
queue_results.erase(queue_results.begin() + i);
return res;
}
}
}
// should never reach here
}
// Register the function to update multitask
void on_multitask_update(callback_multitask_t callback) {
callback_update_multitask = callback;
}
// Send a new result to a waiting task_id
void send(task_result result) {
std::unique_lock<std::mutex> lock(mutex_results);
LOG_VERBOSE("send new result", {});
for (auto& task_id : waiting_task_ids) {
// LOG_TEE("waiting task id %i \n", task_id);
// for now, tasks that have associated parent multitasks just get erased once multitask picks up the result
if (result.multitask_id == task_id)
{
LOG_VERBOSE("callback_update_multitask", {});
callback_update_multitask(task_id, result.id, result);
continue;
}
if (result.id == task_id)
{
LOG_VERBOSE("queue_results.push_back", {});
queue_results.push_back(result);
condition_results.notify_one();
return;
}
}
}
};
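The response struct above pairs with the task queue: a caller registers interest in a task id before the task can finish, blocks in recv until send delivers a matching result, then unregisters. A hedged sketch of that round trip (the wrapper function is hypothetical):

// Hypothetical wrapper showing the register/wait/unregister round trip.
static task_result wait_for_result(llama_server_response & response, int task_id) {
    response.add_waiting_task_id(task_id);    // register before results can arrive
    task_result res = response.recv(task_id); // blocks on condition_results
    response.remove_waiting_task_id(task_id); // always unregister when done
    return res;
}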
//
// base64 utils (TODO: move to common in the future)
//
static const std::string base64_chars =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
static inline bool is_base64(uint8_t c)
{
return (isalnum(c) || (c == '+') || (c == '/'));
}
static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string)
{
int i = 0;
int j = 0;
int in_ = 0;
int in_len = encoded_string.size();
uint8_t char_array_4[4];
uint8_t char_array_3[3];
std::vector<uint8_t> ret;
while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_]))
{
char_array_4[i++] = encoded_string[in_]; in_++;
if (i == 4)
{
for (i = 0; i < 4; i++)
{
char_array_4[i] = base64_chars.find(char_array_4[i]);
}
char_array_3[0] = ((char_array_4[0] ) << 2) + ((char_array_4[1] & 0x30) >> 4);
char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];
for (i = 0; (i < 3); i++)
{
ret.push_back(char_array_3[i]);
}
i = 0;
}
}
if (i)
{
for (j = i; j < 4; j++)
{
char_array_4[j] = 0;
}
for (j = 0; j < 4; j++)
{
char_array_4[j] = base64_chars.find(char_array_4[j]);
}
char_array_3[0] = ((char_array_4[0] ) << 2) + ((char_array_4[1] & 0x30) >> 4);
char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];
for (j = 0; (j < i - 1); j++)
{
ret.push_back(char_array_3[j]);
}
}
return ret;
}
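A quick usage example for the decoder above, assuming the includes already present in this file; "TG9jYWxBSQ==" is the base64 encoding of "LocalAI":

#include <cstdio> // for fwrite
// Minimal, illustrative driver for base64_decode.
int main() {
    std::vector<uint8_t> bytes = base64_decode("TG9jYWxBSQ==");
    fwrite(bytes.data(), 1, bytes.size(), stdout); // prints: LocalAI
    return 0;
}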

@@ -0,0 +1,51 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
CMAKE_ARGS?=-DGGML_NATIVE=OFF
BUILD_TYPE?=
GOCMD=go
# keep the C++ standard at C++17 (matches -std=c++17 below)
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/bark.cpp/examples -I$(INCLUDE_PATH)/sources/bark.cpp/encodec.cpp/ggml/include -I$(INCLUDE_PATH)/sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/sources/bark.cpp/build/examples -lbark -lstdc++ -lm
# bark.cpp
BARKCPP_REPO?=https://github.com/PABannier/bark.cpp.git
BARKCPP_VERSION?=5d5be84f089ab9ea53b7a793f088d3fbf7247495
# warnings
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
## bark.cpp
sources/bark.cpp:
git clone --recursive $(BARKCPP_REPO) sources/bark.cpp && \
cd sources/bark.cpp && \
git checkout $(BARKCPP_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
sources/bark.cpp/build/libbark.a: sources/bark.cpp
cd sources/bark.cpp && \
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) .. && \
cmake --build . --config Release
gobark.o:
$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)
libbark.a: sources/bark.cpp/build/libbark.a gobark.o
cp $(INCLUDE_PATH)/sources/bark.cpp/build/libbark.a ./
$(AR) rcs libbark.a gobark.o
bark-cpp: libbark.a
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH="$(CURDIR)" LIBRARY_PATH=$(CURDIR) \
$(GOCMD) build -v -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o bark-cpp ./
package:
bash package.sh
build: bark-cpp package
clean:
rm -f gobark.o libbark.a

@@ -48,7 +48,7 @@ int tts(char *text,int threads, char *dst ) {
// generate audio
if (!bark_generate_audio(c, text, threads)) {
fprintf(stderr, "%s: An error occured. If the problem persists, feel free to open an issue to report it.\n", __func__);
fprintf(stderr, "%s: An error occurred. If the problem persists, feel free to open an issue to report it.\n", __func__);
return 1;
}

@@ -1,7 +1,7 @@
package main
// #cgo CXXFLAGS: -I${SRCDIR}/../../../sources/bark.cpp/ -I${SRCDIR}/../../../sources/bark.cpp/encodec.cpp -I${SRCDIR}/../../../sources/bark.cpp/examples -I${SRCDIR}/../../../sources/bark.cpp/spm-headers
// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/../../../sources/bark.cpp/build/examples -L${SRCDIR}/../../../sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon
// #cgo CXXFLAGS: -I${SRCDIR}/sources/bark.cpp/ -I${SRCDIR}/sources/bark.cpp/encodec.cpp -I${SRCDIR}/sources/bark.cpp/encodec.cpp/ggml/include -I${SRCDIR}/sources/bark.cpp/examples -I${SRCDIR}/sources/bark.cpp/spm-headers
// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/sources/bark.cpp/build/examples -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ggml/src/ -L${SRCDIR}/sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon -lggml -lgomp
// #include <gobark.h>
// #include <stdlib.h>
import "C"

backend/go/bark-cpp/package.sh (new executable file)
@@ -0,0 +1,41 @@
#!/bin/bash
# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile
set -e
CURDIR=$(dirname "$(realpath $0)")
# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avrf $CURDIR/bark-cpp $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
# x86_64 architecture
echo "Detected x86_64 architecture, copying x86_64 libraries..."
cp -arfLv /lib64/ld-linux-x86-64.so.2 $CURDIR/package/lib/ld.so
cp -arfLv /lib/x86_64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
cp -arfLv /lib/x86_64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
cp -arfLv /lib/x86_64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
cp -arfLv /lib/x86_64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
cp -arfLv /lib/x86_64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
# ARM64 architecture
echo "Detected ARM64 architecture, copying ARM64 libraries..."
cp -arfLv /lib/ld-linux-aarch64.so.1 $CURDIR/package/lib/ld.so
cp -arfLv /lib/aarch64-linux-gnu/libc.so.6 $CURDIR/package/lib/libc.so.6
cp -arfLv /lib/aarch64-linux-gnu/libgcc_s.so.1 $CURDIR/package/lib/libgcc_s.so.1
cp -arfLv /lib/aarch64-linux-gnu/libstdc++.so.6 $CURDIR/package/lib/libstdc++.so.6
cp -arfLv /lib/aarch64-linux-gnu/libm.so.6 $CURDIR/package/lib/libm.so.6
cp -arfLv /lib/aarch64-linux-gnu/libgomp.so.1 $CURDIR/package/lib/libgomp.so.1
else
echo "Error: Could not detect architecture"
exit 1
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

backend/go/bark-cpp/run.sh (new executable file)
@@ -0,0 +1,13 @@
#!/bin/bash
set -ex
CURDIR=$(dirname "$(realpath $0)")
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
# If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then
echo "Using lib/ld.so"
exec $CURDIR/lib/ld.so $CURDIR/bark-cpp "$@"
fi
exec $CURDIR/bark-cpp "$@"

@@ -1,25 +0,0 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
BUILD_TYPE?=
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/../../../sources/bark.cpp/examples -I$(INCLUDE_PATH)/../../../sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/../../../sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/../../../sources/bark.cpp/build/examples -lbark -lstdc++ -lm
# warnings
CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
gobark.o:
$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)
libbark.a: gobark.o
cp $(INCLUDE_PATH)/../../../sources/bark.cpp/build/libbark.a ./
$(AR) rcs libbark.a gobark.o
$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml.c.o
$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml-alloc.c.o
$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml-backend.c.o
clean:
rm -f gobark.o libbark.a

@@ -0,0 +1,9 @@
GOCMD=go
huggingface:
CGO_ENABLED=0 $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o huggingface ./
package:
bash package.sh
build: huggingface package

@@ -0,0 +1,12 @@
#!/bin/bash
# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile
set -e
CURDIR=$(dirname "$(realpath $0)")
mkdir -p $CURDIR/package
cp -avrf $CURDIR/huggingface $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/

backend/go/huggingface/run.sh (new executable file)
@@ -0,0 +1,6 @@
#!/bin/bash
set -ex
CURDIR=$(dirname "$(realpath $0)")
exec $CURDIR/huggingface "$@"

@@ -1,135 +0,0 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
CMAKE_ARGS?=
BUILD_TYPE?=
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC
GOCMD?=go
CGO_LDFLAGS?=
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
CGO_LDFLAGS_SYCL=
GO_TAGS?=
LD_FLAGS?=
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DGGML_CUDA=ON
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DGGML_HIP=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DGGML_METAL=OFF
else
CMAKE_ARGS+=-DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
TARGET+=--target ggml-metal
endif
endif
ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON \
-DGGML_SYCL_F16=ON
CC=icx
CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif
ifeq ($(BUILD_TYPE),sycl_f32)
CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON
CC=icx
CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif
# warnings
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
# Find all .a archives in ARCHIVE_DIR
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
GGML_ARCHIVE_DIR := build/ggml/src/
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
# Name of the single merged library
COMBINED_LIB := libggmlall.a
# Rule to merge all the .a files into one
$(COMBINED_LIB): $(ALL_ARCHIVES)
@echo "Merging all .a into $(COMBINED_LIB)"
rm -f $@
mkdir -p merge-tmp
for a in $(ALL_ARCHIVES); do \
( cd merge-tmp && ar x ../$$a ); \
done
( cd merge-tmp && ar rcs ../$@ *.o )
# Ensure we have a proper index
ranlib $@
# Clean up
rm -rf merge-tmp
build/libstable-diffusion.a:
@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../../../../../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release"
else
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../../../../../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release
endif
$(MAKE) $(COMBINED_LIB)
gosd.o:
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
else
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
endif
libsd.a: gosd.o
cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
$(AR) rcs libsd.a gosd.o
stablediffusion-ggml:
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o ../../../../backend-assets/grpc/stablediffusion-ggml ./
ifneq ($(UPX),)
$(UPX) ../../../../backend-assets/grpc/stablediffusion-ggml
endif
clean:
rm -rf gosd.o libsd.a build $(COMBINED_LIB)

@@ -1,231 +0,0 @@
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include "gosd.h"
// #include "preprocessing.hpp"
#include "flux.hpp"
#include "stable-diffusion.h"
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_STATIC
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#define STB_IMAGE_WRITE_STATIC
#include "stb_image_write.h"
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#define STB_IMAGE_RESIZE_STATIC
#include "stb_image_resize.h"
// Names of the sampler method, same order as enum sample_method in stable-diffusion.h
const char* sample_method_str[] = {
"euler_a",
"euler",
"heun",
"dpm2",
"dpm++2s_a",
"dpm++2m",
"dpm++2mv2",
"ipndm",
"ipndm_v",
"lcm",
"ddim_trailing",
"tcd",
};
// Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h
const char* schedule_str[] = {
"default",
"discrete",
"karras",
"exponential",
"ays",
"gits",
};
sd_ctx_t* sd_c;
sample_method_t sample_method;
int load_model(char *model, char* options[], int threads, int diff) {
fprintf (stderr, "Loading model!\n");
char *stableDiffusionModel = "";
if (diff == 1 ) {
stableDiffusionModel = model;
model = "";
}
// decode options. Options are in the form optname:optval, or just optname for booleans.
char *clip_l_path = "";
char *clip_g_path = "";
char *t5xxl_path = "";
char *vae_path = "";
char *scheduler = "";
char *sampler = "";
// If options is not NULL, parse options
for (int i = 0; options[i] != NULL; i++) {
char *optname = strtok(options[i], ":");
char *optval = strtok(NULL, ":");
if (optval == NULL) {
optval = "true";
}
if (!strcmp(optname, "clip_l_path")) {
clip_l_path = optval;
}
if (!strcmp(optname, "clip_g_path")) {
clip_g_path = optval;
}
if (!strcmp(optname, "t5xxl_path")) {
t5xxl_path = optval;
}
if (!strcmp(optname, "vae_path")) {
vae_path = optval;
}
if (!strcmp(optname, "scheduler")) {
scheduler = optval;
}
if (!strcmp(optname, "sampler")) {
sampler = optval;
}
}
int sample_method_found = -1;
for (int m = 0; m < N_SAMPLE_METHODS; m++) {
if (!strcmp(sampler, sample_method_str[m])) {
sample_method_found = m;
}
}
if (sample_method_found == -1) {
fprintf(stderr, "Invalid sample method, default to EULER_A!\n");
sample_method_found = EULER_A;
}
sample_method = (sample_method_t)sample_method_found;
int schedule_found = -1;
for (int d = 0; d < N_SCHEDULES; d++) {
if (!strcmp(scheduler, schedule_str[d])) {
schedule_found = d;
fprintf (stderr, "Found scheduler: %s\n", scheduler);
}
}
if (schedule_found == -1) {
fprintf (stderr, "Invalid scheduler! using DEFAULT\n");
schedule_found = DEFAULT;
}
schedule_t schedule = (schedule_t)schedule_found;
fprintf (stderr, "Creating context\n");
sd_ctx_t* sd_ctx = new_sd_ctx(model,
clip_l_path,
clip_g_path,
t5xxl_path,
stableDiffusionModel,
vae_path,
"",
"",
"",
"",
"",
false,
false,
false,
threads,
SD_TYPE_COUNT,
STD_DEFAULT_RNG,
schedule,
false,
false,
false,
false);
if (sd_ctx == NULL) {
fprintf (stderr, "failed loading model (generic error)\n");
return 1;
}
fprintf (stderr, "Created context: OK\n");
sd_c = sd_ctx;
return 0;
}
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed , char *dst, float cfg_scale) {
sd_image_t* results;
std::vector<int> skip_layers = {7, 8, 9};
fprintf (stderr, "Generating image\n");
results = txt2img(sd_c,
text,
negativeText,
-1, //clip_skip
cfg_scale, // cfg_scale
3.5f,
0, // eta
width,
height,
sample_method,
steps,
seed,
1,
NULL,
0.9f,
20.f,
false,
"",
skip_layers.data(),
skip_layers.size(),
0,
0.01,
0.2);
if (results == NULL) {
fprintf (stderr, "NO results\n");
return 1;
}
if (results[0].data == NULL) {
fprintf (stderr, "Results with no data\n");
return 1;
}
fprintf (stderr, "Writing PNG\n");
fprintf (stderr, "DST: %s\n", dst);
fprintf (stderr, "Width: %d\n", results[0].width);
fprintf (stderr, "Height: %d\n", results[0].height);
fprintf (stderr, "Channel: %d\n", results[0].channel);
fprintf (stderr, "Data: %p\n", results[0].data);
stbi_write_png(dst, results[0].width, results[0].height, results[0].channel,
results[0].data, 0, NULL);
fprintf (stderr, "Saved resulting image to '%s'\n", dst);
// TODO: free results. Why does it crash?
free(results[0].data);
results[0].data = NULL;
free(results);
fprintf (stderr, "gen_image is done", dst);
return 0;
}
int unload() {
free_sd_ctx(sd_c);
return 0;
}
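For reference, the optname:optval convention parsed by load_model above (a bare optname is treated as boolean true) can be exercised with a tiny standalone driver; all option names and values below are illustrative:

#include <cstdio>
#include <cstring>
// Standalone demo of the optname:optval option format used by load_model.
int main() {
    char opt1[] = "vae_path:ae.safetensors"; // path-style option (hypothetical value)
    char opt2[] = "diffusion_model";         // boolean-style option, no value
    char *options[] = {opt1, opt2, nullptr}; // NULL-terminated, as load_model expects
    for (int i = 0; options[i] != nullptr; i++) {
        char *optname = strtok(options[i], ":");
        char *optval = strtok(nullptr, ":");
        printf("%s = %s\n", optname, optval ? optval : "true");
    }
    return 0;
}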

@@ -1,96 +0,0 @@
package main
// #cgo CXXFLAGS: -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp/ggml/include
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
// #include <gosd.h>
// #include <stdlib.h>
import "C"
import (
"fmt"
"os"
"path/filepath"
"strings"
"unsafe"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/utils"
)
type SDGGML struct {
base.SingleThread
threads int
sampleMethod string
cfgScale float32
}
func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
sd.threads = int(opts.Threads)
modelFile := C.CString(opts.ModelFile)
defer C.free(unsafe.Pointer(modelFile))
var options **C.char
// prepare a NULL-terminated options array to pass to C;
// load_model scans entries until it reaches a NULL pointer
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
length := C.size_t(len(opts.Options) + 1) // one extra slot for the NULL terminator
options = (**C.char)(C.malloc(length * size))
view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0 : len(opts.Options)+1 : len(opts.Options)+1]
var diffusionModel int
var oo []string
for _, op := range opts.Options {
if op == "diffusion_model" {
diffusionModel = 1
continue
}
// If it's an option path, we resolve absolute path from the model path
if strings.Contains(op, ":") && strings.Contains(op, "path") {
data := strings.Split(op, ":")
data[1] = filepath.Join(opts.ModelPath, data[1])
if err := utils.VerifyPath(data[1], opts.ModelPath); err == nil {
oo = append(oo, strings.Join(data, ":"))
}
} else {
oo = append(oo, op)
}
}
fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
for i, x := range oo {
view[i] = C.CString(x)
}
view[len(oo)] = nil // terminate the array so the C side knows where it ends
sd.cfgScale = opts.CFGScale
ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
if ret != 0 {
return fmt.Errorf("could not load model")
}
return nil
}
func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
t := C.CString(opts.PositivePrompt)
defer C.free(unsafe.Pointer(t))
dst := C.CString(opts.Dst)
defer C.free(unsafe.Pointer(dst))
negative := C.CString(opts.NegativePrompt)
defer C.free(unsafe.Pointer(negative))
ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
if ret != 0 {
return fmt.Errorf("inference failed")
}
return nil
}

@@ -1,8 +0,0 @@
#ifdef __cplusplus
extern "C" {
#endif
int load_model(char *model, char* options[], int threads, int diffusionModel);
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
#ifdef __cplusplus
}
#endif

@@ -1,20 +0,0 @@
package main
// Note: this is started internally by LocalAI and a server is allocated for each model
import (
"flag"
grpc "github.com/mudler/LocalAI/pkg/grpc"
)
var (
addr = flag.String("addr", "localhost:50051", "the address to connect to")
)
func main() {
flag.Parse()
if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {
panic(err)
}
}

@@ -58,6 +58,9 @@ func (llm *LLM) Load(opts *pb.ModelOptions) error {
if opts.Embeddings {
llamaOpts = append(llamaOpts, llama.EnableEmbeddings)
}
if opts.Reranking {
llamaOpts = append(llamaOpts, llama.EnableReranking)
}
if opts.NGPULayers != 0 {
llamaOpts = append(llamaOpts, llama.SetGPULayers(int(opts.NGPULayers)))
}

@@ -0,0 +1,9 @@
GOCMD=go
local-store:
CGO_ENABLED=0 $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o local-store ./
package:
bash package.sh
build: local-store package

@@ -0,0 +1,12 @@
#!/bin/bash
# Script to copy the appropriate libraries based on architecture
# This script is used in the final stage of the Dockerfile
set -e
CURDIR=$(dirname "$(realpath $0)")
mkdir -p $CURDIR/package
cp -avrf $CURDIR/local-store $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/

backend/go/local-store/run.sh (new executable file)
@@ -0,0 +1,6 @@
#!/bin/bash
set -ex
CURDIR=$(dirname "$(realpath $0)")
exec $CURDIR/local-store "$@"

@@ -4,6 +4,7 @@ package main
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"container/heap"
"errors"
"fmt"
"math"
"slices"
@@ -99,6 +100,9 @@ func sortIntoKeySlicese(keys []*pb.StoresKey) [][]float32 {
}
func (s *Store) Load(opts *pb.ModelOptions) error {
if opts.Model != "" {
return errors.New("not implemented")
}
return nil
}
@@ -315,7 +319,7 @@ func isNormalized(k []float32) bool {
for _, v := range k {
v64 := float64(v)
sum += v64*v64
sum += v64 * v64
}
s := math.Sqrt(sum)

backend/go/piper/Makefile (new file)
@@ -0,0 +1,37 @@
# go-piper version
PIPER_REPO?=https://github.com/mudler/go-piper
PIPER_VERSION?=e10ca041a885d4a8f3871d52924b47792d5e5aa0
CURRENT_DIR=$(abspath ./)
GOCMD=go
PIPER_CGO_CXXFLAGS+=-I$(CURRENT_DIR)/sources/go-piper/piper/src/cpp -I$(CURRENT_DIR)/sources/go-piper/piper/build/fi/include -I$(CURRENT_DIR)/sources/go-piper/piper/build/pi/include -I$(CURRENT_DIR)/sources/go-piper/piper/build/si/include
PIPER_CGO_LDFLAGS+=-L$(CURRENT_DIR)/sources/go-piper/piper/build/fi/lib -L$(CURRENT_DIR)/sources/go-piper/piper/build/pi/lib -L$(CURRENT_DIR)/sources/go-piper/piper/build/si/lib -lfmt -lspdlog -lucd
## go-piper
sources/go-piper:
mkdir -p sources/go-piper
cd sources/go-piper && \
git init && \
git remote add origin $(PIPER_REPO) && \
git fetch origin && \
git checkout $(PIPER_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
sources/go-piper/libpiper_binding.a: sources/go-piper
$(MAKE) -C sources/go-piper libpiper_binding.a example/main piper.o
espeak-ng-data: sources/go-piper sources/go-piper/libpiper_binding.a
mkdir -p espeak-ng-data
@cp -rf sources/go-piper/piper-phonemize/pi/share/espeak-ng-data/. espeak-ng-data
piper: sources/go-piper sources/go-piper/libpiper_binding.a espeak-ng-data
$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(CURRENT_DIR)/sources/go-piper
CGO_CXXFLAGS="$(PIPER_CGO_CXXFLAGS)" CGO_LDFLAGS="$(PIPER_CGO_LDFLAGS)" LIBRARY_PATH=$(CURRENT_DIR)/sources/go-piper \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o piper ./
package:
bash package.sh
build: piper package

Some files were not shown because too many files have changed in this diff.