Compare commits

...

15 Commits

Author SHA1 Message Date
Ettore Di Giacinto
a8057b952c fix(cuda): be consistent with image tag naming (#5916)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-26 08:30:59 +02:00
Ettore Di Giacinto
fd5c1d916f chore(docs): add documentation on backend detection override (#5915)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-26 08:18:31 +02:00
LocalAI [bot]
5ce982b9c9 chore: ⬆️ Update ggml-org/llama.cpp to c7f3169cd523140a288095f2d79befb20a0b73f4 (#5913)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 23:08:20 +02:00
Ettore Di Giacinto
47ccfccf7a fix(ci): add nvidia-l4t capability to l4t images (#5914)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-25 22:45:09 +02:00
LocalAI [bot]
a760f7ff39 docs: ⬆️ update docs version mudler/LocalAI (#5912)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 22:15:16 +02:00
Ettore Di Giacinto
facf7625f3 fix(vulkan): use correct image suffix (#5911)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 19:20:20 +02:00
Ettore Di Giacinto
b3600b3c50 feat(backend gallery): add mirrors (#5910)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 19:20:08 +02:00
Ettore Di Giacinto
f0b47cfe6a fix(backends gallery): trim string when reading cap from file (#5909)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 18:10:02 +02:00
Ettore Di Giacinto
ee625fc34e fix(backends gallery): pass-by backend galleries to the model service (#5906)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 16:38:09 +02:00
Ettore Di Giacinto
693aa0b5de Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-07-25 11:51:23 +02:00
Ettore Di Giacinto
3973e6e5da fix(install.sh): update to use the new binary naming (#5903)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-25 10:43:22 +02:00
LocalAI [bot]
fb6ec68090 chore: ⬆️ Update ggml-org/whisper.cpp to 7de8dd783f7b2eab56bff6bbc5d3369e34f0e77f (#5902)
⬆️ Update ggml-org/whisper.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 08:40:24 +02:00
LocalAI [bot]
0301fc7c46 chore: ⬆️ Update leejet/stable-diffusion.cpp to eed97a5e1d054f9c1e7ac01982ae480411d4157e (#5901)
⬆️ Update leejet/stable-diffusion.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 08:40:06 +02:00
LocalAI [bot]
813cb4296d chore: ⬆️ Update ggml-org/llama.cpp to 3f4fc97f1d745f1d5d3c853949503136d419e6de (#5900)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-07-25 08:39:44 +02:00
Ettore Di Giacinto
deda3a4972 Update build documentation
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2025-07-24 22:53:08 +02:00
23 changed files with 402 additions and 183 deletions

View File

@@ -39,7 +39,7 @@ jobs:
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda12'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"

View File

@@ -83,7 +83,7 @@ jobs:
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda11'
tag-suffix: '-gpu-nvidia-cuda-11'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
@@ -94,7 +94,7 @@ jobs:
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda12'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
@@ -103,7 +103,7 @@ jobs:
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-vulkan'
tag-suffix: '-gpu-vulkan'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'

View File

@@ -72,6 +72,12 @@ RUN <<EOT bash
fi
EOT
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
echo "nvidia-l4t" > /run/localai/capability
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \

View File

@@ -322,7 +322,7 @@ docker-cuda11:
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
-t $(DOCKER_IMAGE)-cuda11 .
-t $(DOCKER_IMAGE)-cuda-11 .
docker-aio:
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"

View File

@@ -189,10 +189,13 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
local-ai run oci://localai/phi-2:latest
```
> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
## 📰 Latest project news
- July 2025: All backends have been migrated out of the main binary. LocalAI is now more lightweight and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images will be deprecated in the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
- May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery).
- May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0)

View File

@@ -1,5 +1,5 @@
LLAMA_VERSION?=a86f52b2859dae4db5a7a0bbc0f1ad9de6b43ec6
LLAMA_VERSION?=c7f3169cd523140a288095f2d79befb20a0b73f4
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?=

View File

@@ -19,7 +19,7 @@ LD_FLAGS?=
# stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=1896b28ef2fd5b3643120e66979bea487385439f
STABLEDIFFUSION_GGML_VERSION?=eed97a5e1d054f9c1e7ac01982ae480411d4157e
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF

View File

@@ -6,7 +6,7 @@ CMAKE_ARGS?=
# whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=1f5cf0b2888402d57bb17b2029b2caa97e5f3baf
WHISPER_CPP_VERSION?=7de8dd783f7b2eab56bff6bbc5d3369e34f0e77f
export WHISPER_CMAKE_ARGS?=-DBUILD_SHARED_LIBS=OFF
export WHISPER_DIR=$(abspath ./sources/whisper.cpp)

View File

@@ -258,6 +258,8 @@
icon: https://github.com/PABannier/bark.cpp/raw/main/assets/banner.png
name: "bark-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-bark-cpp"
mirrors:
- localai/localai-backends:latest-bark-cpp
alias: "bark-cpp"
- &chatterbox
urls:
@@ -280,6 +282,8 @@
urls:
- https://github.com/rhasspy/piper
- https://github.com/mudler/go-piper
mirrors:
- localai/localai-backends:latest-piper
license: MIT
description: |
A fast, local neural text to speech system
@@ -292,6 +296,8 @@
icon: https://user-images.githubusercontent.com/12515440/89997349-b3523080-dc94-11ea-9906-ca2e8bc50535.png
urls:
- https://github.com/snakers4/silero-vad
mirrors:
- localai/localai-backends:latest-cpu-silero-vad
description: |
Silero VAD: pre-trained enterprise-grade Voice Activity Detector.
Silero VAD is a voice activity detection model that can be used to detect whether a given audio contains speech or not.
@@ -303,6 +309,8 @@
- &local-store
name: "local-store"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-local-store"
mirrors:
- localai/localai-backends:latest-cpu-local-store
urls:
- https://github.com/mudler/LocalAI
description: |
@@ -316,6 +324,8 @@
- &huggingface
name: "huggingface"
uri: "quay.io/go-skynet/local-ai-backends:latest-huggingface"
mirrors:
- localai/localai-backends:latest-huggingface
icon: https://huggingface.co/front/assets/huggingface_logo-noborder.svg
urls:
- https://huggingface.co/docs/hub/en/api
@@ -328,174 +338,284 @@
- !!merge <<: *huggingface
name: "huggingface-development"
uri: "quay.io/go-skynet/local-ai-backends:master-huggingface"
mirrors:
- localai/localai-backends:master-huggingface
- !!merge <<: *local-store
name: "local-store-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-local-store"
mirrors:
- localai/localai-backends:master-cpu-local-store
- !!merge <<: *silero-vad
name: "silero-vad-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-silero-vad"
mirrors:
- localai/localai-backends:master-cpu-silero-vad
- !!merge <<: *piper
name: "piper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-piper"
mirrors:
- localai/localai-backends:master-piper
## llama-cpp
- !!merge <<: *llamacpp
name: "darwin-x86-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-darwin-x86-llama-cpp"
mirrors:
- localai/localai-backends:latest-darwin-x86-llama-cpp
- !!merge <<: *llamacpp
name: "darwin-x86-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-darwin-x86-llama-cpp"
mirrors:
- localai/localai-backends:master-darwin-x86-llama-cpp
- !!merge <<: *llamacpp
name: "nvidia-l4t-arm64-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-llama-cpp"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "nvidia-l4t-arm64-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-llama-cpp"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "cpu-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-llama-cpp"
mirrors:
- localai/localai-backends:latest-cpu-llama-cpp
- !!merge <<: *llamacpp
name: "cpu-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-llama-cpp"
mirrors:
- localai/localai-backends:master-cpu-llama-cpp
- !!merge <<: *llamacpp
name: "cuda11-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-llama-cpp
- !!merge <<: *llamacpp
name: "cuda12-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-llama-cpp
- !!merge <<: *llamacpp
name: "rocm-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-llama-cpp
- !!merge <<: *llamacpp
name: "intel-sycl-f32-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-llama-cpp
- !!merge <<: *llamacpp
name: "intel-sycl-f16-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-llama-cpp
- !!merge <<: *llamacpp
name: "vulkan-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-vulkan-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-vulkan-llama-cpp
- !!merge <<: *llamacpp
name: "vulkan-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-vulkan-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-vulkan-llama-cpp
- !!merge <<: *llamacpp
name: "metal-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-llama-cpp"
mirrors:
- localai/localai-backends:latest-metal-darwin-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "metal-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-llama-cpp"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "cuda11-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-llama-cpp
- !!merge <<: *llamacpp
name: "cuda12-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-llama-cpp
- !!merge <<: *llamacpp
name: "rocm-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-llama-cpp
- !!merge <<: *llamacpp
name: "intel-sycl-f32-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-llama-cpp
- !!merge <<: *llamacpp
name: "intel-sycl-f16-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-llama-cpp
## whisper
- !!merge <<: *whispercpp
name: "nvidia-l4t-arm64-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-whisper"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-whisper
- !!merge <<: *whispercpp
name: "nvidia-l4t-arm64-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-whisper"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-whisper
- !!merge <<: *whispercpp
name: "cpu-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-whisper"
mirrors:
- localai/localai-backends:latest-cpu-whisper
- !!merge <<: *whispercpp
name: "cpu-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-whisper"
mirrors:
- localai/localai-backends:master-cpu-whisper
- !!merge <<: *whispercpp
name: "cuda11-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-whisper"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-whisper
- !!merge <<: *whispercpp
name: "cuda12-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-whisper"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-whisper
- !!merge <<: *whispercpp
name: "rocm-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-whisper"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-whisper
- !!merge <<: *whispercpp
name: "intel-sycl-f32-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-whisper"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-whisper
- !!merge <<: *whispercpp
name: "intel-sycl-f16-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-whisper"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-whisper
- !!merge <<: *whispercpp
name: "vulkan-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-vulkan-whisper"
mirrors:
- localai/localai-backends:latest-gpu-vulkan-whisper
- !!merge <<: *whispercpp
name: "vulkan-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-vulkan-whisper"
mirrors:
- localai/localai-backends:master-gpu-vulkan-whisper
- !!merge <<: *whispercpp
name: "metal-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-whisper"
mirrors:
- localai/localai-backends:latest-metal-darwin-arm64-whisper
- !!merge <<: *whispercpp
name: "metal-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-whisper"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-whisper
- !!merge <<: *whispercpp
name: "cuda11-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-whisper"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-whisper
- !!merge <<: *whispercpp
name: "cuda12-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-whisper"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-whisper
- !!merge <<: *whispercpp
name: "rocm-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-whisper"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-whisper
- !!merge <<: *whispercpp
name: "intel-sycl-f32-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-whisper"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-whisper
- !!merge <<: *whispercpp
name: "intel-sycl-f16-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-whisper"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-whisper
## stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cpu-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-cpu-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cpu-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-cpu-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "vulkan-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-vulkan-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-gpu-vulkan-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "vulkan-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-vulkan-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-vulkan-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda12-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "intel-sycl-f32-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-stablediffusion-ggml"
- !!merge <<: *stablediffusionggml
name: "intel-sycl-f16-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda11-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda12-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "intel-sycl-f32-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "intel-sycl-f16-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda11-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "nvidia-l4t-arm64-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "nvidia-l4t-arm64-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-stablediffusion-ggml
# vllm
- !!merge <<: *vllm
name: "vllm-development"
@@ -506,27 +626,43 @@
- !!merge <<: *vllm
name: "cuda12-vllm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vllm"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-vllm
- !!merge <<: *vllm
name: "rocm-vllm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-vllm"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-vllm
- !!merge <<: *vllm
name: "intel-sycl-f32-vllm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-vllm"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-vllm
- !!merge <<: *vllm
name: "intel-sycl-f16-vllm"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-vllm"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-vllm
- !!merge <<: *vllm
name: "cuda12-vllm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vllm"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-vllm
- !!merge <<: *vllm
name: "rocm-vllm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-vllm"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-vllm
- !!merge <<: *vllm
name: "intel-sycl-f32-vllm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-vllm"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-vllm
- !!merge <<: *vllm
name: "intel-sycl-f16-vllm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-vllm"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-vllm
## Rerankers
- !!merge <<: *rerankers
name: "rerankers-development"
@@ -537,33 +673,53 @@
- !!merge <<: *rerankers
name: "cuda11-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-rerankers
- !!merge <<: *rerankers
name: "cuda12-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-rerankers
- !!merge <<: *rerankers
name: "intel-sycl-f32-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-rerankers
- !!merge <<: *rerankers
name: "intel-sycl-f16-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-rerankers
- !!merge <<: *rerankers
name: "rocm-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-rerankers
- !!merge <<: *rerankers
name: "cuda11-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-rerankers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-rerankers
- !!merge <<: *rerankers
name: "cuda12-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-rerankers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-rerankers
- !!merge <<: *rerankers
name: "rocm-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-rerankers"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-rerankers
- !!merge <<: *rerankers
name: "intel-sycl-f32-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-rerankers"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-rerankers
- !!merge <<: *rerankers
name: "intel-sycl-f16-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-rerankers"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-rerankers
## Transformers
- !!merge <<: *transformers
name: "transformers-development"
@@ -574,33 +730,53 @@
- !!merge <<: *transformers
name: "cuda12-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-transformers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-transformers
- !!merge <<: *transformers
name: "rocm-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-transformers"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-transformers
- !!merge <<: *transformers
name: "intel-sycl-f32-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-transformers"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-transformers
- !!merge <<: *transformers
name: "intel-sycl-f16-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-transformers"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-transformers
- !!merge <<: *transformers
name: "cuda11-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-transformers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-transformers
- !!merge <<: *transformers
name: "cuda11-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-transformers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-transformers
- !!merge <<: *transformers
name: "cuda12-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-transformers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-transformers
- !!merge <<: *transformers
name: "rocm-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-transformers"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-transformers
- !!merge <<: *transformers
name: "intel-sycl-f32-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-transformers"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-transformers
- !!merge <<: *transformers
name: "intel-sycl-f16-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-transformers"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-transformers
## Diffusers
- !!merge <<: *diffusers
name: "diffusers-development"
@@ -611,27 +787,43 @@
- !!merge <<: *diffusers
name: "cuda12-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-diffusers
- !!merge <<: *diffusers
name: "rocm-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-diffusers
- !!merge <<: *diffusers
name: "cuda11-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-diffusers
- !!merge <<: *diffusers
name: "intel-sycl-f32-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-diffusers
- !!merge <<: *diffusers
name: "cuda11-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-diffusers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-diffusers
- !!merge <<: *diffusers
name: "cuda12-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-diffusers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-diffusers
- !!merge <<: *diffusers
name: "rocm-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-diffusers"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-diffusers
- !!merge <<: *diffusers
name: "intel-sycl-f32-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-diffusers"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-diffusers
## exllama2
- !!merge <<: *exllama2
name: "exllama2-development"
@@ -642,15 +834,23 @@
- !!merge <<: *exllama2
name: "cuda11-exllama2"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-exllama2"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-exllama2
- !!merge <<: *exllama2
name: "cuda12-exllama2"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-exllama2
- !!merge <<: *exllama2
name: "cuda11-exllama2-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-exllama2"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-exllama2
- !!merge <<: *exllama2
name: "cuda12-exllama2-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-exllama2
## kokoro
- !!merge <<: *kokoro
name: "kokoro-development"
@@ -661,33 +861,53 @@
- !!merge <<: *kokoro
name: "cuda11-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-kokoro"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-kokoro
- !!merge <<: *kokoro
name: "cuda12-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-kokoro"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-kokoro
- !!merge <<: *kokoro
name: "rocm-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-kokoro"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-kokoro
- !!merge <<: *kokoro
name: "sycl-f32-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-kokoro
- !!merge <<: *kokoro
name: "sycl-f16-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-kokoro
- !!merge <<: *kokoro
name: "sycl-f16-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-kokoro"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-kokoro
- !!merge <<: *kokoro
name: "sycl-f32-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-kokoro"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-kokoro
- !!merge <<: *kokoro
name: "cuda11-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-kokoro
- !!merge <<: *kokoro
name: "cuda12-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-kokoro
- !!merge <<: *kokoro
name: "rocm-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-kokoro
## faster-whisper
- !!merge <<: *faster-whisper
name: "faster-whisper-development"
@@ -698,24 +918,38 @@
- !!merge <<: *faster-whisper
name: "cuda11-faster-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-faster-whisper"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-faster-whisper
- !!merge <<: *faster-whisper
name: "cuda12-faster-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-faster-whisper"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-faster-whisper
- !!merge <<: *faster-whisper
name: "rocm-faster-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-faster-whisper"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-faster-whisper
- !!merge <<: *faster-whisper
name: "sycl-f32-faster-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-faster-whisper"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-faster-whisper
- !!merge <<: *faster-whisper
name: "sycl-f16-faster-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-faster-whisper"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-faster-whisper
- !!merge <<: *faster-whisper
name: "sycl-f32-faster-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-faster-whisper"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-faster-whisper
- !!merge <<: *faster-whisper
name: "sycl-f16-faster-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-faster-whisper"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-faster-whisper
## coqui
- !!merge <<: *coqui
@@ -727,33 +961,53 @@
- !!merge <<: *coqui
name: "cuda11-coqui"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-coqui"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-coqui
- !!merge <<: *coqui
name: "cuda12-coqui"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-coqui"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-coqui
- !!merge <<: *coqui
name: "cuda11-coqui-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-coqui"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-coqui
- !!merge <<: *coqui
name: "cuda12-coqui-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-coqui"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-coqui
- !!merge <<: *coqui
name: "rocm-coqui-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-coqui"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-coqui
- !!merge <<: *coqui
name: "sycl-f32-coqui"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-coqui"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-coqui
- !!merge <<: *coqui
name: "sycl-f16-coqui"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-coqui"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-coqui
- !!merge <<: *coqui
name: "sycl-f32-coqui-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-coqui"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-coqui
- !!merge <<: *coqui
name: "sycl-f16-coqui-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-coqui"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-coqui
- !!merge <<: *coqui
name: "rocm-coqui"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-coqui"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-coqui
## bark
- !!merge <<: *bark
name: "bark-development"
@@ -764,33 +1018,53 @@
- !!merge <<: *bark
name: "cuda11-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-bark"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-bark
- !!merge <<: *bark
name: "cuda11-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-bark"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-bark
- !!merge <<: *bark
name: "rocm-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-bark
- !!merge <<: *bark
name: "sycl-f32-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f32-bark"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f32-bark
- !!merge <<: *bark
name: "sycl-f16-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-bark"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-bark
- !!merge <<: *bark
name: "sycl-f32-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f32-bark"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f32-bark
- !!merge <<: *bark
name: "sycl-f16-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-bark"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-bark
- !!merge <<: *bark
name: "cuda12-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-bark"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-bark
- !!merge <<: *bark
name: "rocm-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-bark"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-bark
- !!merge <<: *bark
name: "cuda12-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-bark"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-bark
- !!merge <<: *barkcpp
name: "bark-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-bark-cpp"
@@ -803,12 +1077,20 @@
- !!merge <<: *chatterbox
name: "cuda12-chatterbox-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-chatterbox"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-chatterbox
- !!merge <<: *chatterbox
name: "cuda11-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-chatterbox"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-chatterbox
- !!merge <<: *chatterbox
name: "cuda11-chatterbox-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-chatterbox"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-chatterbox
- !!merge <<: *chatterbox
name: "cuda12-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-chatterbox"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-chatterbox

View File

@@ -111,7 +111,7 @@ function ensureVenv() {
# - requirements-${BUILD_TYPE}.txt
# - requirements-${BUILD_PROFILE}.txt
#
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda11 or cuda12
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
# it can also include some options that we do not have BUILD_TYPES for, ex: intel
#
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
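
A minimal sketch of the lookup order described in the comment above, assuming `BUILD_TYPE=cublas` and `BUILD_PROFILE=cuda-12`; this illustrates the naming convention only, not the actual `ensureVenv` implementation:

```bash
# Illustrative only: install whichever requirement files exist for the current profile,
# from most generic to most specific.
BUILD_TYPE=cublas
BUILD_PROFILE=cuda-12
for f in requirements.txt "requirements-${BUILD_TYPE}.txt" "requirements-${BUILD_PROFILE}.txt"; do
    [ -f "$f" ] && pip install -r "$f"
done
```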

View File

@@ -3,6 +3,7 @@ package gallery
import (
"github.com/mudler/LocalAI/core/config"
"github.com/mudler/LocalAI/pkg/system"
"github.com/rs/zerolog/log"
)
// BackendMetadata represents the metadata stored in a JSON file for each installed backend
@@ -23,6 +24,7 @@ type GalleryBackend struct {
Metadata `json:",inline" yaml:",inline"`
Alias string `json:"alias,omitempty" yaml:"alias,omitempty"`
URI string `json:"uri,omitempty" yaml:"uri,omitempty"`
Mirrors []string `json:"mirrors,omitempty" yaml:"mirrors,omitempty"`
CapabilitiesMap map[string]string `json:"capabilities,omitempty" yaml:"capabilities,omitempty"`
}
@@ -33,9 +35,11 @@ func (backend *GalleryBackend) FindBestBackendFromMeta(systemState *system.Syste
realBackend := backend.CapabilitiesMap[systemState.Capability(backend.CapabilitiesMap)]
if realBackend == "" {
log.Debug().Str("backend", backend.Name).Str("reportedCapability", systemState.Capability(backend.CapabilitiesMap)).Msg("No backend found for reported capability")
return nil
}
log.Debug().Str("backend", backend.Name).Str("reportedCapability", systemState.Capability(backend.CapabilitiesMap)).Msg("Found backend for reported capability")
return backends.FindByName(realBackend)
}

View File

@@ -146,7 +146,18 @@ func InstallBackend(basePath string, config *GalleryBackend, downloadStatus func
uri := downloader.URI(config.URI)
if err := uri.DownloadFile(backendPath, "", 1, 1, downloadStatus); err != nil {
return fmt.Errorf("failed to download backend %q: %v", config.URI, err)
success := false
// Try to download from mirrors
for _, mirror := range config.Mirrors {
if err := downloader.URI(mirror).DownloadFile(backendPath, "", 1, 1, downloadStatus); err == nil {
success = true
break
}
}
if !success {
return fmt.Errorf("failed to download backend %q: %v", config.URI, err)
}
}
// Create metadata for the backend
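
With the fallback above, a backend install keeps working when the primary `quay.io` URI is unreachable, because the mirrors listed in the gallery index are tried in order. Installation itself is unchanged; for example, the `piper` entry above declares a Docker Hub mirror:

```bash
# Install a gallery backend; if the primary registry fails, the configured
# mirrors (e.g. localai/localai-backends:latest-piper) are tried next.
./local-ai backends install piper
```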

View File

@@ -15,9 +15,10 @@ import (
)
type ModelGalleryEndpointService struct {
galleries []config.Gallery
modelPath string
galleryApplier *services.GalleryService
galleries []config.Gallery
backendGalleries []config.Gallery
modelPath string
galleryApplier *services.GalleryService
}
type GalleryModel struct {
@@ -25,11 +26,12 @@ type GalleryModel struct {
gallery.GalleryModel
}
func CreateModelGalleryEndpointService(galleries []config.Gallery, modelPath string, galleryApplier *services.GalleryService) ModelGalleryEndpointService {
func CreateModelGalleryEndpointService(galleries []config.Gallery, backendGalleries []config.Gallery, modelPath string, galleryApplier *services.GalleryService) ModelGalleryEndpointService {
return ModelGalleryEndpointService{
galleries: galleries,
modelPath: modelPath,
galleryApplier: galleryApplier,
galleries: galleries,
backendGalleries: backendGalleries,
modelPath: modelPath,
galleryApplier: galleryApplier,
}
}
@@ -79,6 +81,7 @@ func (mgs *ModelGalleryEndpointService) ApplyModelGalleryEndpoint() func(c *fibe
ID: uuid.String(),
GalleryElementName: input.ID,
Galleries: mgs.galleries,
BackendGalleries: mgs.backendGalleries,
}
return c.JSON(schema.GalleryResponse{ID: uuid.String(), StatusURL: fmt.Sprintf("%smodels/jobs/%s", utils.BaseURL(c), uuid.String())})

View File

@@ -23,7 +23,7 @@ func RegisterLocalAIRoutes(router *fiber.App,
// LocalAI API endpoints
if !appConfig.DisableGalleryEndpoint {
modelGalleryEndpointService := localai.CreateModelGalleryEndpointService(appConfig.Galleries, appConfig.ModelPath, galleryService)
modelGalleryEndpointService := localai.CreateModelGalleryEndpointService(appConfig.Galleries, appConfig.BackendGalleries, appConfig.ModelPath, galleryService)
router.Post("/models/apply", modelGalleryEndpointService.ApplyModelGalleryEndpoint())
router.Post("/models/delete/:name", modelGalleryEndpointService.DeleteModelGalleryEndpoint())

View File

@@ -180,6 +180,7 @@ func registerGalleryRoutes(app *fiber.App, cl *config.BackendConfigLoader, appCo
ID: uid,
GalleryElementName: galleryID,
Galleries: appConfig.Galleries,
BackendGalleries: appConfig.BackendGalleries,
}
go func() {
galleryService.ModelGalleryChannel <- op
@@ -219,6 +220,7 @@ func registerGalleryRoutes(app *fiber.App, cl *config.BackendConfigLoader, appCo
Delete: true,
GalleryElementName: galleryName,
Galleries: appConfig.Galleries,
BackendGalleries: appConfig.BackendGalleries,
}
go func() {
galleryService.ModelGalleryChannel <- op

View File

@@ -24,6 +24,7 @@ func (g *GalleryService) backendHandler(op *GalleryOp[gallery.GalleryBackend], s
g.modelLoader.DeleteExternalBackend(op.GalleryElementName)
} else {
log.Warn().Msgf("installing backend %s", op.GalleryElementName)
log.Debug().Msgf("backend galleries: %v", g.appConfig.BackendGalleries)
err = gallery.InstallBackendFromGallery(g.appConfig.BackendGalleries, systemState, op.GalleryElementName, g.appConfig.BackendsPath, progressCallback, true)
if err == nil {
err = gallery.RegisterBackends(g.appConfig.BackendsPath, g.modelLoader)

View File

@@ -15,6 +15,16 @@ This section contains instruction on how to use LocalAI with GPU acceleration.
Acceleration for AMD or Metal HW is still in development; for additional details see the [build]({{%relref "docs/getting-started/build#Acceleration" %}})
{{% /alert %}}
## Automatic Backend Detection
When you install a model from the gallery (or a YAML file), LocalAI intelligently detects the required backend and your system's capabilities, then downloads the correct version for you. Whether you're running on a standard CPU, an NVIDIA GPU, an AMD GPU, or an Intel GPU, LocalAI handles it automatically.
For advanced use cases or to override auto-detection, you can use the `LOCALAI_FORCE_META_BACKEND_CAPABILITY` environment variable. Here are the available options:
- `default`: Forces CPU-only backend. This is the fallback if no specific hardware is detected.
- `nvidia`: Forces backends compiled with CUDA support for NVIDIA GPUs.
- `amd`: Forces backends compiled with ROCm support for AMD GPUs.
- `intel`: Forces backends compiled with SYCL/oneAPI support for Intel GPUs.
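
For example, to force a specific capability at startup (a usage sketch; the model reference and image tag are illustrative):

```bash
# Force CPU-only backends even if a GPU is detected:
LOCALAI_FORCE_META_BACKEND_CAPABILITY=default local-ai run oci://localai/phi-2:latest

# In Docker, pass the same variable with -e (here forcing the NVIDIA backends):
docker run -ti -p 8080:8080 --gpus all \
  -e LOCALAI_FORCE_META_BACKEND_CAPABILITY=nvidia \
  localai/localai:latest-gpu-nvidia-cuda-12
```
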
## Model configuration
@@ -71,8 +81,8 @@ To use CUDA, use the images with the `cublas` tag, for example.
The image list is on [quay](https://quay.io/repository/go-skynet/local-ai?tab=tags):
- CUDA `11` tags: `master-gpu-nvidia-cuda11`, `v1.40.0-gpu-nvidia-cuda11`, ...
- CUDA `12` tags: `master-gpu-nvidia-cuda12`, `v1.40.0-gpu-nvidia-cuda12`, ...
- CUDA `11` tags: `master-gpu-nvidia-cuda-11`, `v1.40.0-gpu-nvidia-cuda-11`, ...
- CUDA `12` tags: `master-gpu-nvidia-cuda-12`, `v1.40.0-gpu-nvidia-cuda-12`, ...
In addition to the commands to run LocalAI normally, you need to specify `--gpus all` to docker, for example:

View File

@@ -9,13 +9,11 @@ ico = "rocket_launch"
### Build
LocalAI can be built as a container image or as a single, portable binary. Note that some model architectures might require Python libraries, which are not included in the binary. The binary contains only the core backends written in Go and C++.
LocalAI can be built as a container image or as a single, portable binary. Note that some model architectures might require Python libraries, which are not included in the binary.
LocalAI's extensible architecture allows you to add your own backends, which can be written in any language; as such, the container images also contain the Python dependencies needed to run all the available backends (for example, backends like __Diffusers__ that generate images and videos from text).
In some cases you might want to re-build LocalAI from source (for instance to leverage Apple Silicon acceleration), or to build a custom container image with your own backends. This section contains instructions on how to build LocalAI from source.
This section contains instructions on how to build LocalAI from source.
#### Build LocalAI locally
@@ -24,7 +22,6 @@ In some cases you might want to re-build LocalAI from source (for instance to le
In order to build LocalAI locally, you need the following requirements:
- Golang >= 1.21
- Cmake/make
- GCC
- GRPC
@@ -36,20 +33,14 @@ To install the dependencies follow the instructions below:
Install `xcode` from the App Store
```bash
brew install abseil cmake go grpc protobuf protoc-gen-go protoc-gen-go-grpc python wget
```
After installing the above dependencies, you need to install grpcio-tools from PyPI. You could do this via a pip --user install or a virtualenv.
```bash
pip install --user grpcio-tools
brew install go protobuf protoc-gen-go protoc-gen-go-grpc wget
```
{{% /tab %}}
{{% tab tabName="Debian" %}}
```bash
apt install cmake golang libgrpc-dev make protobuf-compiler-grpc python3-grpc-tools
apt install golang make protobuf-compiler-grpc
```
After you have golang installed and working, you can install the required binaries for compiling the golang protobuf components via the following commands
@@ -63,10 +54,8 @@ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f1
{{% /tab %}}
{{% tab tabName="From source" %}}
Specify `BUILD_GRPC_FOR_BACKEND_LLAMA=true` to build automatically the gRPC dependencies
```bash
make ... BUILD_GRPC_FOR_BACKEND_LLAMA=true build
make build
```
{{% /tab %}}
@@ -83,36 +72,6 @@ make build
This should produce the binary `local-ai`
Here is the list of the variables available that can be used to customize the build:
| Variable | Default | Description |
| ---------------------| ------- | ----------- |
| `BUILD_TYPE` | None | Build type. Available: `cublas`, `openblas`, `clblas`, `metal`,`hipblas`, `sycl_f16`, `sycl_f32` |
| `GO_TAGS` | `tts stablediffusion` | Go tags. Available: `stablediffusion`, `tts` |
| `CLBLAST_DIR` | | Specify a CLBlast directory |
| `CUDA_LIBPATH` | | Specify a CUDA library path |
| `BUILD_API_ONLY` | false | Set to true to build only the API (no backends will be built) |
{{% alert note %}}
#### CPU flagset compatibility
LocalAI uses different backends based on ggml and llama.cpp to run models. If your CPU doesn't support common instruction sets, you can disable them during build:
```
CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_AVX=OFF -DGGML_FMA=OFF" make build
```
To have effect on the container image, you need to set `REBUILD=true`:
```
docker run quay.io/go-skynet/localai
docker run --rm -ti -p 8080:8080 -e DEBUG=true -e MODELS_PATH=/models -e THREADS=1 -e REBUILD=true -e CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_AVX=OFF -DGGML_FMA=OFF" -v $PWD/models:/models quay.io/go-skynet/local-ai:latest
```
{{% /alert %}}
#### Container image
Requirements:
@@ -153,6 +112,9 @@ wget https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q2_K.gguf -O
# Use a template from the examples
cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/phi-2.Q2_K.tmpl
# Install the llama-cpp backend
./local-ai backends install llama-cpp
# Run LocalAI
./local-ai --models-path=./models/ --debug=true
@@ -186,131 +148,53 @@ sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer
```
# reinstall build dependencies
brew reinstall abseil cmake go grpc protobuf wget
brew reinstall go grpc protobuf wget
make clean
make build
```
**Requirements**: OpenCV, Gomp
## Build backends
Image generation requires `GO_TAGS=stablediffusion` to be set during build:
LocalAI has several backends available for installation in the backend gallery. The backends can also be built from source. As backends vary in the languages and dependencies they require, the documentation provides generic guidance for a few of the backends, which can be applied with slight modifications to the others.
### Manually
Typically each backend includes a Makefile which allows packaging the backend.
In the LocalAI repository, for instance, you can build `bark-cpp` by doing:
```
make GO_TAGS=stablediffusion build
git clone https://github.com/go-skynet/LocalAI.git
# Build the bark-cpp backend (requires cmake)
make -C LocalAI/backend/go/bark-cpp build package
# Build vllm backend (requires python)
make -C LocalAI/backend/python/vllm
```
### Build with Text to audio support
### With Docker
**Requirements**: piper-phonemize
Building with Docker is simpler, as it abstracts away all the requirements and focuses on building the final OCI images that are available in the gallery. This also allows you, for instance, to build a backend locally and install it with LocalAI. You can refer to [Backends](https://localai.io/backends/) for general guidance on how to install and develop backends.
Text to audio support is experimental and requires `GO_TAGS=tts` to be set during build:
In the LocalAI repository, you can build `bark-cpp` by doing:
```
make GO_TAGS=tts build
git clone https://github.com/go-skynet/LocalAI.git
# Build the bark-cpp backend (requires docker)
make docker-build-bark-cpp
```
### Acceleration
#### OpenBLAS
Software acceleration.
Requirements: OpenBLAS
```
make BUILD_TYPE=openblas build
```
#### CuBLAS
Nvidia Acceleration.
Requirement: Nvidia CUDA toolkit
Note: CuBLAS support is experimental, and has not been tested on real HW. please report any issues you find!
```
make BUILD_TYPE=cublas build
```
More informations available in the upstream PR: https://github.com/ggerganov/llama.cpp/pull/1412
#### Hipblas (AMD GPU with ROCm on Arch Linux)
Packages:
```
pacman -S base-devel git rocm-hip-sdk rocm-opencl-sdk opencv clblast grpc
```
Library links:
```
export CGO_CFLAGS="-I/usr/include/opencv4"
export CGO_CXXFLAGS="-I/usr/include/opencv4"
export CGO_LDFLAGS="-L/opt/rocm/hip/lib -lamdhip64 -L/opt/rocm/lib -lOpenCL -L/usr/lib -lclblast -lrocblas -lhipblas -lrocrand -lomp -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link"
```
Build:
```
make BUILD_TYPE=hipblas GPU_TARGETS=gfx1030
```
#### ClBLAS
AMD/Intel GPU acceleration.
Requirement: OpenCL, CLBlast
```
make BUILD_TYPE=clblas build
```
To specify a clblast dir set: `CLBLAST_DIR`
#### Intel GPU acceleration
Intel GPU acceleration is supported via SYCL.
Requirements: [Intel oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html) (see also [llama.cpp setup installations instructions](https://github.com/ggerganov/llama.cpp/blob/d71ac90985854b0905e1abba778e407e17f9f887/README-sycl.md?plain=1#L56))
```
make BUILD_TYPE=sycl_f16 build # for float16
make BUILD_TYPE=sycl_f32 build # for float32
```
#### Metal (Apple Silicon)
```
make build
# correct build type is automatically used on mac (BUILD_TYPE=metal)
# Set `gpu_layers: 256` (or equal to the number of model layers) to your YAML model config file and `f16: true`
```
### Windows compatibility
Make sure to give enough resources to the running container. See https://github.com/go-skynet/LocalAI/issues/2
### Examples
More advanced build options are available, for instance to build only a single backend.
#### Build only a single backend
You can control the backends that are built by setting the `GRPC_BACKENDS` environment variable. For instance, to build only the `llama-cpp` backend only:
Note that `make` is used only for convenience; in reality it just runs a simple `docker` command such as:
```bash
make GRPC_BACKENDS=backend-assets/grpc/llama-cpp build
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark-cpp -f LocalAI/backend/Dockerfile.golang --build-arg BACKEND=bark-cpp .
```
By default, all the backends are built.
Note:
#### Specific llama.cpp version
To build with a specific version of llama.cpp, set `CPPLLAMA_VERSION` to the tag or wanted sha:
```
CPPLLAMA_VERSION=<sha> make build
```
- BUILD_TYPE can be either: `cublas`, `hipblas`, `sycl_f16`, `sycl_f32`, `metal`.
- BASE_IMAGE is tested on `ubuntu:22.04` (and defaults to it)
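
For instance, building the same backend image with CUDA support, using the values from the notes above, might look like this (a sketch mirroring the command shown earlier):

```bash
docker build \
  --build-arg BUILD_TYPE=cublas \
  --build-arg BASE_IMAGE=ubuntu:22.04 \
  --build-arg BACKEND=bark-cpp \
  -t local-ai-backend:bark-cpp \
  -f LocalAI/backend/Dockerfile.golang .
```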

View File

@@ -163,9 +163,9 @@ Standard container images do not have pre-installed models.
| Description | Quay | Docker Hub |
| --- | --- |-------------------------------------------------------------|
| Latest images from the branch (development) | `quay.io/go-skynet/local-ai:master-gpu-nvidia-cuda11` | `localai/localai:master-gpu-nvidia-cuda11` |
| Latest images from the branch (development) | `quay.io/go-skynet/local-ai:master-gpu-nvidia-cuda-11` | `localai/localai:master-gpu-nvidia-cuda-11` |
| Latest tag | `quay.io/go-skynet/local-ai:latest-gpu-nvidia-cuda-11` | `localai/localai:latest-gpu-nvidia-cuda-11` |
| Versioned image | `quay.io/go-skynet/local-ai:{{< version >}}-gpu-nvidia-cuda11` | `localai/localai:{{< version >}}-gpu-nvidia-cuda11` |
| Versioned image | `quay.io/go-skynet/local-ai:{{< version >}}-gpu-nvidia-cuda-11` | `localai/localai:{{< version >}}-gpu-nvidia-cuda-11` |
{{% /tab %}}
@@ -173,9 +173,9 @@ Standard container images do not have pre-installed models.
| Description | Quay | Docker Hub |
| --- | --- |-------------------------------------------------------------|
| Latest images from the branch (development) | `quay.io/go-skynet/local-ai:master-gpu-nvidia-cuda12` | `localai/localai:master-gpu-nvidia-cuda12` |
| Latest images from the branch (development) | `quay.io/go-skynet/local-ai:master-gpu-nvidia-cuda-12` | `localai/localai:master-gpu-nvidia-cuda-12` |
| Latest tag | `quay.io/go-skynet/local-ai:latest-gpu-nvidia-cuda-12` | `localai/localai:latest-gpu-nvidia-cuda-12` |
| Versioned image | `quay.io/go-skynet/local-ai:{{< version >}}-gpu-nvidia-cuda12` | `localai/localai:{{< version >}}-gpu-nvidia-cuda12` |
| Versioned image | `quay.io/go-skynet/local-ai:{{< version >}}-gpu-nvidia-cuda-12` | `localai/localai:{{< version >}}-gpu-nvidia-cuda-12` |
{{% /tab %}}
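
With the renamed tags above, pulling a CUDA 12 image looks like this (tags taken from the tables; substitute the versioned tag as needed):

```bash
# Latest released CUDA 12 image (Docker Hub):
docker pull localai/localai:latest-gpu-nvidia-cuda-12
# Development image from the master branch (quay.io):
docker pull quay.io/go-skynet/local-ai:master-gpu-nvidia-cuda-12
```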

View File

@@ -106,6 +106,9 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
local-ai run oci://localai/phi-2:latest
```
{{% alert icon="⚡" %}}
**Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration]({{% relref "docs/features/gpu-acceleration#automatic-backend-detection" %}}).
{{% /alert %}}
For a full list of options, refer to the [Installer Options]({{% relref "docs/advanced/installer" %}}) documentation.

View File

@@ -1,3 +1,3 @@
{
"version": "v3.1.1"
"version": "v3.2.1"
}

View File

@@ -660,7 +660,7 @@ install_docker() {
IMAGE_TAG=
if [ "$USE_VULKAN" = true ]; then
IMAGE_TAG=${LOCALAI_VERSION}-vulkan
IMAGE_TAG=${LOCALAI_VERSION}-gpu-vulkan
info "Starting LocalAI Docker container..."
$SUDO docker run -v local-ai-data:/models \
@@ -672,7 +672,7 @@ install_docker() {
-d -p $PORT:8080 --name local-ai localai/localai:$IMAGE_TAG $STARTCOMMAND
elif [ "$HAS_CUDA" ]; then
# Default to CUDA 12
IMAGE_TAG=${LOCALAI_VERSION}-gpu-nvidia-cuda12
IMAGE_TAG=${LOCALAI_VERSION}-gpu-nvidia-cuda-12
# AIO
if [ "$USE_AIO" = true ]; then
IMAGE_TAG=${LOCALAI_VERSION}-aio-gpu-nvidia-cuda-12
@@ -757,7 +757,7 @@ install_binary_darwin() {
[ "$(uname -s)" = "Darwin" ] || fatal 'This script is intended to run on macOS only.'
info "Downloading LocalAI ${LOCALAI_VERSION}..."
curl --fail --show-error --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-Darwin-${ARCH}"
curl --fail --show-error --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-${LOCALAI_VERSION}-darwin-${ARCH}"
info "Installing to /usr/local/bin/local-ai"
install -o0 -g0 -m755 $TEMP_DIR/local-ai /usr/local/bin/local-ai
@@ -789,7 +789,7 @@ install_binary() {
fi
info "Downloading LocalAI ${LOCALAI_VERSION}..."
curl --fail --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-Linux-${ARCH}"
curl --fail --location --progress-bar -o $TEMP_DIR/local-ai "https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-${LOCALAI_VERSION}-linux-${ARCH}"
for BINDIR in /usr/local/bin /usr/bin /bin; do
echo $PATH | grep -q $BINDIR && break || continue
@@ -868,7 +868,7 @@ OS="$(uname -s)"
ARCH=$(uname -m)
case "$ARCH" in
x86_64) ARCH="x86_64" ;;
x86_64) ARCH="amd64" ;;
aarch64|arm64) ARCH="arm64" ;;
*) fatal "Unsupported architecture: $ARCH" ;;
esac
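
With the release-asset renaming and the x86_64→amd64 mapping above, the download URL assembled by the script now resolves as follows (values illustrative):

```bash
LOCALAI_VERSION=v3.2.1
ARCH=amd64   # result of the case statement above on an x86_64 host
echo "https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-${LOCALAI_VERSION}-linux-${ARCH}"
# -> https://github.com/mudler/LocalAI/releases/download/v3.2.1/local-ai-v3.2.1-linux-amd64
```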

View File

@@ -25,20 +25,24 @@ func (s *SystemState) Capability(capMap map[string]string) string {
// Check if the reported capability is in the map
if _, exists := capMap[reportedCapability]; exists {
log.Debug().Str("reportedCapability", reportedCapability).Any("capMap", capMap).Msg("Using reported capability")
return reportedCapability
}
log.Debug().Str("reportedCapability", reportedCapability).Any("capMap", capMap).Msg("The requested capability was not found, using default capability")
// Otherwise, return the default capability (catch-all)
return defaultCapability
}
func (s *SystemState) getSystemCapabilities() string {
if os.Getenv("LOCALAI_FORCE_META_BACKEND_CAPABILITY") != "" {
log.Debug().Str("LOCALAI_FORCE_META_BACKEND_CAPABILITY", os.Getenv("LOCALAI_FORCE_META_BACKEND_CAPABILITY")).Msg("Using forced capability")
return os.Getenv("LOCALAI_FORCE_META_BACKEND_CAPABILITY")
}
capabilityRunFile := "/run/localai/capability"
if os.Getenv("LOCALAI_FORCE_META_BACKEND_CAPABILITY_RUN_FILE") != "" {
log.Debug().Str("LOCALAI_FORCE_META_BACKEND_CAPABILITY_RUN_FILE", os.Getenv("LOCALAI_FORCE_META_BACKEND_CAPABILITY_RUN_FILE")).Msg("Using forced capability run file")
capabilityRunFile = os.Getenv("LOCALAI_FORCE_META_BACKEND_CAPABILITY_RUN_FILE")
}
@@ -48,31 +52,37 @@ func (s *SystemState) getSystemCapabilities() string {
if _, err := os.Stat(capabilityRunFile); err == nil {
capability, err := os.ReadFile(capabilityRunFile)
if err == nil {
return string(capability)
log.Debug().Str("capability", string(capability)).Msg("Using capability from run file")
return strings.Trim(strings.TrimSpace(string(capability)), "\n")
}
}
// If we are on mac and arm64, we will return metal
if runtime.GOOS == "darwin" && runtime.GOARCH == "arm64" {
log.Debug().Msg("Using metal capability")
return metal
}
// If we are on mac and x86, we will return darwin-x86
if runtime.GOOS == "darwin" && runtime.GOARCH == "amd64" {
log.Debug().Msg("Using darwin-x86 capability")
return darwinX86
}
// If arm64 on linux and a nvidia gpu is detected, we will return nvidia-l4t
if runtime.GOOS == "linux" && runtime.GOARCH == "arm64" {
if s.GPUVendor == "nvidia" {
log.Debug().Msg("Using nvidia-l4t capability")
return nvidiaL4T
}
}
if s.GPUVendor == "" {
log.Debug().Msg("Using default capability")
return defaultCapability
}
log.Debug().Str("GPUVendor", s.GPUVendor).Msg("Using GPU vendor capability")
return s.GPUVendor
}
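
To exercise the run-file path above outside of a container, the capability file location can be overridden with the environment variable handled earlier (a quick local sketch, not an official workflow):

```bash
# Point LocalAI at a custom capability file; the value is trimmed before use,
# so the trailing newline written by `echo` is fine.
echo "nvidia-l4t" > /tmp/localai-capability
LOCALAI_FORCE_META_BACKEND_CAPABILITY_RUN_FILE=/tmp/localai-capability \
  ./local-ai backends install llama-cpp
```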