Compare commits


4 Commits

Author / SHA1 / Message / Date

LocalAI [bot]
902e47f0b0 chore: ⬆️ Update ggml-org/llama.cpp to 0320ac5264279d74f8ee91bafa6c90e9ab9bbb91 (#6306)
⬆️ Update ggml-org/llama.cpp

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-18 09:27:18 +02:00

Ettore Di Giacinto
50bb78fd24 Add permissions for issues and actions
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2025-09-18 09:26:10 +02:00

LocalAI [bot]
542f07ab2d docs: ⬆️ update docs version mudler/LocalAI (#6305)
⬆️ Update docs version mudler/LocalAI

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2025-09-17 21:06:50 +00:00

Ettore Di Giacinto
77c5acb9db Revert "feat(nvidia-gpu): bump images to cuda 12.8" (#6303)
Revert "feat(nvidia-gpu): bump images to cuda 12.8 (#6239)"

This reverts commit d9e25af7b5.
2025-09-17 19:31:43 +02:00
9 changed files with 29 additions and 28 deletions

View File

@@ -89,7 +89,7 @@ jobs:
           context: "./backend"
         - build-type: 'l4t'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/arm64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-l4t-diffusers'
@@ -187,7 +187,7 @@ jobs:
         # CUDA 12 builds
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-rerankers'
@@ -199,7 +199,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-llama-cpp'
@@ -211,7 +211,7 @@ jobs:
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-vllm'
@@ -223,7 +223,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-transformers'
@@ -235,7 +235,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-diffusers'
@@ -248,7 +248,7 @@ jobs:
         # CUDA 12 additional backends
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-kokoro'
@@ -260,7 +260,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-faster-whisper'
@@ -272,7 +272,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-coqui'
@@ -284,7 +284,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-bark'
@@ -296,7 +296,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-chatterbox'
@@ -578,7 +578,7 @@ jobs:
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/arm64'
           skip-drivers: 'true'
           tag-latest: 'auto'
@@ -615,7 +615,7 @@ jobs:
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-stablediffusion-ggml'
@@ -675,7 +675,7 @@ jobs:
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/arm64'
           skip-drivers: 'true'
           tag-latest: 'auto'
@@ -700,7 +700,7 @@ jobs:
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-whisper'
@@ -760,7 +760,7 @@ jobs:
           context: "./"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/arm64'
           skip-drivers: 'true'
           tag-latest: 'auto'
@@ -836,7 +836,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
@@ -872,7 +872,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/arm64'
           skip-drivers: 'true'
           tag-latest: 'auto'
@@ -897,7 +897,7 @@ jobs:
           context: "./backend"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12-exllama2'

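All nineteen hunks above make the same one-line change, pinning cuda-minor-version back from "8" to "0" across the image-build matrix. For reference, a minimal sketch of one complete matrix entry as it reads after the revert; the keys are taken from the hunks above, while the strategy/matrix scaffolding around them is assumed rather than shown in the diff:

strategy:
  matrix:
    include:
      # one image variant; the revert pins CUDA 12.0 again
      - build-type: 'cublas'
        cuda-major-version: "12"
        cuda-minor-version: "0"   # was "8" before this revert
        platforms: 'linux/amd64'
        tag-latest: 'auto'
        tag-suffix: '-gpu-nvidia-cuda-12-llama-cpp'
        context: "./"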
View File

@@ -36,7 +36,7 @@ jobs:
       include:
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'false'
           tag-suffix: '-gpu-nvidia-cuda-12'

View File

@@ -91,7 +91,7 @@ jobs:
           aio: "-aio-gpu-nvidia-cuda-11"
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/amd64'
           tag-latest: 'auto'
           tag-suffix: '-gpu-nvidia-cuda-12'
@@ -144,7 +144,7 @@ jobs:
       include:
         - build-type: 'cublas'
           cuda-major-version: "12"
-          cuda-minor-version: "8"
+          cuda-minor-version: "0"
           platforms: 'linux/arm64'
           tag-latest: 'auto'
           tag-suffix: '-nvidia-l4t-arm64'

View File

@@ -6,7 +6,8 @@ permissions:
   contents: write
   pull-requests: write
   packages: read
+  issues: write # for Homebrew/actions/post-comment
+  actions: write # to dispatch publish workflow
 jobs:
   dependabot:
     runs-on: ubuntu-latest

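For context, a GitHub Actions permissions block maps GITHUB_TOKEN scopes to access levels at the workflow (or job) level. A minimal sketch of the block as it reads after this change, using only the lines visible in the hunk:

permissions:
  contents: write        # push commits, create releases
  pull-requests: write   # comment on and merge PRs
  packages: read
  issues: write          # for Homebrew/actions/post-comment
  actions: write         # to dispatch publish workflow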
View File

@@ -18,7 +18,7 @@ FROM requirements AS requirements-drivers
 ARG BUILD_TYPE
 ARG CUDA_MAJOR_VERSION=12
-ARG CUDA_MINOR_VERSION=8
+ARG CUDA_MINOR_VERSION=0
 ARG SKIP_DRIVERS=false
 ARG TARGETARCH
 ARG TARGETVARIANT

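The workflow matrix entries above and this Dockerfile ARG meet through Docker build arguments. A hypothetical sketch of that wiring, assuming a docker/build-push-action build step; LocalAI's actual step may be named and structured differently:

- name: Build image
  uses: docker/build-push-action@v5
  with:
    context: .
    file: ./Dockerfile
    platforms: ${{ matrix.platforms }}
    build-args: |
      BUILD_TYPE=${{ matrix.build-type }}
      CUDA_MAJOR_VERSION=${{ matrix.cuda-major-version }}
      CUDA_MINOR_VERSION=${{ matrix.cuda-minor-version }}

With cuda-minor-version back at "0" in the matrix, the workflow once again matches the Dockerfile default set in the hunk above.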
View File

@@ -170,7 +170,7 @@ prepare-e2e:
 	mkdir -p $(TEST_DIR)
 	cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
 	test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
-	docker build --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=8 -t localai-tests .
+	docker build --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=0 -t localai-tests .
 run-e2e-image:
 	ls -liah $(abspath ./tests/e2e-fixtures)

View File

@@ -111,7 +111,7 @@ docker build -f backend/Dockerfile.python \
   --build-arg BACKEND=transformers \
   --build-arg BUILD_TYPE=cublas12 \
   --build-arg CUDA_MAJOR_VERSION=12 \
-  --build-arg CUDA_MINOR_VERSION=8 \
+  --build-arg CUDA_MINOR_VERSION=0 \
   -t localai-backend-transformers .
 # Build Go backend

View File

@@ -1,5 +1,5 @@
-LLAMA_VERSION?=8ff206097c2bf3ca1c7aa95f9d6db779fc7bdd68
+LLAMA_VERSION?=0320ac5264279d74f8ee91bafa6c90e9ab9bbb91
 LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
 CMAKE_ARGS?=

View File

@@ -1,3 +1,3 @@
 {
-  "version": "v3.5.0"
+  "version": "v3.5.1"
 }