Compare commits

1 commit

Ettore Di Giacinto, 3f52776a1c: "WIP" (2025-07-23 21:18:47 +02:00)

348 changed files with 7502 additions and 18722 deletions


@@ -6,10 +6,6 @@ models
backends backends
examples/chatbot-ui/models examples/chatbot-ui/models
backend/go/image/stablediffusion-ggml/build/ backend/go/image/stablediffusion-ggml/build/
backend/go/*/build
backend/go/*/.cache
backend/go/*/sources
backend/go/*/package
examples/rwkv/models examples/rwkv/models
examples/**/models examples/**/models
Dockerfile* Dockerfile*


@@ -87,42 +87,6 @@ jobs:
backend: "diffusers" backend: "diffusers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-l4t-diffusers'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-diffusers'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-chatterbox'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "chatterbox"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# CUDA 11 additional backends # CUDA 11 additional backends
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "11" cuda-major-version: "11"
@@ -215,7 +179,7 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm' tag-suffix: '-gpu-nvidia-cuda-12-vllm'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "vllm" backend: "vllm"
@@ -242,7 +206,7 @@ jobs:
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "diffusers" backend: "diffusers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
# CUDA 12 additional backends # CUDA 12 additional backends
@@ -314,7 +278,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-rerankers' tag-suffix: '-gpu-rocm-hipblas-rerankers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "rerankers" backend: "rerankers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -326,7 +290,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-llama-cpp' tag-suffix: '-gpu-rocm-hipblas-llama-cpp'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "llama-cpp" backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp" dockerfile: "./backend/Dockerfile.llama-cpp"
@@ -337,8 +301,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vllm' tag-suffix: '-gpu-rocm-hipblas-vllm'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "vllm" backend: "vllm"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -349,8 +313,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-transformers' tag-suffix: '-gpu-rocm-hipblas-transformers'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "transformers" backend: "transformers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -361,8 +325,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-diffusers' tag-suffix: '-gpu-rocm-hipblas-diffusers'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "diffusers" backend: "diffusers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -374,8 +338,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-kokoro' tag-suffix: '-gpu-rocm-hipblas-kokoro'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "kokoro" backend: "kokoro"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -387,7 +351,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-faster-whisper' tag-suffix: '-gpu-rocm-hipblas-faster-whisper'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "faster-whisper" backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -399,7 +363,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-coqui' tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "coqui" backend: "coqui"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -410,19 +374,31 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-bark' tag-suffix: '-gpu-rocm-hipblas-bark'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "bark" backend: "bark"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
# sycl builds # sycl builds
- build-type: 'intel' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-rerankers' tag-suffix: '-gpu-intel-sycl-f32-rerankers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "rerankers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-rerankers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
@@ -453,36 +429,60 @@ jobs:
backend: "llama-cpp" backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp" dockerfile: "./backend/Dockerfile.llama-cpp"
context: "./" context: "./"
- build-type: 'intel' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-vllm' tag-suffix: '-gpu-intel-sycl-f32-vllm'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "vllm" backend: "vllm"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-transformers' tag-suffix: '-gpu-intel-sycl-f16-vllm'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "vllm"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-transformers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "transformers" backend: "transformers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-diffusers' tag-suffix: '-gpu-intel-sycl-f16-transformers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-diffusers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
@@ -490,48 +490,96 @@ jobs:
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
# SYCL additional backends # SYCL additional backends
- build-type: 'intel' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-kokoro' tag-suffix: '-gpu-intel-sycl-f32-kokoro'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "kokoro" backend: "kokoro"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-faster-whisper' tag-suffix: '-gpu-intel-sycl-f16-kokoro'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "kokoro"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-faster-whisper'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "faster-whisper" backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-coqui' tag-suffix: '-gpu-intel-sycl-f16-faster-whisper'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-coqui'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "coqui" backend: "coqui"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-bark' tag-suffix: '-gpu-intel-sycl-f16-coqui'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-bark'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-bark'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
@@ -549,7 +597,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "piper" backend: "piper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# bark-cpp # bark-cpp
- build-type: '' - build-type: ''
@@ -562,7 +610,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "bark-cpp" backend: "bark-cpp"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: '' - build-type: ''
cuda-major-version: "" cuda-major-version: ""
@@ -611,7 +659,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -623,7 +671,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "11" cuda-major-version: "11"
@@ -635,7 +683,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f32' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
@@ -647,7 +695,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f16' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
@@ -659,7 +707,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'vulkan' - build-type: 'vulkan'
cuda-major-version: "" cuda-major-version: ""
@@ -671,7 +719,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -683,7 +731,7 @@ jobs:
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm' runs-on: 'ubuntu-24.04-arm'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# whisper # whisper
- build-type: '' - build-type: ''
@@ -696,7 +744,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -708,7 +756,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "11" cuda-major-version: "11"
@@ -720,7 +768,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f32' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
@@ -732,7 +780,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f16' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
@@ -744,7 +792,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'vulkan' - build-type: 'vulkan'
cuda-major-version: "" cuda-major-version: ""
@@ -756,7 +804,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -768,19 +816,19 @@ jobs:
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm' runs-on: 'ubuntu-24.04-arm'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'hipblas' - build-type: 'hipblas'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-whisper' tag-suffix: '-gpu-hipblas-whisper'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
#silero-vad #silero-vad
- build-type: '' - build-type: ''
@@ -793,7 +841,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "silero-vad" backend: "silero-vad"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# local-store # local-store
- build-type: '' - build-type: ''
@@ -806,7 +854,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "local-store" backend: "local-store"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# huggingface # huggingface
- build-type: '' - build-type: ''
@@ -819,197 +867,8 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "huggingface" backend: "huggingface"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# rfdetr
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-rfdetr'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# exllama2
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-exllama2'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-exllama2'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
runs-on: 'ubuntu-latest'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# runs out of space on the runner
# - build-type: 'hipblas'
# cuda-major-version: ""
# cuda-minor-version: ""
# platforms: 'linux/amd64'
# tag-latest: 'auto'
# tag-suffix: '-gpu-hipblas-rfdetr'
# base-image: "rocm/dev-ubuntu-22.04:6.4.3"
# runs-on: 'ubuntu-latest'
# skip-drivers: 'false'
# backend: "rfdetr"
# dockerfile: "./backend/Dockerfile.python"
# context: "./backend"
# kitten-tts
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-kitten-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "kitten-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
backend-jobs-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
strategy:
matrix:
include:
- backend: "diffusers"
tag-suffix: "-metal-darwin-arm64-diffusers"
build-type: "mps"
- backend: "mlx"
tag-suffix: "-metal-darwin-arm64-mlx"
build-type: "mps"
- backend: "chatterbox"
tag-suffix: "-metal-darwin-arm64-chatterbox"
build-type: "mps"
- backend: "mlx-vlm"
tag-suffix: "-metal-darwin-arm64-mlx-vlm"
build-type: "mps"
- backend: "mlx-audio"
tag-suffix: "-metal-darwin-arm64-mlx-audio"
build-type: "mps"
- backend: "stablediffusion-ggml"
tag-suffix: "-metal-darwin-arm64-stablediffusion-ggml"
build-type: "metal"
lang: "go"
- backend: "whisper"
tag-suffix: "-metal-darwin-arm64-whisper"
build-type: "metal"
lang: "go"
with:
backend: ${{ matrix.backend }}
build-type: ${{ matrix.build-type }}
go-version: "1.24.x"
tag-suffix: ${{ matrix.tag-suffix }}
lang: ${{ matrix.lang || 'python' }}
use-pip: ${{ matrix.backend == 'diffusers' }}
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
llama-cpp-darwin: llama-cpp-darwin:
runs-on: macOS-14 runs-on: macOS-14
strategy: strategy:
@@ -1017,7 +876,7 @@ jobs:
go-version: ['1.21.x'] go-version: ['1.21.x']
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -1034,19 +893,21 @@ jobs:
- name: Build llama-cpp-darwin - name: Build llama-cpp-darwin
run: | run: |
make protogen-go make protogen-go
make backends/llama-cpp-darwin make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
- name: Upload llama-cpp.tar - name: Upload llama-cpp.tar
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: llama-cpp-tar name: llama-cpp-tar
path: backend-images/llama-cpp.tar path: build/llama-cpp.tar
llama-cpp-darwin-publish: llama-cpp-darwin-publish:
needs: llama-cpp-darwin needs: llama-cpp-darwin
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Download llama-cpp.tar - name: Download llama-cpp.tar
uses: actions/download-artifact@v5 uses: actions/download-artifact@v4
with: with:
name: llama-cpp-tar name: llama-cpp-tar
path: . path: .
@@ -1103,7 +964,7 @@ jobs:
go-version: ['1.21.x'] go-version: ['1.21.x']
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -1122,19 +983,20 @@ jobs:
make protogen-go make protogen-go
make build make build
export PLATFORMARCH=darwin/amd64 export PLATFORMARCH=darwin/amd64
make backends/llama-cpp-darwin bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
- name: Upload llama-cpp.tar - name: Upload llama-cpp.tar
uses: actions/upload-artifact@v4 uses: actions/upload-artifact@v4
with: with:
name: llama-cpp-tar-x86 name: llama-cpp-tar-x86
path: backend-images/llama-cpp.tar path: build/llama-cpp.tar
llama-cpp-darwin-x86-publish: llama-cpp-darwin-x86-publish:
if: github.event_name != 'pull_request'
needs: llama-cpp-darwin-x86 needs: llama-cpp-darwin-x86
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Download llama-cpp.tar - name: Download llama-cpp.tar
uses: actions/download-artifact@v5 uses: actions/download-artifact@v4
with: with:
name: llama-cpp-tar-x86 name: llama-cpp-tar-x86
path: . path: .
@@ -1183,4 +1045,4 @@ jobs:
run: | run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push llama-cpp.tar $tag crane push llama-cpp.tar $tag
done done


@@ -55,9 +55,9 @@ on:
type: string type: string
secrets: secrets:
dockerUsername: dockerUsername:
required: false required: true
dockerPassword: dockerPassword:
required: false required: true
quayUsername: quayUsername:
required: true required: true
quayPassword: quayPassword:
@@ -66,8 +66,6 @@ on:
jobs: jobs:
backend-build: backend-build:
runs-on: ${{ inputs.runs-on }} runs-on: ${{ inputs.runs-on }}
env:
quay_username: ${{ secrets.quayUsername }}
steps: steps:
@@ -97,7 +95,7 @@ jobs:
&& sudo apt-get install -y git && sudo apt-get install -y git
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Release space from worker - name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest' if: inputs.runs-on == 'ubuntu-latest'
@@ -189,7 +187,7 @@ jobs:
password: ${{ secrets.dockerPassword }} password: ${{ secrets.dockerPassword }}
- name: Login to Quay.io - name: Login to Quay.io
if: ${{ env.quay_username != '' }} # if: github.event_name != 'pull_request'
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
registry: quay.io registry: quay.io
@@ -232,7 +230,7 @@ jobs:
file: ${{ inputs.dockerfile }} file: ${{ inputs.dockerfile }}
cache-from: type=gha cache-from: type=gha
platforms: ${{ inputs.platforms }} platforms: ${{ inputs.platforms }}
push: ${{ env.quay_username != '' }} push: true
tags: ${{ steps.meta_pull_request.outputs.tags }} tags: ${{ steps.meta_pull_request.outputs.tags }}
labels: ${{ steps.meta_pull_request.outputs.labels }} labels: ${{ steps.meta_pull_request.outputs.labels }}
@@ -240,4 +238,4 @@ jobs:
- name: job summary - name: job summary
run: | run: |
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY


@@ -1,144 +0,0 @@
---
name: 'build darwin python backend container images (reusable)'
on:
workflow_call:
inputs:
backend:
description: 'Backend to build'
required: true
type: string
build-type:
description: 'Build type (e.g., mps)'
default: ''
type: string
use-pip:
description: 'Use pip to install dependencies'
default: false
type: boolean
lang:
description: 'Programming language (e.g. go)'
default: 'python'
type: string
go-version:
description: 'Go version to use'
default: '1.24.x'
type: string
tag-suffix:
description: 'Tag suffix for the built image'
required: true
type: string
runs-on:
description: 'Runner to use'
default: 'macOS-14'
type: string
secrets:
dockerUsername:
required: false
dockerPassword:
required: false
quayUsername:
required: true
quayPassword:
required: true
jobs:
darwin-backend-build:
runs-on: ${{ inputs.runs-on }}
strategy:
matrix:
go-version: ['${{ inputs.go-version }}']
steps:
- name: Clone
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
# You can test your matrix by printing the current Go version
- name: Display Go version
run: go version
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
- name: Build ${{ inputs.backend }}-darwin
run: |
make protogen-go
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend
- name: Upload ${{ inputs.backend }}.tar
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.backend }}-tar
path: backend-images/${{ inputs.backend }}.tar
darwin-backend-publish:
needs: darwin-backend-build
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download ${{ inputs.backend }}.tar
uses: actions/download-artifact@v5
with:
name: ${{ inputs.backend }}-tar
path: .
- name: Install crane
run: |
curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
sudo mv crane /usr/local/bin/
- name: Log in to DockerHub
run: |
echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin
- name: Log in to quay.io
run: |
echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
localai/localai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta
id: quaymeta
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/local-ai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Push Docker image (DockerHub)
run: |
for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
- name: Push Docker image (Quay)
run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done


@@ -1,78 +0,0 @@
name: 'build backend container images (PR-filtered)'
on:
pull_request:
concurrency:
group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
matrix-darwin: ${{ steps.set-matrix.outputs.matrix-darwin }}
has-backends: ${{ steps.set-matrix.outputs.has-backends }}
has-backends-darwin: ${{ steps.set-matrix.outputs.has-backends-darwin }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Setup Bun
uses: oven-sh/setup-bun@v2
- name: Install dependencies
run: |
bun add js-yaml
bun add @octokit/core
# filters the matrix in backend.yml
- name: Filter matrix for changed backends
id: set-matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_EVENT_PATH: ${{ github.event_path }}
run: bun run scripts/changed-backends.js
backend-jobs:
needs: generate-matrix
uses: ./.github/workflows/backend_build.yml
if: needs.generate-matrix.outputs.has-backends == 'true'
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
backend: ${{ matrix.backend }}
dockerfile: ${{ matrix.dockerfile }}
skip-drivers: ${{ matrix.skip-drivers }}
context: ${{ matrix.context }}
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
backend-jobs-darwin:
needs: generate-matrix
uses: ./.github/workflows/backend_build_darwin.yml
if: needs.generate-matrix.outputs.has-backends-darwin == 'true'
with:
backend: ${{ matrix.backend }}
build-type: ${{ matrix.build-type }}
go-version: "1.24.x"
tag-suffix: ${{ matrix.tag-suffix }}
lang: ${{ matrix.lang || 'python' }}
use-pip: ${{ matrix.backend == 'diffusers' }}
runs-on: "macOS-14"
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix-darwin) }}
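
The generate-matrix job above delegates the filtering to scripts/changed-backends.js, which reads the full build matrices defined in backend.yml and keeps only the entries whose backend is touched by the pull request. The script itself is not part of this diff; the sketch below is only an illustration of that flow, and the backend.yml job keys, the path-matching heuristic, and the pull-request file listing are assumptions rather than the script's actual implementation.

// Illustrative sketch of a changed-backends filter (not the actual scripts/changed-backends.js).
import { readFileSync, appendFileSync } from "node:fs";
import { load } from "js-yaml";
import { Octokit } from "@octokit/core";

const octokit = new Octokit({ auth: process.env.GITHUB_TOKEN });
const event = JSON.parse(readFileSync(process.env.GITHUB_EVENT_PATH, "utf8"));
const [owner, repo] = process.env.GITHUB_REPOSITORY.split("/");

// Files touched by the pull request that triggered the workflow.
const { data: files } = await octokit.request(
  "GET /repos/{owner}/{repo}/pulls/{pull_number}/files",
  { owner, repo, pull_number: event.pull_request.number, per_page: 100 },
);
const changed = files.map((f) => f.filename);

// Full matrices as declared in backend.yml (job keys are assumed here).
const workflow = load(readFileSync(".github/workflows/backend.yml", "utf8"));
const linux = workflow.jobs["backend-jobs"].strategy.matrix.include;
const darwin = workflow.jobs["backend-jobs-darwin"].strategy.matrix.include;

// Heuristic: keep an entry when any changed path mentions its backend name.
const touches = (entry) => changed.some((path) => path.includes(entry.backend));
const matrix = linux.filter(touches);
const matrixDarwin = darwin.filter(touches);

// Publish the filtered matrices and the has-backends flags as step outputs.
appendFileSync(process.env.GITHUB_OUTPUT, [
  `matrix=${JSON.stringify({ include: matrix })}`,
  `matrix-darwin=${JSON.stringify({ include: matrixDarwin })}`,
  `has-backends=${matrix.length > 0}`,
  `has-backends-darwin=${matrixDarwin.length > 0}`,
  "",
].join("\n"));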


@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Go - name: Set up Go
@@ -21,47 +21,3 @@ jobs:
- name: Run GoReleaser - name: Run GoReleaser
run: | run: |
make dev-dist make dev-dist
launcher-build-darwin:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for macOS ARM64
run: |
make build-launcher-darwin
ls -liah dist
- name: Upload macOS launcher artifacts
uses: actions/upload-artifact@v4
with:
name: launcher-macos
path: dist/
retention-days: 30
launcher-build-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for Linux
run: |
sudo apt-get update
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: actions/upload-artifact@v4
with:
name: launcher-linux
path: local-ai-launcher-linux.tar.xz
retention-days: 30


@@ -21,7 +21,7 @@ jobs:
variable: "BARKCPP_VERSION" variable: "BARKCPP_VERSION"
branch: "main" branch: "main"
file: "Makefile" file: "Makefile"
- repository: "leejet/stable-diffusion.cpp" - repository: "richiejp/stable-diffusion.cpp"
variable: "STABLEDIFFUSION_GGML_VERSION" variable: "STABLEDIFFUSION_GGML_VERSION"
branch: "master" branch: "master"
file: "backend/go/stablediffusion-ggml/Makefile" file: "backend/go/stablediffusion-ggml/Makefile"
@@ -31,7 +31,7 @@ jobs:
file: "backend/go/piper/Makefile" file: "backend/go/piper/Makefile"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Bump dependencies 🔧 - name: Bump dependencies 🔧
id: bump id: bump
run: | run: |


@@ -12,7 +12,7 @@ jobs:
- repository: "mudler/LocalAI" - repository: "mudler/LocalAI"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Bump dependencies 🔧 - name: Bump dependencies 🔧
run: | run: |
bash .github/bump_docs.sh ${{ matrix.repository }} bash .github/bump_docs.sh ${{ matrix.repository }}


@@ -15,7 +15,7 @@ jobs:
&& sudo add-apt-repository -y ppa:git-core/ppa \ && sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \ && sudo apt-get update \
&& sudo apt-get install -y git && sudo apt-get install -y git
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Install dependencies - name: Install dependencies
run: | run: |
sudo apt-get update sudo apt-get update


@@ -20,7 +20,7 @@ jobs:
skip-commit-verification: true skip-commit-verification: true
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Approve a PR if not already approved - name: Approve a PR if not already approved
run: | run: |


@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- uses: actions/setup-go@v5 - uses: actions/setup-go@v5


@@ -73,7 +73,7 @@ jobs:
uses: docker/setup-buildx-action@master uses: docker/setup-buildx-action@master
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Cache GRPC - name: Cache GRPC
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6


@@ -43,7 +43,7 @@ jobs:
uses: docker/setup-buildx-action@master uses: docker/setup-buildx-action@master
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Cache Intel images - name: Cache Intel images
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6


@@ -39,7 +39,7 @@ jobs:
cuda-minor-version: "0" cuda-minor-version: "0"
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'false' tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12' tag-suffix: '-gpu-nvidia-cuda12'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
@@ -47,16 +47,16 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'false' tag-latest: 'false'
tag-suffix: '-hipblas' tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl' - build-type: 'sycl_f16'
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'false' tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
tag-suffix: 'sycl' tag-suffix: 'sycl-f16'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
- build-type: 'vulkan' - build-type: 'vulkan'


@@ -39,7 +39,7 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-hipblas' tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
@@ -83,7 +83,7 @@ jobs:
cuda-minor-version: "7" cuda-minor-version: "7"
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11' tag-suffix: '-gpu-nvidia-cuda11'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target" makeflags: "--jobs=4 --output-sync=target"
@@ -94,7 +94,7 @@ jobs:
cuda-minor-version: "0" cuda-minor-version: "0"
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12' tag-suffix: '-gpu-nvidia-cuda12'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
@@ -103,21 +103,30 @@ jobs:
- build-type: 'vulkan' - build-type: 'vulkan'
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-vulkan' tag-suffix: '-vulkan'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target" makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-vulkan" aio: "-aio-gpu-vulkan"
- build-type: 'intel' - build-type: 'sycl_f16'
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel' tag-suffix: '-gpu-intel-f16'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel" aio: "-aio-gpu-intel-f16"
- build-type: 'sycl_f32'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel-f32'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel-f32"
gh-runner: gh-runner:
uses: ./.github/workflows/image_build.yml uses: ./.github/workflows/image_build.yml


@@ -94,7 +94,7 @@ jobs:
&& sudo apt-get update \ && sudo apt-get update \
&& sudo apt-get install -y git && sudo apt-get install -y git
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Release space from worker - name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest' if: inputs.runs-on == 'ubuntu-latest'


@@ -9,4 +9,4 @@ jobs:
pull-requests: write pull-requests: write
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/labeler@v6 - uses: actions/labeler@v5


@@ -6,15 +6,14 @@ permissions:
contents: write contents: write
pull-requests: write pull-requests: write
packages: read packages: read
issues: write # for Homebrew/actions/post-comment
actions: write # to dispatch publish workflow
jobs: jobs:
dependabot: dependabot:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.actor == 'localai-bot' }} if: ${{ github.actor == 'localai-bot' }}
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Approve a PR if not already approved - name: Approve a PR if not already approved
run: | run: |


@@ -11,7 +11,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 # needed to checkout all branches for this Action to work fetch-depth: 0 # needed to checkout all branches for this Action to work
- uses: mudler/localai-github-action@v1 - uses: mudler/localai-github-action@v1
@@ -90,7 +90,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 # needed to checkout all branches for this Action to work fetch-depth: 0 # needed to checkout all branches for this Action to work
- name: Start LocalAI - name: Start LocalAI


@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Go - name: Set up Go
@@ -23,42 +23,4 @@ jobs:
version: v2.11.0 version: v2.11.0
args: release --clean args: release --clean
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
launcher-build-darwin:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for macOS ARM64
run: |
make build-launcher-darwin
- name: Upload DMG to Release
uses: softprops/action-gh-release@v2
with:
files: ./dist/LocalAI.dmg
launcher-build-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for Linux
run: |
sudo apt-get update
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: softprops/action-gh-release@v2
with:
files: ./local-ai-launcher-linux.tar.xz


@@ -14,11 +14,11 @@ jobs:
GO111MODULE: on GO111MODULE: on
steps: steps:
- name: Checkout Source - name: Checkout Source
uses: actions/checkout@v5 uses: actions/checkout@v4
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
- name: Run Gosec Security Scanner - name: Run Gosec Security Scanner
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
uses: securego/gosec@v2.22.8 uses: securego/gosec@v2.22.7
with: with:
# we let the report trigger content trigger a failure using the GitHub Security features. # we let the report trigger content trigger a failure using the GitHub Security features.
args: '-no-fail -fmt sarif -out results.sarif ./...' args: '-no-fail -fmt sarif -out results.sarif ./...'


@@ -10,7 +10,7 @@ jobs:
stale: stale:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v9 - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
with: with:
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.' stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.' stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'


@@ -19,7 +19,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -61,7 +61,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -83,7 +83,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -104,7 +104,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -124,7 +124,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -186,7 +186,7 @@ jobs:
# sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true # sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
# df -h # df -h
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -211,7 +211,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -232,7 +232,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies


@@ -23,20 +23,6 @@ jobs:
matrix: matrix:
go-version: ['1.21.x'] go-version: ['1.21.x']
steps: steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Release space from worker - name: Release space from worker
run: | run: |
echo "Listing top largest packages" echo "Listing top largest packages"
@@ -70,7 +56,7 @@ jobs:
sudo rm -rfv build || true sudo rm -rfv build || true
df -h df -h
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -166,7 +152,7 @@ jobs:
sudo rm -rfv build || true sudo rm -rfv build || true
df -h df -h
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -196,7 +182,7 @@ jobs:
go-version: ['1.21.x'] go-version: ['1.21.x']
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -214,7 +200,11 @@ jobs:
- name: Build llama-cpp-darwin - name: Build llama-cpp-darwin
run: | run: |
make protogen-go make protogen-go
make backends/llama-cpp-darwin make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
./local-ai backends install "ocifile://$PWD/build/llama-cpp.tar"
- name: Test - name: Test
run: | run: |
export C_INCLUDE_PATH=/usr/local/include export C_INCLUDE_PATH=/usr/local/include


@@ -9,7 +9,7 @@ jobs:
fail-fast: false fail-fast: false
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- uses: actions/setup-go@v5 - uses: actions/setup-go@v5
with: with:
go-version: 'stable' go-version: 'stable'

.gitignore (vendored)

@@ -12,7 +12,6 @@ prepare-sources
/backends /backends
/backend-images /backend-images
/result.yaml /result.yaml
protoc
*.log *.log
@@ -24,7 +23,7 @@ go-bert
# LocalAI build binary # LocalAI build binary
LocalAI LocalAI
/local-ai local-ai
# prevent above rules from omitting the helm chart # prevent above rules from omitting the helm chart
!charts/* !charts/*
# prevent above rules from omitting the api/localai folder # prevent above rules from omitting the api/localai folder


@@ -8,7 +8,7 @@ source:
enabled: true enabled: true
name_template: '{{ .ProjectName }}-{{ .Tag }}-source' name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
builds: builds:
- main: ./cmd/local-ai -
env: env:
- CGO_ENABLED=0 - CGO_ENABLED=0
ldflags: ldflags:


@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \ ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas-base libopenblas-dev && \ python3 python-is-python3 ffmpeg && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
@@ -72,12 +72,6 @@ RUN <<EOT bash
fi fi
EOT EOT
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
echo "nvidia-l4t" > /run/localai/capability
fi
EOT
# If we are building with clblas support, we need the libraries for the builds # If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \ RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \ apt-get update && \
@@ -100,12 +94,6 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
ldconfig \ ldconfig \
; fi ; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
; fi
RUN expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
# Cuda # Cuda
ENV PATH=/usr/local/cuda/bin:${PATH} ENV PATH=/usr/local/cuda/bin:${PATH}

Makefile

@@ -2,10 +2,11 @@ GOCMD=go
GOTEST=$(GOCMD) test GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet GOVET=$(GOCMD) vet
BINARY_NAME=local-ai BINARY_NAME=local-ai
LAUNCHER_BINARY_NAME=local-ai-launcher
GORELEASER?= GORELEASER?=
ONEAPI_VERSION?=2025.2
export BUILD_TYPE?= export BUILD_TYPE?=
GO_TAGS?= GO_TAGS?=
@@ -91,17 +92,7 @@ build: protogen-go install-go-tools ## Build the project
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET}) $(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
$(info ${GREEN}I UPX: ${YELLOW}$(UPX)${RESET}) $(info ${GREEN}I UPX: ${YELLOW}$(UPX)${RESET})
rm -rf $(BINARY_NAME) || true rm -rf $(BINARY_NAME) || true
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./cmd/local-ai CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
build-launcher: ## Build the launcher application
$(info ${GREEN}I local-ai launcher build info:${RESET})
$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
rm -rf $(LAUNCHER_BINARY_NAME) || true
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(LAUNCHER_BINARY_NAME) ./cmd/launcher
build-all: build build-launcher ## Build both server and launcher
dev-dist: dev-dist:
$(GORELEASER) build --snapshot --clean $(GORELEASER) build --snapshot --clean
@@ -117,8 +108,8 @@ run: ## run local-ai
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./ CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./
test-models/testmodel.ggml: test-models/testmodel.ggml:
mkdir -p test-models mkdir test-models
mkdir -p test-dir mkdir test-dir
wget -q https://huggingface.co/mradermacher/gpt2-alpaca-gpt4-GGUF/resolve/main/gpt2-alpaca-gpt4.Q4_K_M.gguf -O test-models/testmodel.ggml wget -q https://huggingface.co/mradermacher/gpt2-alpaca-gpt4-GGUF/resolve/main/gpt2-alpaca-gpt4.Q4_K_M.gguf -O test-models/testmodel.ggml
wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
@@ -143,6 +134,27 @@ test: test-models/testmodel.ggml protogen-go
$(MAKE) test-tts $(MAKE) test-tts
$(MAKE) test-stablediffusion $(MAKE) test-stablediffusion
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
######################################################## ########################################################
## AIO tests ## AIO tests
######################################################## ########################################################
@@ -230,7 +242,10 @@ help: ## Show this help.
######################################################## ########################################################
.PHONY: protogen .PHONY: protogen
protogen: protogen-go protogen: protogen-go protogen-python
.PHONY: protogen-clean
protogen-clean: protogen-go-clean protogen-python-clean
protoc: protoc:
@OS_NAME=$$(uname -s | tr '[:upper:]' '[:lower:]'); \ @OS_NAME=$$(uname -s | tr '[:upper:]' '[:lower:]'); \
@@ -275,6 +290,93 @@ protogen-go-clean:
$(RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go $(RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go
$(RM) bin/* $(RM) bin/*
.PHONY: protogen-python
protogen-python: bark-protogen coqui-protogen chatterbox-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen
.PHONY: protogen-python-clean
protogen-python-clean: bark-protogen-clean coqui-protogen-clean chatterbox-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean
.PHONY: bark-protogen
bark-protogen:
$(MAKE) -C backend/python/bark protogen
.PHONY: bark-protogen-clean
bark-protogen-clean:
$(MAKE) -C backend/python/bark protogen-clean
.PHONY: coqui-protogen
coqui-protogen:
$(MAKE) -C backend/python/coqui protogen
.PHONY: coqui-protogen-clean
coqui-protogen-clean:
$(MAKE) -C backend/python/coqui protogen-clean
.PHONY: diffusers-protogen
diffusers-protogen:
$(MAKE) -C backend/python/diffusers protogen
.PHONY: chatterbox-protogen
chatterbox-protogen:
$(MAKE) -C backend/python/chatterbox protogen
.PHONY: diffusers-protogen-clean
diffusers-protogen-clean:
$(MAKE) -C backend/python/diffusers protogen-clean
.PHONY: chatterbox-protogen-clean
chatterbox-protogen-clean:
$(MAKE) -C backend/python/chatterbox protogen-clean
.PHONY: faster-whisper-protogen
faster-whisper-protogen:
$(MAKE) -C backend/python/faster-whisper protogen
.PHONY: faster-whisper-protogen-clean
faster-whisper-protogen-clean:
$(MAKE) -C backend/python/faster-whisper protogen-clean
.PHONY: exllama2-protogen
exllama2-protogen:
$(MAKE) -C backend/python/exllama2 protogen
.PHONY: exllama2-protogen-clean
exllama2-protogen-clean:
$(MAKE) -C backend/python/exllama2 protogen-clean
.PHONY: rerankers-protogen
rerankers-protogen:
$(MAKE) -C backend/python/rerankers protogen
.PHONY: rerankers-protogen-clean
rerankers-protogen-clean:
$(MAKE) -C backend/python/rerankers protogen-clean
.PHONY: transformers-protogen
transformers-protogen:
$(MAKE) -C backend/python/transformers protogen
.PHONY: transformers-protogen-clean
transformers-protogen-clean:
$(MAKE) -C backend/python/transformers protogen-clean
.PHONY: kokoro-protogen
kokoro-protogen:
$(MAKE) -C backend/python/kokoro protogen
.PHONY: kokoro-protogen-clean
kokoro-protogen-clean:
$(MAKE) -C backend/python/kokoro protogen-clean
.PHONY: vllm-protogen
vllm-protogen:
$(MAKE) -C backend/python/vllm protogen
.PHONY: vllm-protogen-clean
vllm-protogen-clean:
$(MAKE) -C backend/python/vllm protogen-clean
prepare-test-extra: protogen-python prepare-test-extra: protogen-python
$(MAKE) -C backend/python/transformers $(MAKE) -C backend/python/transformers
$(MAKE) -C backend/python/diffusers $(MAKE) -C backend/python/diffusers
@@ -310,7 +412,7 @@ docker-cuda11:
--build-arg GO_TAGS="$(GO_TAGS)" \ --build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \ --build-arg BUILD_TYPE=$(BUILD_TYPE) \
-t $(DOCKER_IMAGE)-cuda-11 . -t $(DOCKER_IMAGE)-cuda11 .
docker-aio: docker-aio:
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)" @echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
@@ -325,118 +427,41 @@ docker-aio-all:
docker-image-intel: docker-image-intel:
docker build \ docker build \
--build-arg BASE_IMAGE=quay.io/go-skynet/intel-oneapi-base:latest \ --build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu24.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \ --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \ --build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=intel -t $(DOCKER_IMAGE) . --build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
docker-image-intel-xpu:
docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu22.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
######################################################## ########################################################
## Backends ## Backends
######################################################## ########################################################
backends/diffusers: docker-build-diffusers docker-save-diffusers build
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
backends/kokoro: docker-build-kokoro docker-save-kokoro build
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
backends/chatterbox: docker-build-chatterbox docker-save-chatterbox build
./local-ai backends install "ocifile://$(abspath ./backend-images/chatterbox.tar)"
backends/llama-cpp-darwin: build
bash ./scripts/build/llama-cpp-darwin.sh
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
build-darwin-python-backend: build
bash ./scripts/build/python-darwin.sh
build-darwin-go-backend: build
bash ./scripts/build/golang-darwin.sh
backends/mlx:
BACKEND=mlx $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx.tar)"
backends/diffuser-darwin:
BACKEND=diffusers $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/mlx-vlm:
BACKEND=mlx-vlm $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-vlm.tar)"
backends/mlx-audio:
BACKEND=mlx-audio $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-audio.tar)"
backends/stablediffusion-ggml-darwin:
BACKEND=stablediffusion-ggml BUILD_TYPE=metal $(MAKE) build-darwin-go-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backend-images: backend-images:
mkdir -p backend-images mkdir -p backend-images
docker-build-llama-cpp: docker-build-llama-cpp:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp . docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg IMAGE_BASE=$(IMAGE_BASE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
docker-build-bark-cpp: docker-build-bark-cpp:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark-cpp -f backend/Dockerfile.golang --build-arg BACKEND=bark-cpp . docker build -t local-ai-backend:bark-cpp -f backend/Dockerfile.go --build-arg BACKEND=bark-cpp .
docker-build-piper: docker-build-piper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:piper -f backend/Dockerfile.golang --build-arg BACKEND=piper . docker build -t local-ai-backend:piper -f backend/Dockerfile.go --build-arg BACKEND=piper .
docker-build-local-store: docker-build-local-store:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:local-store -f backend/Dockerfile.golang --build-arg BACKEND=local-store . docker build -t local-ai-backend:local-store -f backend/Dockerfile.go --build-arg BACKEND=local-store .
docker-build-huggingface: docker-build-huggingface:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:huggingface -f backend/Dockerfile.golang --build-arg BACKEND=huggingface . docker build -t local-ai-backend:huggingface -f backend/Dockerfile.go --build-arg BACKEND=huggingface .
docker-build-rfdetr:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rfdetr -f backend/Dockerfile.python --build-arg BACKEND=rfdetr ./backend
docker-build-kitten-tts:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kitten-tts -f backend/Dockerfile.python --build-arg BACKEND=kitten-tts ./backend
docker-save-kitten-tts: backend-images
docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar
docker-build-kokoro:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
docker-save-kokoro: backend-images
docker save local-ai-backend:kokoro -o backend-images/kokoro.tar
docker-save-rfdetr: backend-images
docker save local-ai-backend:rfdetr -o backend-images/rfdetr.tar
docker-save-huggingface: backend-images docker-save-huggingface: backend-images
docker save local-ai-backend:huggingface -o backend-images/huggingface.tar docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
@@ -445,7 +470,7 @@ docker-save-local-store: backend-images
docker save local-ai-backend:local-store -o backend-images/local-store.tar docker save local-ai-backend:local-store -o backend-images/local-store.tar
docker-build-silero-vad: docker-build-silero-vad:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:silero-vad -f backend/Dockerfile.golang --build-arg BACKEND=silero-vad . docker build -t local-ai-backend:silero-vad -f backend/Dockerfile.go --build-arg BACKEND=silero-vad .
docker-save-silero-vad: backend-images docker-save-silero-vad: backend-images
docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar
@@ -460,46 +485,46 @@ docker-save-bark-cpp: backend-images
docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar
docker-build-stablediffusion-ggml: docker-build-stablediffusion-ggml:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.golang --build-arg BACKEND=stablediffusion-ggml . docker build -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.go --build-arg BACKEND=stablediffusion-ggml .
docker-save-stablediffusion-ggml: backend-images docker-save-stablediffusion-ggml: backend-images
docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar
docker-build-rerankers: docker-build-rerankers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers . docker build -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
docker-build-vllm: docker-build-vllm:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm . docker build -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm .
docker-build-transformers: docker-build-transformers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers . docker build -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
docker-build-diffusers: docker-build-diffusers:
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers ./backend docker build -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers .
docker-save-diffusers: backend-images docker-build-kokoro:
docker save local-ai-backend:diffusers -o backend-images/diffusers.tar docker build -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro .
docker-build-whisper: docker-build-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper . docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.go --build-arg BACKEND=whisper .
docker-save-whisper: backend-images docker-save-whisper: backend-images
docker save local-ai-backend:whisper -o backend-images/whisper.tar docker save local-ai-backend:whisper -o backend-images/whisper.tar
docker-build-faster-whisper: docker-build-faster-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper . docker build -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
docker-build-coqui: docker-build-coqui:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui . docker build -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
docker-build-bark: docker-build-bark:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark . docker build -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
docker-build-chatterbox: docker-build-chatterbox:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox ./backend docker build -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox .
docker-build-exllama2: docker-build-exllama2:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 . docker build -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2 docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2
@@ -532,19 +557,3 @@ docs-clean:
.PHONY: docs .PHONY: docs
docs: docs/static/gallery.html docs: docs/static/gallery.html
cd docs && hugo serve cd docs && hugo serve
########################################################
## Platform-specific builds
########################################################
## fyne cross-platform build
build-launcher-darwin: build-launcher
go run github.com/tiagomelo/macos-dmg-creator/cmd/createdmg@latest \
--appName "LocalAI" \
--appBinaryPath "$(LAUNCHER_BINARY_NAME)" \
--bundleIdentifier "com.localai.launcher" \
--iconPath "core/http/static/logo.png" \
--outputDir "dist/"
build-launcher-linux:
cd cmd/launcher && go run fyne.io/tools/cmd/fyne@latest package -os linux -icon ../../core/http/static/logo.png --executable $(LAUNCHER_BINARY_NAME)-linux && mv launcher.tar.xz ../../$(LAUNCHER_BINARY_NAME)-linux.tar.xz


@@ -43,7 +43,7 @@
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/) > :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
> >
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on > [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
[![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/localaiofficial_bot) [![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/localaiofficial_bot)
[![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai) [![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai)
@@ -110,12 +110,6 @@ curl https://localai.io/install.sh | sh
For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/). For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/).
### macOS Download:
<a href="https://github.com/mudler/LocalAI/releases/latest/download/LocalAI.dmg">
<img src="https://img.shields.io/badge/Download-macOS-blue?style=for-the-badge&logo=apple&logoColor=white" alt="Download LocalAI for macOS"/>
</a>
Or run with docker: Or run with docker:
### CPU only image: ### CPU only image:
@@ -146,7 +140,11 @@ docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri
### Intel GPU Images (oneAPI): ### Intel GPU Images (oneAPI):
```bash ```bash
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel # Intel GPU with FP16 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f16
# Intel GPU with FP32 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f32
``` ```
### Vulkan GPU Images: ### Vulkan GPU Images:
@@ -168,7 +166,7 @@ docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-ai
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11 docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
# Intel GPU version # Intel GPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel-f16
# AMD GPU version # AMD GPU version
docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas
@@ -191,15 +189,10 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
local-ai run oci://localai/phi-2:latest local-ai run oci://localai/phi-2:latest
``` ```
> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html) For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
## 📰 Latest project news ## 📰 Latest project news
- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips (with `development` suffix in the gallery): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607). - June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
- May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery). - May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery).
- May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0) - May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0)
@@ -232,67 +225,12 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/) - ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/) - 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
- 🥽 [Vision API](https://localai.io/features/gpt-vision/) - 🥽 [Vision API](https://localai.io/features/gpt-vision/)
- 🔍 [Object Detection](https://localai.io/features/object-detection/)
- 📈 [Reranker API](https://localai.io/features/reranker/) - 📈 [Reranker API](https://localai.io/features/reranker/)
- 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/) - 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
- [Agentic capabilities](https://github.com/mudler/LocalAGI) - [Agentic capabilities](https://github.com/mudler/LocalAGI)
- 🔊 Voice activity detection (Silero-VAD support) - 🔊 Voice activity detection (Silero-VAD support)
- 🌍 Integrated WebUI! - 🌍 Integrated WebUI!
## 🧩 Supported Backends & Acceleration
LocalAI supports a comprehensive range of AI backends with multiple acceleration options:
### Text Generation & Language Models
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 11/12, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
### Audio & Speech Processing
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 11/12, CPU |
| **piper** | Fast neural TTS system | CPU |
| **kitten-tts** | Kitten TTS models | CPU |
| **silero-vad** | Voice Activity Detection | CPU |
### Image & Video Generation
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12, Intel SYCL, Vulkan, CPU |
| **diffusers** | HuggingFace diffusion models | CUDA 11/12, ROCm, Intel, Metal, CPU |
### Specialized AI Tasks
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **rfdetr** | Real-time object detection | CUDA 12, Intel, CPU |
| **rerankers** | Document reranking API | CUDA 11/12, ROCm, Intel, CPU |
| **local-store** | Vector database | CPU |
| **huggingface** | HuggingFace API integration | API-based |
### Hardware Acceleration Matrix
| Acceleration Type | Supported Backends | Hardware Support |
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI |
| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
### 🔗 Community and integrations ### 🔗 Community and integrations
@@ -307,9 +245,6 @@ WebUIs:
Model galleries Model galleries
- https://github.com/go-skynet/model-gallery - https://github.com/go-skynet/model-gallery
Voice:
- https://github.com/richiejp/VoxInput
Other: Other:
- Helm chart https://github.com/go-skynet/helm-charts - Helm chart https://github.com/go-skynet/helm-charts
- VSCode extension https://github.com/badgooooor/localai-vscode-plugin - VSCode extension https://github.com/badgooooor/localai-vscode-plugin


@@ -2,10 +2,10 @@ context_size: 4096
f16: true f16: true
backend: llama-cpp backend: llama-cpp
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -42,9 +42,9 @@ template:
<|im_start|>assistant <|im_start|>assistant
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd


@@ -2,10 +2,10 @@ context_size: 4096
backend: llama-cpp backend: llama-cpp
f16: true f16: true
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -42,9 +42,9 @@ template:
<|im_start|>assistant <|im_start|>assistant
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd


@@ -2,10 +2,10 @@ context_size: 4096
backend: llama-cpp backend: llama-cpp
f16: true f16: true
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -43,9 +43,9 @@ template:
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd


@@ -96,6 +96,17 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
ldconfig \ ldconfig \
; fi ; fi
# Intel oneAPI requirements
RUN <<EOT bash
if [[ "${BUILD_TYPE}" == sycl* ]] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
intel-oneapi-runtime-libs && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# Install Go # Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin


@@ -11,6 +11,7 @@ ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0 ARG GRPC_VERSION=v1.65.0
ARG CMAKE_FROM_SOURCE=false ARG CMAKE_FROM_SOURCE=false
ARG CMAKE_VERSION=3.26.4 ARG CMAKE_VERSION=3.26.4
ARG PROTOBUF_VERSION=v21.12
ENV MAKEFLAGS=${GRPC_MAKEFLAGS} ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
@@ -49,6 +50,14 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
make install && \ make install && \
rm -rf /build rm -rf /build
RUN git clone --recurse-submodules --branch ${PROTOBUF_VERSION} https://github.com/protocolbuffers/protobuf.git && \
mkdir -p /build/protobuf/build && \
cd /build/protobuf/build && \
cmake -Dprotobuf_BUILD_SHARED_LIBS=ON -Dprotobuf_BUILD_TESTS=OFF .. && \
make && \
make install && \
rm -rf /build
FROM ${BASE_IMAGE} AS builder FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers ARG BACKEND=rerankers
ARG BUILD_TYPE ARG BUILD_TYPE
@@ -180,21 +189,9 @@ COPY --from=grpc /opt/grpc /usr/local
COPY . /LocalAI COPY . /LocalAI
## Otherwise just run the normal build RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp
RUN <<EOT bash RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp-grpc
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \ RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp-rpc-server
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-fallback && \
make llama-cpp-grpc && make llama-cpp-rpc-server; \
else \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-avx && \
make llama-cpp-avx2 && \
make llama-cpp-avx512 && \
make llama-cpp-fallback && \
make llama-cpp-grpc && \
make llama-cpp-rpc-server; \
fi
EOT
# Copy libraries using a script to handle architecture differences # Copy libraries using a script to handle architecture differences
RUN make -C /LocalAI/backend/cpp/llama-cpp package RUN make -C /LocalAI/backend/cpp/llama-cpp package


@@ -23,7 +23,7 @@ RUN apt-get update && \
libssl-dev \ libssl-dev \
git \ git \
git-lfs \ git-lfs \
unzip clang \ unzip \
upx-ucl \ upx-ucl \
curl python3-pip \ curl python3-pip \
python-is-python3 \ python-is-python3 \
@@ -116,7 +116,7 @@ COPY python/${BACKEND} /${BACKEND}
COPY backend.proto /${BACKEND}/backend.proto COPY backend.proto /${BACKEND}/backend.proto
COPY python/common/ /${BACKEND}/common COPY python/common/ /${BACKEND}/common
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make RUN cd /${BACKEND} && make
FROM scratch FROM scratch
ARG BACKEND=rerankers ARG BACKEND=rerankers


@@ -1,213 +0,0 @@
# LocalAI Backend Architecture
This directory contains the core backend infrastructure for LocalAI, including the gRPC protocol definition, multi-language Dockerfiles, and language-specific backend implementations.
## Overview
LocalAI uses a unified gRPC-based architecture that allows different programming languages to implement AI backends while maintaining consistent interfaces and capabilities. The backend system supports multiple hardware acceleration targets and provides a standardized way to integrate various AI models and frameworks.
## Architecture Components
### 1. Protocol Definition (`backend.proto`)
The `backend.proto` file defines the gRPC service interface that all backends must implement. This ensures consistency across different language implementations and provides a contract for communication between LocalAI core and backend services.
#### Core Services
- **Text Generation**: `Predict`, `PredictStream` for LLM inference
- **Embeddings**: `Embedding` for text vectorization
- **Image Generation**: `GenerateImage` for stable diffusion and image models
- **Audio Processing**: `AudioTranscription`, `TTS`, `SoundGeneration`
- **Video Generation**: `GenerateVideo` for video synthesis
- **Object Detection**: `Detect` for computer vision tasks
- **Vector Storage**: `StoresSet`, `StoresGet`, `StoresFind` for RAG operations
- **Reranking**: `Rerank` for document relevance scoring
- **Voice Activity Detection**: `VAD` for audio segmentation
#### Key Message Types
- **`PredictOptions`**: Comprehensive configuration for text generation
- **`ModelOptions`**: Model loading and configuration parameters
- **`Result`**: Standardized response format
- **`StatusResponse`**: Backend health and memory usage information
### 2. Multi-Language Dockerfiles
The backend system provides language-specific Dockerfiles that handle the build environment and dependencies for different programming languages:
- `Dockerfile.python`
- `Dockerfile.golang`
- `Dockerfile.llama-cpp`
### 3. Language-Specific Implementations
#### Python Backends (`python/`)
- **transformers**: Hugging Face Transformers framework
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro
#### Go Backends (`go/`)
- **whisper**: OpenAI Whisper speech recognition in Go with a GGML C++ backend (whisper.cpp)
- **stablediffusion-ggml**: Stable Diffusion in Go with a GGML C++ backend
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Go with C bindings to rhasspy/piper
- **bark-cpp**: Bark TTS models in Go with C++ bindings
- **local-store**: Vector storage backend
#### C++ Backends (`cpp/`)
- **llama-cpp**: Llama.cpp integration
- **grpc**: GRPC utilities and helpers
## Hardware Acceleration Support
### CUDA (NVIDIA)
- **Versions**: CUDA 11.x, 12.x
- **Features**: cuBLAS, cuDNN, TensorRT optimization
- **Targets**: x86_64, ARM64 (Jetson)
### ROCm (AMD)
- **Features**: HIP, rocBLAS, MIOpen
- **Targets**: AMD GPUs with ROCm support
### Intel
- **Features**: oneAPI, Intel Extension for PyTorch
- **Targets**: Intel GPUs, XPUs, CPUs
### Vulkan
- **Features**: Cross-platform GPU acceleration
- **Targets**: Windows, Linux, Android, macOS
### Apple Silicon
- **Features**: MLX framework, Metal Performance Shaders
- **Targets**: M1/M2/M3 Macs
## Backend Registry (`index.yaml`)
The `index.yaml` file serves as a central registry for all available backends, providing:
- **Metadata**: Name, description, license, icons
- **Capabilities**: Hardware targets and optimization profiles
- **Tags**: Categorization for discovery
- **URLs**: Source code and documentation links
## Building Backends
### Prerequisites
- Docker with multi-architecture support
- Appropriate hardware drivers (CUDA, ROCm, etc.)
- Build tools (make, cmake, compilers)
### Build Commands
Example build commands with Docker:
```bash
# Build Python backend
docker build -f backend/Dockerfile.python \
--build-arg BACKEND=transformers \
--build-arg BUILD_TYPE=cublas12 \
--build-arg CUDA_MAJOR_VERSION=12 \
--build-arg CUDA_MINOR_VERSION=0 \
-t localai-backend-transformers .
# Build Go backend
docker build -f backend/Dockerfile.golang \
--build-arg BACKEND=whisper \
--build-arg BUILD_TYPE=cpu \
-t localai-backend-whisper .
# Build C++ backend
docker build -f backend/Dockerfile.llama-cpp \
--build-arg BACKEND=llama-cpp \
--build-arg BUILD_TYPE=cublas12 \
-t localai-backend-llama-cpp .
```
For ARM64/Mac builds, Docker cannot be used; use the Makefile in the respective backend instead.
### Build Types
- **`cpu`**: CPU-only optimization
- **`cublas11`**: CUDA 11.x with cuBLAS
- **`cublas12`**: CUDA 12.x with cuBLAS
- **`hipblas`**: ROCm with rocBLAS
- **`intel`**: Intel oneAPI optimization
- **`vulkan`**: Vulkan-based acceleration
- **`metal`**: Apple Metal optimization
## Backend Development
### Creating a New Backend
1. **Choose Language**: Select Python, Go, or C++ based on requirements
2. **Implement Interface**: Implement the gRPC service defined in `backend.proto`
3. **Add Dependencies**: Create appropriate requirements files
4. **Configure Build**: Set up Dockerfile and build scripts
5. **Register Backend**: Add entry to `index.yaml`
6. **Test Integration**: Verify gRPC communication and functionality
### Backend Structure
```
backend-name/
├── backend.py/go/cpp # Main implementation
├── requirements.txt # Dependencies
├── Dockerfile # Build configuration
├── install.sh # Installation script
├── run.sh # Execution script
├── test.sh # Test script
└── README.md # Backend documentation
```
### Required gRPC Methods
At minimum, backends must implement the following methods (a minimal sketch follows this list):
- `Health()` - Service health check
- `LoadModel()` - Model loading and initialization
- `Predict()` - Main inference endpoint
- `Status()` - Backend status and metrics
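The sketch below illustrates these required methods in Python. It assumes the stubs generated from `backend.proto` are importable as `backend_pb2` and `backend_pb2_grpc`, that the service is registered via `add_BackendServicer_to_server`, and that message and field names such as `Reply`, `Result`, `StatusResponse`, `PredictOptions.Prompt`, and `ModelOptions.Model` match the generated code; verify these against the actual stubs, as this is an illustration rather than a reference implementation.
```python
# Minimal backend sketch (assumptions: backend_pb2 / backend_pb2_grpc are the
# stubs generated from backend.proto; names may differ in the generated code).
from concurrent import futures

import grpc

import backend_pb2
import backend_pb2_grpc


class MinimalBackend(backend_pb2_grpc.BackendServicer):
    def __init__(self):
        self.model = None

    def Health(self, request, context):
        # Liveness probe used by LocalAI core.
        return backend_pb2.Reply(message=b"OK")

    def LoadModel(self, request, context):
        # request is a ModelOptions message; a real backend would load weights here.
        self.model = request.Model
        return backend_pb2.Result(success=True, message="model loaded")

    def Predict(self, request, context):
        # request is a PredictOptions message; echo the prompt as a stand-in for inference.
        text = f"echo: {request.Prompt}"
        return backend_pb2.Reply(message=text.encode("utf-8"))

    def Status(self, request, context):
        # Report readiness; real backends also return memory usage metrics.
        return backend_pb2.StatusResponse(state=backend_pb2.StatusResponse.READY)


def serve(address: str = "127.0.0.1:50051"):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    backend_pb2_grpc.add_BackendServicer_to_server(MinimalBackend(), server)
    server.add_insecure_port(address)
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    serve()
```
In practice the listen address would come from the arguments LocalAI passes when it launches the backend (typically via `run.sh`), rather than being hard-coded as above.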
## Integration with LocalAI Core
Backends communicate with LocalAI core through gRPC (a client-side sketch follows this list):
1. **Service Discovery**: Core discovers available backends
2. **Model Loading**: Core requests model loading via `LoadModel`
3. **Inference**: Core sends requests via `Predict` or specialized endpoints
4. **Streaming**: Core handles streaming responses for real-time generation
5. **Monitoring**: Core tracks backend health and performance
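The same flow can be exercised from the client side. The following sketch is illustrative only: it assumes the generated `backend_pb2` / `backend_pb2_grpc` stubs, a backend listening on a local port, and field names that should be checked against `backend.proto`.
```python
# Hypothetical client-side walk-through of the discovery -> load -> predict flow.
import grpc

import backend_pb2
import backend_pb2_grpc


def run(address: str = "127.0.0.1:50051"):
    with grpc.insecure_channel(address) as channel:
        stub = backend_pb2_grpc.BackendStub(channel)

        # 1. Health check: is the backend up?
        print(stub.Health(backend_pb2.HealthMessage()).message)

        # 2. Ask the backend to load a model.
        load = stub.LoadModel(backend_pb2.ModelOptions(Model="testmodel.ggml"))
        print("loaded:", load.success, load.message)

        # 3. Run a prediction; streaming generation would use stub.PredictStream(...).
        reply = stub.Predict(backend_pb2.PredictOptions(Prompt="Hello"))
        print(reply.message.decode("utf-8"))


if __name__ == "__main__":
    run()
```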
## Performance Optimization
### Memory Management
- **Model Caching**: Efficient model loading and caching
- **Batch Processing**: Optimize for multiple concurrent requests
- **Memory Pinning**: GPU memory optimization for CUDA/ROCm
### Hardware Utilization
- **Multi-GPU**: Support for tensor parallelism
- **Mixed Precision**: FP16/BF16 for memory efficiency
- **Kernel Fusion**: Optimized CUDA/ROCm kernels
## Troubleshooting
### Common Issues
1. **GRPC Connection**: Verify backend service is running and accessible
2. **Model Loading**: Check model paths and dependencies
3. **Hardware Detection**: Ensure the appropriate drivers and libraries are installed
4. **Memory Issues**: Monitor GPU memory usage and model sizes
## Contributing
When contributing to the backend system:
1. **Follow Protocol**: Implement the exact gRPC interface
2. **Add Tests**: Include comprehensive test coverage
3. **Document**: Provide clear usage examples
4. **Optimize**: Consider performance and resource usage
5. **Validate**: Test across different hardware targets


@@ -20,7 +20,6 @@ service Backend {
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {} rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {} rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {} rpc Status(HealthMessage) returns (StatusResponse) {}
rpc Detect(DetectOptions) returns (DetectResponse) {}
rpc StoresSet(StoresSetOptions) returns (Result) {} rpc StoresSet(StoresSetOptions) returns (Result) {}
rpc StoresDelete(StoresDeleteOptions) returns (Result) {} rpc StoresDelete(StoresDeleteOptions) returns (Result) {}
@@ -242,7 +241,7 @@ message ModelOptions {
string Type = 49; string Type = 49;
string FlashAttention = 56; bool FlashAttention = 56;
bool NoKVOffload = 57; bool NoKVOffload = 57;
string ModelPath = 59; string ModelPath = 59;
@@ -276,7 +275,6 @@ message TranscriptRequest {
string language = 3; string language = 3;
uint32 threads = 4; uint32 threads = 4;
bool translate = 5; bool translate = 5;
bool diarize = 6;
} }
message TranscriptResult { message TranscriptResult {
@@ -306,24 +304,19 @@ message GenerateImageRequest {
// Diffusers // Diffusers
string EnableParameters = 10; string EnableParameters = 10;
int32 CLIPSkip = 11; int32 CLIPSkip = 11;
// Reference images for models that support them (e.g., Flux Kontext)
repeated string ref_images = 12;
} }
message GenerateVideoRequest { message GenerateVideoRequest {
string prompt = 1; string prompt = 1;
string negative_prompt = 2; // Negative prompt for video generation string start_image = 2; // Path or base64 encoded image for the start frame
string start_image = 3; // Path or base64 encoded image for the start frame string end_image = 3; // Path or base64 encoded image for the end frame
string end_image = 4; // Path or base64 encoded image for the end frame int32 width = 4;
int32 width = 5; int32 height = 5;
int32 height = 6; int32 num_frames = 6; // Number of frames to generate
int32 num_frames = 7; // Number of frames to generate int32 fps = 7; // Frames per second
int32 fps = 8; // Frames per second int32 seed = 8;
int32 seed = 9; float cfg_scale = 9; // Classifier-free guidance scale
float cfg_scale = 10; // Classifier-free guidance scale string dst = 10; // Output path for the generated video
int32 step = 11; // Number of inference steps
string dst = 12; // Output path for the generated video
} }
message TTSRequest { message TTSRequest {
@@ -383,20 +376,3 @@ message Message {
string role = 1; string role = 1;
string content = 2; string content = 2;
} }
message DetectOptions {
string src = 1;
}
message Detection {
float x = 1;
float y = 2;
float width = 3;
float height = 4;
float confidence = 5;
string class_name = 6;
}
message DetectResponse {
repeated Detection Detections = 1;
}


@@ -17,6 +17,8 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
include_directories("${HOMEBREW_DEFAULT_PREFIX}/include") include_directories("${HOMEBREW_DEFAULT_PREFIX}/include")
endif() endif()
set(Protobuf_USE_STATIC_LIBS OFF)
set(gRPC_USE_STATIC_LIBS OFF)
find_package(absl CONFIG REQUIRED) find_package(absl CONFIG REQUIRED)
find_package(Protobuf CONFIG REQUIRED) find_package(Protobuf CONFIG REQUIRED)
find_package(gRPC CONFIG REQUIRED) find_package(gRPC CONFIG REQUIRED)


@@ -1,5 +1,5 @@
LLAMA_VERSION?=f432d8d83e7407073634c5e4fd81a3d23a10827f LLAMA_VERSION?=acd6cb1c41676f6bbb25c2a76fa5abeb1719301e
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?= CMAKE_ARGS?=
@@ -7,10 +7,9 @@ BUILD_TYPE?=
NATIVE?=false NATIVE?=false
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server TARGET?=--target grpc-server
JOBS?=$(shell nproc)
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF CMAKE_ARGS+=-DBUILD_SHARED_LIBS=ON -DLLAMA_CURL=OFF -DGGML_CPU_ALL_VARIANTS=ON -DGGML_BACKEND_DL=ON
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false) ifeq ($(NATIVE),false)
@@ -26,14 +25,16 @@ else ifeq ($(BUILD_TYPE),openblas)
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path # If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas) else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
ROCM_HOME ?= /opt/rocm ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm ROCM_PATH ?= /opt/rocm
export CXX=$(ROCM_HOME)/llvm/bin/clang++ export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang export CC=$(ROCM_HOME)/llvm/bin/clang
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201 # GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS) # AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=1 CMAKE_ARGS+=-DGGML_VULKAN=1
else ifeq ($(OS),Darwin) else ifeq ($(OS),Darwin)
@@ -88,33 +89,12 @@ else
LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
endif endif
llama-cpp-avx2: llama.cpp llama-cpp: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build purge $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build purge
$(info ${GREEN}I llama-cpp build info:avx2${RESET}) $(info ${GREEN}I llama-cpp build info:${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx2-build" build-llama-cpp-grpc-server CMAKE_ARGS="$(CMAKE_ARGS)" $(MAKE) VARIANT="llama-cpp-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build/grpc-server llama-cpp-avx2 cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build/grpc-server llama-cpp
llama-cpp-avx512: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build purge
$(info ${GREEN}I llama-cpp build info:avx512${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx512-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build/grpc-server llama-cpp-avx512
llama-cpp-avx: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
$(info ${GREEN}I llama-cpp build info:avx${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
llama-cpp-fallback: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
llama-cpp-grpc: llama.cpp llama-cpp-grpc: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
@@ -159,8 +139,8 @@ grpc-server: llama.cpp llama.cpp/tools/grpc-server
@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)" @echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE))) ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \ +bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)" cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)"
else else
+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET) +cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
endif endif
cp llama.cpp/build/bin/grpc-server . cp llama.cpp/build/bin/grpc-server .

View File

@@ -53,9 +53,9 @@ static void start_llama_server(server_context& ctx_server) {
LOG_INF("%s: model loaded\n", __func__); LOG_INF("%s: model loaded\n", __func__);
// print sample chat example to make it clear which template is used // print sample chat example to make it clear which template is used
// LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__, LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
// common_chat_templates_source(ctx_server.chat_templates.get()), common_chat_templates_source(ctx_server.chat_templates.get()),
// common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str(), ctx_server.params_base.default_template_kwargs); common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());
// Reset the chat templates // Reset the chat templates
// TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM // TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM
@@ -304,15 +304,7 @@ static void params_parse(const backend::ModelOptions* request,
} }
params.use_mlock = request->mlock(); params.use_mlock = request->mlock();
params.use_mmap = request->mmap(); params.use_mmap = request->mmap();
params.flash_attn = request->flashattention();
if (request->flashattention() == "on" || request->flashattention() == "enabled") {
params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
} else if (request->flashattention() == "off" || request->flashattention() == "disabled") {
params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
} else if (request->flashattention() == "auto") {
params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
}
params.no_kv_offload = request->nokvoffload(); params.no_kv_offload = request->nokvoffload();
params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops) params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops)
@@ -321,11 +313,9 @@ static void params_parse(const backend::ModelOptions* request,
params.pooling_type = LLAMA_POOLING_TYPE_RANK; params.pooling_type = LLAMA_POOLING_TYPE_RANK;
} }
if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; } if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
else if (request->ropescaling() == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; } else if (request->ropescaling() == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
else if (request->ropescaling() == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; } else { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
if ( request->yarnextfactor() != 0.0f ) { if ( request->yarnextfactor() != 0.0f ) {
params.yarn_ext_factor = request->yarnextfactor(); params.yarn_ext_factor = request->yarnextfactor();
} }
@@ -445,7 +435,24 @@ public:
} }
} }
// process files
mtmd::bitmaps bitmaps;
const bool has_mtmd = ctx_server.mctx != nullptr; const bool has_mtmd = ctx_server.mctx != nullptr;
{
if (!has_mtmd && !files.empty()) {
throw std::runtime_error("This server does not support multimodal");
}
for (auto & file : files) {
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
if (!bmp.ptr) {
throw std::runtime_error("Failed to load image/audio");
}
// calculate bitmap hash (for KV caching)
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
bmp.set_id(hash.c_str());
bitmaps.entries.push_back(std::move(bmp));
}
}
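For context, the bitmap id assigned above is just a content hash used as a KV-cache key. A minimal sketch of a 64-bit FNV-1a over the raw bitmap bytes (assuming a hex digest; the upstream fnv_hash helper may format its result differently):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>

// Illustrative only: hash the raw bytes and use the digest as the bitmap id.
static std::string fnv1a_hash(const unsigned char * data, std::size_t len) {
    uint64_t h = 0xcbf29ce484222325ULL;   // FNV offset basis
    for (std::size_t i = 0; i < len; ++i) {
        h ^= data[i];
        h *= 0x100000001b3ULL;            // FNV prime
    }
    char buf[17];
    std::snprintf(buf, sizeof(buf), "%016llx", (unsigned long long) h);
    return std::string(buf);
}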
// process prompt // process prompt
std::vector<server_tokens> inputs; std::vector<server_tokens> inputs;
@@ -455,10 +462,32 @@ public:
if (has_mtmd) { if (has_mtmd) {
// multimodal // multimodal
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files)); std::string prompt_str = prompt.get<std::string>();
mtmd_input_text inp_txt = {
prompt_str.c_str(),
/* add_special */ true,
/* parse_special */ true,
};
mtmd::input_chunks chunks(mtmd_input_chunks_init());
auto bitmaps_c_ptr = bitmaps.c_ptr();
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
chunks.ptr.get(),
&inp_txt,
bitmaps_c_ptr.data(),
bitmaps_c_ptr.size());
if (tokenized != 0) {
throw std::runtime_error("Failed to tokenize prompt");
}
server_tokens tmp(chunks, true);
inputs.push_back(std::move(tmp));
} else { } else {
// Everything else, including multimodal completions. // non-multimodal version
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (auto & p : tokenized_prompts) {
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
inputs.push_back(std::move(tmp));
}
} }
tasks.reserve(inputs.size()); tasks.reserve(inputs.size());
@@ -599,7 +628,23 @@ public:
} }
// process files // process files
mtmd::bitmaps bitmaps;
const bool has_mtmd = ctx_server.mctx != nullptr; const bool has_mtmd = ctx_server.mctx != nullptr;
{
if (!has_mtmd && !files.empty()) {
throw std::runtime_error("This server does not support multimodal");
}
for (auto & file : files) {
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
if (!bmp.ptr) {
throw std::runtime_error("Failed to load image/audio");
}
// calculate bitmap hash (for KV caching)
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
bmp.set_id(hash.c_str());
bitmaps.entries.push_back(std::move(bmp));
}
}
// process prompt // process prompt
std::vector<server_tokens> inputs; std::vector<server_tokens> inputs;
@@ -610,10 +655,33 @@ public:
if (has_mtmd) { if (has_mtmd) {
// multimodal // multimodal
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files)); std::string prompt_str = prompt.get<std::string>();
mtmd_input_text inp_txt = {
prompt_str.c_str(),
/* add_special */ true,
/* parse_special */ true,
};
mtmd::input_chunks chunks(mtmd_input_chunks_init());
auto bitmaps_c_ptr = bitmaps.c_ptr();
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
chunks.ptr.get(),
&inp_txt,
bitmaps_c_ptr.data(),
bitmaps_c_ptr.size());
if (tokenized != 0) {
std::cout << "[PREDICT] Failed to tokenize prompt" << std::endl;
throw std::runtime_error("Failed to tokenize prompt");
}
server_tokens tmp(chunks, true);
inputs.push_back(std::move(tmp));
} else { } else {
// Everything else, including multimodal completions. // non-multimodal version
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (auto & p : tokenized_prompts) {
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
inputs.push_back(std::move(tmp));
}
} }
tasks.reserve(inputs.size()); tasks.reserve(inputs.size());
@@ -701,10 +769,10 @@ public:
*/ */
// for the shape of input/content, see tokenize_input_prompts() // for the shape of input/content, see tokenize_input_prompts()
json prompt = body.at("embeddings"); json prompt = body.at("prompt");
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (const auto & tokens : tokenized_prompts) { for (const auto & tokens : tokenized_prompts) {
// this check is necessary for models that do not add BOS token to the input // this check is necessary for models that do not add BOS token to the input
if (tokens.empty()) { if (tokens.empty()) {
@@ -712,7 +780,6 @@ public:
} }
} }
int embd_normalize = 2; // default to Euclidean/L2 norm
// create and queue the task // create and queue the task
json responses = json::array(); json responses = json::array();
bool error = false; bool error = false;
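The embd_normalize default of 2 shown on the old side of this hunk selects Euclidean (L2) normalisation, i.e. every embedding vector is rescaled to unit length before being returned. A small illustrative helper, not part of the server code:

#include <cmath>
#include <vector>

// Scale v to unit Euclidean length; a zero vector is left untouched.
static void l2_normalize(std::vector<float> & v) {
    double norm = 0.0;
    for (float x : v) norm += (double) x * x;
    norm = std::sqrt(norm);
    if (norm > 0.0) {
        for (float & x : v) x = (float) (x / norm);
    }
}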
@@ -724,10 +791,11 @@ public:
task.id = ctx_server.queue_tasks.get_new_id(); task.id = ctx_server.queue_tasks.get_new_id();
task.index = i; task.index = i;
task.prompt_tokens = std::move(tokenized_prompts[i]); task.prompt_tokens = server_tokens(tokenized_prompts[i], ctx_server.mctx != nullptr);
// OAI-compat
task.params.oaicompat = OAICOMPAT_TYPE_EMBEDDING;
task.params.oaicompat = OAICOMPAT_TYPE_NONE;
task.params.embd_normalize = embd_normalize;
tasks.push_back(std::move(task)); tasks.push_back(std::move(task));
} }
@@ -743,8 +811,9 @@ public:
responses.push_back(res->to_json()); responses.push_back(res->to_json());
} }
}, [&](const json & error_data) { }, [&](const json & error_data) {
error = true; return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, error_data.value("content", ""));
}, [&]() { }, [&]() {
// NOTE: we should try to check when the writer is closed here
return false; return false;
}); });
@@ -754,36 +823,12 @@ public:
return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results"); return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results");
} }
std::cout << "[DEBUG] Responses size: " << responses.size() << std::endl; std::vector<float> embeddings = responses[0].value("embedding", std::vector<float>());
// loop the vector and set the embeddings results
// Process the responses and extract embeddings for (int i = 0; i < embeddings.size(); i++) {
for (const auto & response_elem : responses) { embeddingResult->add_embeddings(embeddings[i]);
// Check if the response has an "embedding" field
if (response_elem.contains("embedding")) {
json embedding_data = json_value(response_elem, "embedding", json::array());
if (embedding_data.is_array() && !embedding_data.empty()) {
for (const auto & embedding_vector : embedding_data) {
if (embedding_vector.is_array()) {
for (const auto & embedding_value : embedding_vector) {
embeddingResult->add_embeddings(embedding_value.get<float>());
}
}
}
}
} else {
// Check if the response itself contains the embedding data directly
if (response_elem.is_array()) {
for (const auto & embedding_value : response_elem) {
embeddingResult->add_embeddings(embedding_value.get<float>());
}
}
}
} }
return grpc::Status::OK; return grpc::Status::OK;
} }
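The two branches of the extraction loop on the old side of this hunk accept two response shapes. Roughly, assuming the file's nlohmann json alias and purely illustrative values:

// shape handled by the first branch: an object carrying an "embedding" array of vectors
json nested = { {"embedding", json::array({ json::array({0.1, 0.2}) })} };
// shape handled by the fallback branch: the embedding vector itself
json flat = json::array({0.1, 0.2});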
@@ -802,10 +847,8 @@ public:
} }
// Tokenize the query // Tokenize the query
auto tokenized_query = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, request->query(), /* add_special */ false, true); llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.vocab, request->query(), /* add_special */ false, true)[0];
if (tokenized_query.size() != 1) {
return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"query\" must contain only a single prompt");
}
// Create and queue the task // Create and queue the task
json responses = json::array(); json responses = json::array();
bool error = false; bool error = false;
@@ -817,14 +860,14 @@ public:
documents.push_back(request->documents(i)); documents.push_back(request->documents(i));
} }
auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, documents, /* add_special */ false, true); auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, documents, /* add_special */ false, true);
tasks.reserve(tokenized_docs.size()); tasks.reserve(tokenized_docs.size());
for (size_t i = 0; i < tokenized_docs.size(); i++) { for (size_t i = 0; i < tokenized_docs.size(); i++) {
auto tmp = format_rerank(ctx_server.vocab, tokenized_query[0], tokenized_docs[i]); auto tmp = format_rerank(ctx_server.vocab, tokenized_query, tokenized_docs[i]);
server_task task = server_task(SERVER_TASK_TYPE_RERANK); server_task task = server_task(SERVER_TASK_TYPE_RERANK);
task.id = ctx_server.queue_tasks.get_new_id(); task.id = ctx_server.queue_tasks.get_new_id();
task.index = i; task.index = i;
task.prompt_tokens = std::move(tmp); task.prompt_tokens = server_tokens(tmp, ctx_server.mctx != nullptr);
tasks.push_back(std::move(task)); tasks.push_back(std::move(task));
} }
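As a rough sketch of what format_rerank builds for each (query, document) pair — typically a BOS/EOS/SEP-delimited concatenation; the exact token layout lives upstream and may differ:

// Illustrative layout only: [BOS] query [EOS] [SEP] document [EOS]
static llama_tokens rerank_prompt_sketch(const llama_vocab * vocab,
                                         const llama_tokens & query,
                                         const llama_tokens & doc) {
    llama_tokens out;
    out.reserve(query.size() + doc.size() + 4);
    out.push_back(llama_vocab_bos(vocab));
    out.insert(out.end(), query.begin(), query.end());
    out.push_back(llama_vocab_eos(vocab));
    out.push_back(llama_vocab_sep(vocab));
    out.insert(out.end(), doc.begin(), doc.end());
    out.push_back(llama_vocab_eos(vocab));
    return out;
}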

View File

@@ -6,34 +6,9 @@ CURDIR=$(dirname "$(realpath $0)")
cd / cd /
echo "CPU info:" BINARY=llama-cpp
grep -e "model\sname" /proc/cpuinfo | head -1
grep -e "flags" /proc/cpuinfo | head -1
BINARY=llama-cpp-fallback
if grep -q -e "\savx\s" /proc/cpuinfo ; then
echo "CPU: AVX found OK"
if [ -e $CURDIR/llama-cpp-avx ]; then
BINARY=llama-cpp-avx
fi
fi
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
echo "CPU: AVX2 found OK"
if [ -e $CURDIR/llama-cpp-avx2 ]; then
BINARY=llama-cpp-avx2
fi
fi
# Check avx 512
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
echo "CPU: AVX512F found OK"
if [ -e $CURDIR/llama-cpp-avx512 ]; then
BINARY=llama-cpp-avx512
fi
fi
## P2P/GRPC mode
if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then
if [ -e $CURDIR/llama-cpp-grpc ]; then if [ -e $CURDIR/llama-cpp-grpc ]; then
BINARY=llama-cpp-grpc BINARY=llama-cpp-grpc
@@ -42,8 +17,7 @@ fi
# Extend ld library path with the dir where this script is located/lib # Extend ld library path with the dir where this script is located/lib
if [ "$(uname)" == "Darwin" ]; then if [ "$(uname)" == "Darwin" ]; then
export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
#export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
else else
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
fi fi
@@ -57,6 +31,3 @@ fi
echo "Using binary: $BINARY" echo "Using binary: $BINARY"
exec $CURDIR/$BINARY "$@" exec $CURDIR/$BINARY "$@"
# We should never reach this point, however just in case we do, run fallback
exec $CURDIR/llama-cpp-fallback "$@"

View File

@@ -1,6 +0,0 @@
package/
sources/
.cache/
build/
libgosd.so
stablediffusion-ggml

View File

@@ -1,20 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(gosd LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
add_subdirectory(./sources/stablediffusion-ggml.cpp)
add_library(gosd MODULE gosd.cpp)
target_link_libraries(gosd PRIVATE stable-diffusion ggml)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
target_link_libraries(gosd PRIVATE stdc++fs)
endif()
target_include_directories(gosd PUBLIC
stable-diffusion.cpp
stable-diffusion.cpp/thirdparty
)
set_property(TARGET gosd PROPERTY CXX_STANDARD 17)
set_target_properties(gosd PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -1,16 +1,28 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
CMAKE_ARGS?= CMAKE_ARGS?=
BUILD_TYPE?= BUILD_TYPE?=
NATIVE?=false NATIVE?=false
CUDA_LIBPATH?=/usr/local/cuda/lib64/
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC
GOCMD?=go GOCMD?=go
CGO_LDFLAGS?=
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
CGO_LDFLAGS_SYCL=
GO_TAGS?= GO_TAGS?=
JOBS?=$(shell nproc --ignore=1) LD_FLAGS?=
# stablediffusion.cpp (ggml) # stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp STABLEDIFFUSION_GGML_REPO?=https://github.com/richiejp/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=0ebe6fe118f125665939b27c89f34ed38716bff8 STABLEDIFFUSION_GGML_VERSION?=53e3b17eb3d0b5760ced06a1f98320b68b34aaae
CMAKE_ARGS+=-DGGML_MAX_NAME=128 # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
ifeq ($(NATIVE),false) ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF CMAKE_ARGS+=-DGGML_NATIVE=OFF
@@ -19,6 +31,7 @@ endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas) ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS # If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically # to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas) else ifeq ($(BUILD_TYPE),openblas)
@@ -29,14 +42,18 @@ else ifeq ($(BUILD_TYPE),clblas)
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
CGO_LDFLAGS+=-lvulkan
else ifeq ($(OS),Darwin) else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal) ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF
else else
CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
TARGET+=--target ggml-metal
endif endif
endif endif
@@ -46,6 +63,12 @@ ifeq ($(BUILD_TYPE),sycl_f16)
-DCMAKE_CXX_COMPILER=icpx \ -DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON \ -DSD_SYCL=ON \
-DGGML_SYCL_F16=ON -DGGML_SYCL_F16=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif endif
ifeq ($(BUILD_TYPE),sycl_f32) ifeq ($(BUILD_TYPE),sycl_f32)
@@ -53,29 +76,83 @@ ifeq ($(BUILD_TYPE),sycl_f32)
-DCMAKE_C_COMPILER=icx \ -DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \ -DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON -DSD_SYCL=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif endif
# warnings
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
# Find all .a archives in ARCHIVE_DIR
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
GGML_ARCHIVE_DIR := build/ggml/src/
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
# Name of the single merged library
COMBINED_LIB := libggmlall.a
# Rule to merge all the .a files into one
$(COMBINED_LIB): $(ALL_ARCHIVES)
@echo "Merging all .a into $(COMBINED_LIB)"
rm -f $@
mkdir -p merge-tmp
for a in $(ALL_ARCHIVES); do \
( cd merge-tmp && ar x ../$$a ); \
done
( cd merge-tmp && ar rcs ../$@ *.o )
# Ensure we have a proper index
ranlib $@
# Clean up
rm -rf merge-tmp
build/libstable-diffusion.a:
@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release"
else
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release
endif
$(MAKE) $(COMBINED_LIB)
gosd.o:
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
else
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
endif
## stablediffusion (ggml)
sources/stablediffusion-ggml.cpp: sources/stablediffusion-ggml.cpp:
git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \ git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
cd sources/stablediffusion-ggml.cpp && \ cd sources/stablediffusion-ggml.cpp && \
git checkout $(STABLEDIFFUSION_GGML_VERSION) && \ git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch git submodule update --init --recursive --depth 1 --single-branch
libgosd.so: sources/stablediffusion-ggml.cpp CMakeLists.txt gosd.cpp gosd.h libsd.a: sources/stablediffusion-ggml.cpp build/libstable-diffusion.a gosd.o
mkdir -p build && \ cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
cd build && \ $(AR) rcs libsd.a gosd.o
cmake .. $(CMAKE_ARGS) && \
cmake --build . --config Release -j$(JOBS) && \
cd .. && \
mv build/libgosd.so ./
stablediffusion-ggml: main.go gosd.go libgosd.so stablediffusion-ggml: libsd.a
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o stablediffusion-ggml ./ CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
package: stablediffusion-ggml package:
bash package.sh bash package.sh
build: package build: stablediffusion-ggml package
clean: clean:
rm -rf libgosd.so build stablediffusion-ggml package sources rm -rf gosd.o libsd.a build $(COMBINED_LIB)

View File

@@ -1,14 +1,16 @@
#include <cstdint>
#define GGML_MAX_NAME 128
#include <stdio.h> #include <stdio.h>
#include <string.h> #include <string.h>
#include <time.h> #include <time.h>
#include <iostream>
#include <random>
#include <string> #include <string>
#include <vector> #include <vector>
#include <filesystem>
#include "gosd.h" #include "gosd.h"
// #include "preprocessing.hpp"
#include "flux.hpp"
#include "stable-diffusion.h"
#define STB_IMAGE_IMPLEMENTATION #define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_STATIC #define STB_IMAGE_STATIC
#include "stb_image.h" #include "stb_image.h"
@@ -23,7 +25,7 @@
// Names of the sampler method, same order as enum sample_method in stable-diffusion.h // Names of the sampler method, same order as enum sample_method in stable-diffusion.h
const char* sample_method_str[] = { const char* sample_method_str[] = {
"default", "euler_a",
"euler", "euler",
"heun", "heun",
"dpm2", "dpm2",
@@ -35,89 +37,43 @@ const char* sample_method_str[] = {
"lcm", "lcm",
"ddim_trailing", "ddim_trailing",
"tcd", "tcd",
"euler_a",
}; };
static_assert(std::size(sample_method_str) == SAMPLE_METHOD_COUNT, "sample method mismatch");
// Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h // Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h
const char* schedulers[] = { const char* schedule_str[] = {
"default", "default",
"discrete", "discrete",
"karras", "karras",
"exponential", "exponential",
"ays", "ays",
"gits", "gits",
"smoothstep",
}; };
static_assert(std::size(schedulers) == SCHEDULE_COUNT, "schedulers mismatch");
sd_ctx_t* sd_c; sd_ctx_t* sd_c;
// Moved from the context (load time) to generation time params
scheduler_t scheduler = scheduler_t::DEFAULT;
sample_method_t sample_method; sample_method_t sample_method;
// Copied from the upstream CLI int load_model(char *model, char* options[], int threads, int diff) {
static void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) { fprintf (stderr, "Loading model!\n");
//SDParams* params = (SDParams*)data;
const char* level_str;
if (!log /*|| (!params->verbose && level <= SD_LOG_DEBUG)*/) { char *stableDiffusionModel = "";
return;
}
switch (level) {
case SD_LOG_DEBUG:
level_str = "DEBUG";
break;
case SD_LOG_INFO:
level_str = "INFO";
break;
case SD_LOG_WARN:
level_str = "WARN";
break;
case SD_LOG_ERROR:
level_str = "ERROR";
break;
default: /* Potential future-proofing */
level_str = "?????";
break;
}
fprintf(stderr, "[%-5s] ", level_str);
fputs(log, stderr);
fflush(stderr);
}
int load_model(const char *model, char *model_path, char* options[], int threads, int diff) {
fprintf (stderr, "Loading model: %p=%s\n", model, model);
sd_set_log_callback(sd_log_cb, NULL);
const char *stableDiffusionModel = "";
if (diff == 1 ) { if (diff == 1 ) {
stableDiffusionModel = model; stableDiffusionModel = model;
model = ""; model = "";
} }
    // decode options. Options are in the form optname:optvalue, or just optname for booleans.     // decode options. Options are in the form optname:optvalue, or just optname for booleans.
const char *clip_l_path = ""; char *clip_l_path = "";
const char *clip_g_path = ""; char *clip_g_path = "";
const char *t5xxl_path = ""; char *t5xxl_path = "";
const char *vae_path = ""; char *vae_path = "";
const char *scheduler_str = ""; char *scheduler = "";
const char *sampler = ""; char *sampler = "";
char *lora_dir = model_path;
bool lora_dir_allocated = false;
fprintf(stderr, "parsing options: %p\n", options);
// If options is not NULL, parse options // If options is not NULL, parse options
for (int i = 0; options[i] != NULL; i++) { for (int i = 0; options[i] != NULL; i++) {
const char *optname = strtok(options[i], ":"); char *optname = strtok(options[i], ":");
const char *optval = strtok(NULL, ":"); char *optval = strtok(NULL, ":");
if (optval == NULL) { if (optval == NULL) {
optval = "true"; optval = "true";
} }
@@ -135,132 +91,77 @@ int load_model(const char *model, char *model_path, char* options[], int threads
vae_path = optval; vae_path = optval;
} }
if (!strcmp(optname, "scheduler")) { if (!strcmp(optname, "scheduler")) {
scheduler_str = optval; scheduler = optval;
} }
if (!strcmp(optname, "sampler")) { if (!strcmp(optname, "sampler")) {
sampler = optval; sampler = optval;
} }
if (!strcmp(optname, "lora_dir")) {
// Path join with model dir
if (model_path && strlen(model_path) > 0) {
std::filesystem::path model_path_str(model_path);
std::filesystem::path lora_path(optval);
std::filesystem::path full_lora_path = model_path_str / lora_path;
lora_dir = strdup(full_lora_path.string().c_str());
lora_dir_allocated = true;
fprintf(stderr, "Lora dir resolved to: %s\n", lora_dir);
} else {
lora_dir = strdup(optval);
lora_dir_allocated = true;
fprintf(stderr, "No model path provided, using lora dir as-is: %s\n", lora_dir);
}
}
} }
fprintf(stderr, "parsed options\n");
int sample_method_found = -1; int sample_method_found = -1;
for (int m = 0; m < SAMPLE_METHOD_COUNT; m++) { for (int m = 0; m < N_SAMPLE_METHODS; m++) {
if (!strcmp(sampler, sample_method_str[m])) { if (!strcmp(sampler, sample_method_str[m])) {
sample_method_found = m; sample_method_found = m;
fprintf(stderr, "Found sampler: %s\n", sampler);
} }
} }
if (sample_method_found == -1) { if (sample_method_found == -1) {
fprintf(stderr, "Invalid sample method, default to EULER_A!\n"); fprintf(stderr, "Invalid sample method, default to EULER_A!\n");
sample_method_found = sample_method_t::SAMPLE_METHOD_DEFAULT; sample_method_found = EULER_A;
} }
sample_method = (sample_method_t)sample_method_found; sample_method = (sample_method_t)sample_method_found;
for (int d = 0; d < SCHEDULE_COUNT; d++) { int schedule_found = -1;
if (!strcmp(scheduler_str, schedulers[d])) { for (int d = 0; d < N_SCHEDULES; d++) {
scheduler = (scheduler_t)d; if (!strcmp(scheduler, schedule_str[d])) {
fprintf (stderr, "Found scheduler: %s\n", scheduler_str); schedule_found = d;
fprintf (stderr, "Found scheduler: %s\n", scheduler);
} }
} }
if (schedule_found == -1) {
fprintf (stderr, "Invalid scheduler! using DEFAULT\n");
schedule_found = DEFAULT;
}
schedule_t schedule = (schedule_t)schedule_found;
fprintf (stderr, "Creating context\n"); fprintf (stderr, "Creating context\n");
sd_ctx_params_t ctx_params; sd_ctx_t* sd_ctx = new_sd_ctx(model,
sd_ctx_params_init(&ctx_params); clip_l_path,
ctx_params.model_path = model; clip_g_path,
ctx_params.clip_l_path = clip_l_path; t5xxl_path,
ctx_params.clip_g_path = clip_g_path; stableDiffusionModel,
ctx_params.t5xxl_path = t5xxl_path; vae_path,
ctx_params.diffusion_model_path = stableDiffusionModel; "",
ctx_params.vae_path = vae_path; "",
ctx_params.taesd_path = ""; "",
ctx_params.control_net_path = ""; "",
ctx_params.lora_model_dir = lora_dir; "",
ctx_params.embedding_dir = ""; false,
ctx_params.vae_decode_only = false; false,
ctx_params.free_params_immediately = false; false,
ctx_params.n_threads = threads; threads,
ctx_params.rng_type = STD_DEFAULT_RNG; SD_TYPE_COUNT,
sd_ctx_t* sd_ctx = new_sd_ctx(&ctx_params); STD_DEFAULT_RNG,
schedule,
false,
false,
false,
false);
if (sd_ctx == NULL) { if (sd_ctx == NULL) {
fprintf (stderr, "failed loading model (generic error)\n"); fprintf (stderr, "failed loading model (generic error)\n");
// Clean up allocated memory
if (lora_dir_allocated && lora_dir) {
free(lora_dir);
}
return 1; return 1;
} }
fprintf (stderr, "Created context: OK\n"); fprintf (stderr, "Created context: OK\n");
sd_c = sd_ctx; sd_c = sd_ctx;
// Clean up allocated memory
if (lora_dir_allocated && lora_dir) {
free(lora_dir);
}
return 0; return 0;
} }
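For reference, the options parsed above arrive as a NULL-terminated array of "name:value" strings (a bare name is treated as the boolean true). A hypothetical call against the simplified signature on the new side of this diff, with illustrative values only:

// strtok mutates its input, so the option strings must be writable buffers.
char opt_sampler[]   = "sampler:euler";
char opt_scheduler[] = "scheduler:karras";
char *opts[] = { opt_sampler, opt_scheduler, NULL };   // NULL terminates the parsing loop
char model[] = "model.gguf";
int rc = load_model(model, opts, /* threads */ 8, /* diff */ 0);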
void sd_tiling_params_set_enabled(sd_tiling_params_t *params, bool enabled) { int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed , char *dst, float cfg_scale) {
params->enabled = enabled;
}
void sd_tiling_params_set_tile_sizes(sd_tiling_params_t *params, int tile_size_x, int tile_size_y) {
params->tile_size_x = tile_size_x;
params->tile_size_y = tile_size_y;
}
void sd_tiling_params_set_rel_sizes(sd_tiling_params_t *params, float rel_size_x, float rel_size_y) {
params->rel_size_x = rel_size_x;
params->rel_size_y = rel_size_y;
}
void sd_tiling_params_set_target_overlap(sd_tiling_params_t *params, float target_overlap) {
params->target_overlap = target_overlap;
}
sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t *params) {
return &params->vae_tiling_params;
}
sd_img_gen_params_t* sd_img_gen_params_new(void) {
sd_img_gen_params_t *params = (sd_img_gen_params_t *)std::malloc(sizeof(sd_img_gen_params_t));
sd_img_gen_params_init(params);
return params;
}
void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt) {
params->prompt = prompt;
params->negative_prompt = negative_prompt;
}
void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height) {
params->width = width;
params->height = height;
}
void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed) {
params->seed = seed;
}
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count) {
sd_image_t* results; sd_image_t* results;
@@ -268,199 +169,37 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
fprintf (stderr, "Generating image\n"); fprintf (stderr, "Generating image\n");
p->sample_params.guidance.txt_cfg = cfg_scale; results = txt2img(sd_c,
p->sample_params.guidance.slg.layers = skip_layers.data(); text,
p->sample_params.guidance.slg.layer_count = skip_layers.size(); negativeText,
p->sample_params.sample_method = sample_method; -1, //clip_skip
p->sample_params.sample_steps = steps; cfg_scale, // sfg_scale
p->sample_params.scheduler = scheduler; 3.5f,
0, // eta
int width = p->width; width,
int height = p->height; height,
sample_method,
// Handle input image for img2img steps,
bool has_input_image = (src_image != NULL && strlen(src_image) > 0); seed,
bool has_mask_image = (mask_image != NULL && strlen(mask_image) > 0); 1,
NULL,
uint8_t* input_image_buffer = NULL; 0.9f,
uint8_t* mask_image_buffer = NULL; 20.f,
std::vector<uint8_t> default_mask_image_vec; false,
"",
if (has_input_image) { skip_layers.data(),
fprintf(stderr, "Loading input image: %s\n", src_image); skip_layers.size(),
0,
int c = 0; 0.01,
int img_width = 0; 0.2);
int img_height = 0;
input_image_buffer = stbi_load(src_image, &img_width, &img_height, &c, 3);
if (input_image_buffer == NULL) {
fprintf(stderr, "Failed to load input image from '%s'\n", src_image);
return 1;
}
if (c < 3) {
fprintf(stderr, "Input image must have at least 3 channels, got %d\n", c);
free(input_image_buffer);
return 1;
}
// Resize input image if dimensions don't match
if (img_width != width || img_height != height) {
fprintf(stderr, "Resizing input image from %dx%d to %dx%d\n", img_width, img_height, width, height);
uint8_t* resized_image_buffer = (uint8_t*)malloc(height * width * 3);
if (resized_image_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized image\n");
free(input_image_buffer);
return 1;
}
stbir_resize(input_image_buffer, img_width, img_height, 0,
resized_image_buffer, width, height, 0, STBIR_TYPE_UINT8,
3, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(input_image_buffer);
input_image_buffer = resized_image_buffer;
}
p->init_image = {(uint32_t)width, (uint32_t)height, 3, input_image_buffer};
p->strength = strength;
fprintf(stderr, "Using img2img with strength: %.2f\n", strength);
} else {
// No input image, use empty image for text-to-image
p->init_image = {(uint32_t)width, (uint32_t)height, 3, NULL};
p->strength = 0.0f;
}
// Handle mask image for inpainting
if (has_mask_image) {
fprintf(stderr, "Loading mask image: %s\n", mask_image);
int c = 0;
int mask_width = 0;
int mask_height = 0;
mask_image_buffer = stbi_load(mask_image, &mask_width, &mask_height, &c, 1);
if (mask_image_buffer == NULL) {
fprintf(stderr, "Failed to load mask image from '%s'\n", mask_image);
if (input_image_buffer) free(input_image_buffer);
return 1;
}
// Resize mask if dimensions don't match
if (mask_width != width || mask_height != height) {
fprintf(stderr, "Resizing mask image from %dx%d to %dx%d\n", mask_width, mask_height, width, height);
uint8_t* resized_mask_buffer = (uint8_t*)malloc(height * width);
if (resized_mask_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized mask\n");
free(mask_image_buffer);
if (input_image_buffer) free(input_image_buffer);
return 1;
}
stbir_resize(mask_image_buffer, mask_width, mask_height, 0,
resized_mask_buffer, width, height, 0, STBIR_TYPE_UINT8,
1, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(mask_image_buffer);
mask_image_buffer = resized_mask_buffer;
}
p->mask_image = {(uint32_t)width, (uint32_t)height, 1, mask_image_buffer};
fprintf(stderr, "Using inpainting with mask\n");
} else {
// No mask image, create default full mask
default_mask_image_vec.resize(width * height, 255);
p->mask_image = {(uint32_t)width, (uint32_t)height, 1, default_mask_image_vec.data()};
}
// Handle reference images
std::vector<sd_image_t> ref_images_vec;
std::vector<uint8_t*> ref_image_buffers;
if (ref_images_count > 0 && ref_images != NULL) {
fprintf(stderr, "Loading %d reference images\n", ref_images_count);
for (int i = 0; i < ref_images_count; i++) {
if (ref_images[i] == NULL || strlen(ref_images[i]) == 0) {
continue;
}
fprintf(stderr, "Loading reference image %d: %s\n", i + 1, ref_images[i]);
int c = 0;
int ref_width = 0;
int ref_height = 0;
uint8_t* ref_image_buffer = stbi_load(ref_images[i], &ref_width, &ref_height, &c, 3);
if (ref_image_buffer == NULL) {
fprintf(stderr, "Failed to load reference image from '%s'\n", ref_images[i]);
continue;
}
if (c < 3) {
fprintf(stderr, "Reference image must have at least 3 channels, got %d\n", c);
free(ref_image_buffer);
continue;
}
// Resize reference image if dimensions don't match
if (ref_width != width || ref_height != height) {
fprintf(stderr, "Resizing reference image from %dx%d to %dx%d\n", ref_width, ref_height, width, height);
uint8_t* resized_ref_buffer = (uint8_t*)malloc(height * width * 3);
if (resized_ref_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized reference image\n");
free(ref_image_buffer);
continue;
}
stbir_resize(ref_image_buffer, ref_width, ref_height, 0,
resized_ref_buffer, width, height, 0, STBIR_TYPE_UINT8,
3, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(ref_image_buffer);
ref_image_buffer = resized_ref_buffer;
}
ref_image_buffers.push_back(ref_image_buffer);
ref_images_vec.push_back({(uint32_t)width, (uint32_t)height, 3, ref_image_buffer});
}
if (!ref_images_vec.empty()) {
p->ref_images = ref_images_vec.data();
p->ref_images_count = ref_images_vec.size();
fprintf(stderr, "Using %zu reference images\n", ref_images_vec.size());
}
}
results = generate_image(sd_c, p);
std::free(p);
if (results == NULL) { if (results == NULL) {
fprintf (stderr, "NO results\n"); fprintf (stderr, "NO results\n");
if (input_image_buffer) free(input_image_buffer);
if (mask_image_buffer) free(mask_image_buffer);
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
return 1; return 1;
} }
if (results[0].data == NULL) { if (results[0].data == NULL) {
fprintf (stderr, "Results with no data\n"); fprintf (stderr, "Results with no data\n");
if (input_image_buffer) free(input_image_buffer);
if (mask_image_buffer) free(mask_image_buffer);
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
return 1; return 1;
} }
@@ -476,21 +215,17 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
results[0].data, 0, NULL); results[0].data, 0, NULL);
fprintf (stderr, "Saved resulting image to '%s'\n", dst); fprintf (stderr, "Saved resulting image to '%s'\n", dst);
// Clean up // TODO: free results. Why does it crash?
free(results[0].data); free(results[0].data);
results[0].data = NULL; results[0].data = NULL;
free(results); free(results);
    if (input_image_buffer) free(input_image_buffer);     fprintf (stderr, "gen_image is done: %s\n", dst);
if (mask_image_buffer) free(mask_image_buffer);
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
fprintf (stderr, "gen_image is done: %s", dst);
return 0; return 0;
} }
int unload() { int unload() {
free_sd_ctx(sd_c); free_sd_ctx(sd_c);
return 0;
} }

View File

@@ -1,10 +1,15 @@
package main package main
// #cgo CXXFLAGS: -I${SRCDIR}/sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/sources/stablediffusion-ggml.cpp -I${SRCDIR}/sources/stablediffusion-ggml.cpp/ggml/include
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
// #include <gosd.h>
// #include <stdlib.h>
import "C"
import ( import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"unsafe" "unsafe"
@@ -20,45 +25,20 @@ type SDGGML struct {
cfgScale float32 cfgScale float32
} }
var (
LoadModel func(model, model_apth string, options []uintptr, threads int32, diff int) int
GenImage func(params uintptr, steps int, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []string, refImagesCount int) int
TilingParamsSetEnabled func(params uintptr, enabled bool)
TilingParamsSetTileSizes func(params uintptr, tileSizeX int, tileSizeY int)
TilingParamsSetRelSizes func(params uintptr, relSizeX float32, relSizeY float32)
TilingParamsSetTargetOverlap func(params uintptr, targetOverlap float32)
ImgGenParamsNew func() uintptr
ImgGenParamsSetPrompts func(params uintptr, prompt string, negativePrompt string)
ImgGenParamsSetDimensions func(params uintptr, width int, height int)
ImgGenParamsSetSeed func(params uintptr, seed int64)
ImgGenParamsGetVaeTilingParams func(params uintptr) uintptr
)
// Copied from Purego internal/strings
// TODO: We should upstream sending []string
func hasSuffix(s, suffix string) bool {
return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
func CString(name string) *byte {
if hasSuffix(name, "\x00") {
return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
}
b := make([]byte, len(name)+1)
copy(b, name)
return &b[0]
}
func (sd *SDGGML) Load(opts *pb.ModelOptions) error { func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
sd.threads = int(opts.Threads) sd.threads = int(opts.Threads)
modelPath := opts.ModelPath modelFile := C.CString(opts.ModelFile)
defer C.free(unsafe.Pointer(modelFile))
modelFile := opts.ModelFile var options **C.char
modelPathC := modelPath // prepare the options array to pass to C
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
length := C.size_t(len(opts.Options))
options = (**C.char)(C.malloc(length * size))
view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0:len(opts.Options):len(opts.Options)]
var diffusionModel int var diffusionModel int
@@ -83,63 +63,31 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
fmt.Fprintf(os.Stderr, "Options: %+v\n", oo) fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
// At the time of writing Purego doesn't recurse into slices and convert Go strings to pointers so we need to do that for i, x := range oo {
var keepAlive []any view[i] = C.CString(x)
options := make([]uintptr, len(oo), len(oo)+1)
for i, op := range oo {
bytep := CString(op)
options[i] = uintptr(unsafe.Pointer(bytep))
keepAlive = append(keepAlive, bytep)
} }
sd.cfgScale = opts.CFGScale sd.cfgScale = opts.CFGScale
ret := LoadModel(modelFile, modelPathC, options, opts.Threads, diffusionModel) ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
if ret != 0 { if ret != 0 {
return fmt.Errorf("could not load model") return fmt.Errorf("could not load model")
} }
runtime.KeepAlive(keepAlive)
return nil return nil
} }
func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error { func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
t := opts.PositivePrompt t := C.CString(opts.PositivePrompt)
dst := opts.Dst defer C.free(unsafe.Pointer(t))
negative := opts.NegativePrompt
srcImage := opts.Src
var maskImage string dst := C.CString(opts.Dst)
if opts.EnableParameters != "" { defer C.free(unsafe.Pointer(dst))
if strings.Contains(opts.EnableParameters, "mask:") {
parts := strings.Split(opts.EnableParameters, "mask:")
if len(parts) > 1 {
maskPath := strings.TrimSpace(parts[1])
if maskPath != "" {
maskImage = maskPath
}
}
}
}
refImagesCount := len(opts.RefImages) negative := C.CString(opts.NegativePrompt)
refImages := make([]string, refImagesCount, refImagesCount+1) defer C.free(unsafe.Pointer(negative))
copy(refImages, opts.RefImages)
*(*uintptr)(unsafe.Add(unsafe.Pointer(&refImages), refImagesCount)) = 0
// Default strength for img2img (0.75 is a good default) ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
strength := float32(0.75)
// free'd by GenImage
p := ImgGenParamsNew()
ImgGenParamsSetPrompts(p, t, negative)
ImgGenParamsSetDimensions(p, int(opts.Width), int(opts.Height))
ImgGenParamsSetSeed(p, int64(opts.Seed))
vaep := ImgGenParamsGetVaeTilingParams(p)
TilingParamsSetEnabled(vaep, false)
ret := GenImage(p, int(opts.Step), dst, sd.cfgScale, srcImage, strength, maskImage, refImages, refImagesCount)
if ret != 0 { if ret != 0 {
return fmt.Errorf("inference failed") return fmt.Errorf("inference failed")
} }

View File

@@ -1,23 +1,8 @@
#include <cstdint>
#include "stable-diffusion.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
int load_model(char *model, char* options[], int threads, int diffusionModel);
void sd_tiling_params_set_enabled(sd_tiling_params_t *params, bool enabled); int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
void sd_tiling_params_set_tile_sizes(sd_tiling_params_t *params, int tile_size_x, int tile_size_y);
void sd_tiling_params_set_rel_sizes(sd_tiling_params_t *params, float rel_size_x, float rel_size_y);
void sd_tiling_params_set_target_overlap(sd_tiling_params_t *params, float target_overlap);
sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t *params);
sd_img_gen_params_t* sd_img_gen_params_new(void);
void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt);
void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height);
void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed);
int load_model(const char *model, char *model_path, char* options[], int threads, int diffusionModel);
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

View File

@@ -1,9 +1,9 @@
package main package main
// Note: this is started internally by LocalAI and a server is allocated for each model
import ( import (
"flag" "flag"
"github.com/ebitengine/purego"
grpc "github.com/mudler/LocalAI/pkg/grpc" grpc "github.com/mudler/LocalAI/pkg/grpc"
) )
@@ -11,36 +11,7 @@ var (
addr = flag.String("addr", "localhost:50051", "the address to connect to") addr = flag.String("addr", "localhost:50051", "the address to connect to")
) )
type LibFuncs struct {
FuncPtr any
Name string
}
func main() { func main() {
gosd, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
if err != nil {
panic(err)
}
libFuncs := []LibFuncs{
{&LoadModel, "load_model"},
{&GenImage, "gen_image"},
{&TilingParamsSetEnabled, "sd_tiling_params_set_enabled"},
{&TilingParamsSetTileSizes, "sd_tiling_params_set_tile_sizes"},
{&TilingParamsSetRelSizes, "sd_tiling_params_set_rel_sizes"},
{&TilingParamsSetTargetOverlap, "sd_tiling_params_set_target_overlap"},
{&ImgGenParamsNew, "sd_img_gen_params_new"},
{&ImgGenParamsSetPrompts, "sd_img_gen_params_set_prompts"},
{&ImgGenParamsSetDimensions, "sd_img_gen_params_set_dimensions"},
{&ImgGenParamsSetSeed, "sd_img_gen_params_set_seed"},
{&ImgGenParamsGetVaeTilingParams, "sd_img_gen_params_get_vae_tiling_params"},
}
for _, lf := range libFuncs {
purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
}
flag.Parse() flag.Parse()
if err := grpc.StartServer(*addr, &SDGGML{}); err != nil { if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {

View File

@@ -10,9 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
# Create lib directory # Create lib directory
mkdir -p $CURDIR/package/lib mkdir -p $CURDIR/package/lib
cp -avf $CURDIR/libgosd.so $CURDIR/package/ cp -avrf $CURDIR/stablediffusion-ggml $CURDIR/package/
cp -avf $CURDIR/stablediffusion-ggml $CURDIR/package/ cp -rfv $CURDIR/run.sh $CURDIR/package/
cp -fv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries # Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -43,13 +42,11 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2 cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1 cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0 cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
elif [ $(uname -s) = "Darwin" ]; then
echo "Detected Darwin"
else else
echo "Error: Could not detect architecture" echo "Error: Could not detect architecture"
exit 1 exit 1
fi fi
echo "Packaging completed successfully" echo "Packaging completed successfully"
ls -liah $CURDIR/package/ ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/ ls -liah $CURDIR/package/lib/

View File

@@ -1,7 +0,0 @@
.cache/
sources/
build/
package/
whisper
libgowhisper.so

View File

@@ -1,16 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(gowhisper LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
add_subdirectory(./sources/whisper.cpp)
add_library(gowhisper MODULE gowhisper.cpp)
target_link_libraries(gowhisper PRIVATE whisper ggml)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
    target_link_libraries(gowhisper PRIVATE stdc++fs)
endif()
set_property(TARGET gowhisper PROPERTY CXX_STANDARD 17)
set_target_properties(gowhisper PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -1,53 +1,110 @@
CMAKE_ARGS?= GOCMD=go
BUILD_TYPE?=
NATIVE?=false NATIVE?=false
GOCMD?=go BUILD_TYPE?=
GO_TAGS?= CMAKE_ARGS?=
JOBS?=$(shell nproc --ignore=1)
# whisper.cpp version # whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=44fa2f647cf2a6953493b21ab83b50d5f5dbc483 WHISPER_CPP_VERSION?=1f5cf0b2888402d57bb17b2029b2caa97e5f3baf
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF export WHISPER_CMAKE_ARGS?=-DBUILD_SHARED_LIBS=OFF
export WHISPER_DIR=$(abspath ./sources/whisper.cpp)
export WHISPER_INCLUDE_PATH=$(WHISPER_DIR)/include:$(WHISPER_DIR)/ggml/include
export WHISPER_LIBRARY_PATH=$(WHISPER_DIR)/build/src/:$(WHISPER_DIR)/build/ggml/src
CGO_LDFLAGS_WHISPER?=
CGO_LDFLAGS_WHISPER+=-lggml
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
CUDA_LIBPATH?=/usr/local/cuda/lib64/
ONEAPI_VERSION?=2025.2
# IF native is false, we add -DGGML_NATIVE=OFF to CMAKE_ARGS
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
WHISPER_CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false) ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas) ifeq ($(BUILD_TYPE),cublas)
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
CMAKE_ARGS+=-DGGML_CUDA=ON CMAKE_ARGS+=-DGGML_CUDA=ON
CGO_LDFLAGS_WHISPER+=-lcufft -lggml-cuda
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-cuda/
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas) else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas) else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DGGML_HIPBLAS=ON ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm
LD_LIBRARY_PATH ?= /opt/rocm/lib:/opt/rocm/llvm/lib
export STABLE_BUILD_TYPE=
export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang
# GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
# AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib -L$(CURRENT_MAKEFILE_DIR)/sources/whisper.cpp/build/ggml/src/ggml-hip/ -lggml-hip
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=ON CMAKE_ARGS+=-DGGML_VULKAN=1
CGO_LDFLAGS_WHISPER+=-lggml-vulkan -lvulkan
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-vulkan/
else ifeq ($(OS),Darwin) else ifeq ($(OS),Darwin)
ifeq ($(BUILD_TYPE),)
BUILD_TYPE=metal
endif
ifneq ($(BUILD_TYPE),metal) ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DGGML_METAL=OFF CMAKE_ARGS+=-DGGML_METAL=OFF
CGO_LDFLAGS_WHISPER+=-lggml-blas
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-blas
else else
CMAKE_ARGS+=-DGGML_METAL=ON CMAKE_ARGS+=-DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
CMAKE_ARGS+=-DGGML_METAL_USE_BF16=ON
CMAKE_ARGS+=-DGGML_OPENMP=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_EXAMPLES=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_TESTS=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_SERVER=OFF
CGO_LDFLAGS += -framework Accelerate
CGO_LDFLAGS_WHISPER+=-lggml-metal -lggml-blas
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-metal/:$(WHISPER_DIR)/build/ggml/src/ggml-blas
endif endif
TARGET+=--target ggml-metal
endif endif
ifeq ($(BUILD_TYPE),sycl_f16) ifneq (,$(findstring sycl,$(BUILD_TYPE)))
export CC=icx
export CXX=icpx
CGO_LDFLAGS_WHISPER += -fsycl -L${DNNLROOT}/lib -rpath ${ONEAPI_ROOT}/${ONEAPI_VERSION}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL -lggml-sycl
CGO_LDFLAGS_WHISPER += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS_WHISPER += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS_WHISPER += $(shell pkg-config --cflags mkl-static-lp64-gomp )
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-sycl/
CMAKE_ARGS+=-DGGML_SYCL=ON \ CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \ -DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \ -DCMAKE_CXX_COMPILER=icpx \
-DGGML_SYCL_F16=ON -DCMAKE_CXX_FLAGS="-fsycl"
endif endif
ifeq ($(BUILD_TYPE),sycl_f32) ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DGGML_SYCL=ON \ CMAKE_ARGS+=-DGGML_SYCL_F16=ON
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx
endif endif
ifneq ($(OS),Darwin)
CGO_LDFLAGS_WHISPER+=-lgomp
endif
## whisper
sources/whisper.cpp: sources/whisper.cpp:
mkdir -p sources/whisper.cpp mkdir -p sources/whisper.cpp
cd sources/whisper.cpp && \ cd sources/whisper.cpp && \
@@ -57,21 +114,18 @@ sources/whisper.cpp:
git checkout $(WHISPER_CPP_VERSION) && \ git checkout $(WHISPER_CPP_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch git submodule update --init --recursive --depth 1 --single-branch
libgowhisper.so: sources/whisper.cpp CMakeLists.txt gowhisper.cpp gowhisper.h sources/whisper.cpp/build/src/libwhisper.a: sources/whisper.cpp
mkdir -p build && \ cd sources/whisper.cpp && cmake $(CMAKE_ARGS) $(WHISPER_CMAKE_ARGS) . -B ./build
cd build && \ cd sources/whisper.cpp/build && cmake --build . --config Release
cmake .. $(CMAKE_ARGS) && \
cmake --build . --config Release -j$(JOBS) && \
cd .. && \
mv build/libgowhisper.so ./
whisper: main.go gowhisper.go libgowhisper.so whisper: sources/whisper.cpp sources/whisper.cpp/build/src/libwhisper.a
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o whisper ./ $(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH="${WHISPER_INCLUDE_PATH}" LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" LD_LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" \
CGO_CXXFLAGS="$(CGO_CXXFLAGS_WHISPER)" \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o whisper ./
package: whisper package:
bash package.sh bash package.sh
build: package build: whisper package
clean:
rm -rf libgowhisper.o build whisper

View File

@@ -1,154 +0,0 @@
#include "gowhisper.h"
#include "ggml-backend.h"
#include "whisper.h"
#include <vector>
static struct whisper_vad_context *vctx;
static struct whisper_context *ctx;
static std::vector<float> flat_segs;
static void ggml_log_cb(enum ggml_log_level level, const char *log,
void *data) {
const char *level_str;
if (!log) {
return;
}
switch (level) {
case GGML_LOG_LEVEL_DEBUG:
level_str = "DEBUG";
break;
case GGML_LOG_LEVEL_INFO:
level_str = "INFO";
break;
case GGML_LOG_LEVEL_WARN:
level_str = "WARN";
break;
case GGML_LOG_LEVEL_ERROR:
level_str = "ERROR";
break;
default: /* Potential future-proofing */
level_str = "?????";
break;
}
fprintf(stderr, "[%-5s] ", level_str);
fputs(log, stderr);
fflush(stderr);
}
int load_model(const char *const model_path) {
whisper_log_set(ggml_log_cb, nullptr);
ggml_backend_load_all();
struct whisper_context_params cparams = whisper_context_default_params();
ctx = whisper_init_from_file_with_params(model_path, cparams);
if (ctx == nullptr) {
fprintf(stderr, "error: Also failed to init model as transcriber\n");
return 1;
}
return 0;
}
int load_model_vad(const char *const model_path) {
whisper_log_set(ggml_log_cb, nullptr);
ggml_backend_load_all();
struct whisper_vad_context_params vcparams =
whisper_vad_default_context_params();
// XXX: Overridden to false in upstream due to performance?
// vcparams.use_gpu = true;
vctx = whisper_vad_init_from_file_with_params(model_path, vcparams);
if (vctx == nullptr) {
fprintf(stderr, "error: Failed to init model as VAD\n");
return 1;
}
return 0;
}
int vad(float pcmf32[], size_t pcmf32_len, float **segs_out,
size_t *segs_out_len) {
if (!whisper_vad_detect_speech(vctx, pcmf32, pcmf32_len)) {
fprintf(stderr, "error: failed to detect speech\n");
return 1;
}
struct whisper_vad_params params = whisper_vad_default_params();
struct whisper_vad_segments *segs =
whisper_vad_segments_from_probs(vctx, params);
size_t segn = whisper_vad_segments_n_segments(segs);
// fprintf(stderr, "Got segments %zd\n", segn);
flat_segs.clear();
for (int i = 0; i < segn; i++) {
flat_segs.push_back(whisper_vad_segments_get_segment_t0(segs, i));
flat_segs.push_back(whisper_vad_segments_get_segment_t1(segs, i));
}
// fprintf(stderr, "setting out variables: %p=%p -> %p, %p=%zx -> %zx\n",
// segs_out, *segs_out, flat_segs.data(), segs_out_len, *segs_out_len,
// flat_segs.size());
*segs_out = flat_segs.data();
*segs_out_len = flat_segs.size();
// fprintf(stderr, "freeing segs\n");
whisper_vad_free_segments(segs);
// fprintf(stderr, "returning\n");
return 0;
}
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len) {
whisper_full_params wparams =
whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
wparams.n_threads = threads;
if (*lang != '\0')
wparams.language = lang;
else {
wparams.language = nullptr;
}
wparams.translate = translate;
wparams.debug_mode = true;
wparams.print_progress = true;
wparams.tdrz_enable = tdrz;
fprintf(stderr, "info: Enable tdrz: %d\n", tdrz);
if (whisper_full(ctx, wparams, pcmf32, pcmf32_len)) {
fprintf(stderr, "error: transcription failed\n");
return 1;
}
*segs_out_len = whisper_full_n_segments(ctx);
return 0;
}
const char *get_segment_text(int i) {
return whisper_full_get_segment_text(ctx, i);
}
int64_t get_segment_t0(int i) { return whisper_full_get_segment_t0(ctx, i); }
int64_t get_segment_t1(int i) { return whisper_full_get_segment_t1(ctx, i); }
int n_tokens(int i) { return whisper_full_n_tokens(ctx, i); }
int32_t get_token_id(int i, int j) {
return whisper_full_get_token_id(ctx, i, j);
}
bool get_segment_speaker_turn_next(int i) {
return whisper_full_get_segment_speaker_turn_next(ctx, i);
}

View File

@@ -1,161 +0,0 @@
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"unsafe"
"github.com/go-audio/wav"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/utils"
)
var (
CppLoadModel func(modelPath string) int
CppLoadModelVAD func(modelPath string) int
CppVAD func(pcmf32 []float32, pcmf32Size uintptr, segsOut unsafe.Pointer, segsOutLen unsafe.Pointer) int
CppTranscribe func(threads uint32, lang string, translate bool, diarize bool, pcmf32 []float32, pcmf32Len uintptr, segsOutLen unsafe.Pointer) int
CppGetSegmentText func(i int) string
CppGetSegmentStart func(i int) int64
CppGetSegmentEnd func(i int) int64
CppNTokens func(i int) int
CppGetTokenID func(i int, j int) int
CppGetSegmentSpeakerTurnNext func(i int) bool
)
type Whisper struct {
base.SingleThread
}
func (w *Whisper) Load(opts *pb.ModelOptions) error {
vadOnly := false
for _, oo := range opts.Options {
if oo == "vad_only" {
vadOnly = true
} else {
fmt.Fprintf(os.Stderr, "Unrecognized option: %v\n", oo)
}
}
if vadOnly {
if ret := CppLoadModelVAD(opts.ModelFile); ret != 0 {
return fmt.Errorf("Failed to load Whisper VAD model")
}
return nil
}
if ret := CppLoadModel(opts.ModelFile); ret != 0 {
return fmt.Errorf("Failed to load Whisper transcription model")
}
return nil
}
func (w *Whisper) VAD(req *pb.VADRequest) (pb.VADResponse, error) {
audio := req.Audio
// We expect 0xdeadbeef to be overwritten and if we see it in a stack trace we know it wasn't
segsPtr, segsLen := uintptr(0xdeadbeef), uintptr(0xdeadbeef)
segsPtrPtr, segsLenPtr := unsafe.Pointer(&segsPtr), unsafe.Pointer(&segsLen)
if ret := CppVAD(audio, uintptr(len(audio)), segsPtrPtr, segsLenPtr); ret != 0 {
return pb.VADResponse{}, fmt.Errorf("Failed VAD")
}
// Happens when CPP vector has not had any elements pushed to it
if segsPtr == 0 {
return pb.VADResponse{
Segments: []*pb.VADSegment{},
}, nil
}
// The unsafeptr warning is caused by segsPtr being on the stack and therefore being subject to stack copying, AFAICT;
// however, the stack shouldn't have grown between setting segsPtr and now, and the memory pointed to is allocated by C++.
segs := unsafe.Slice((*float32)(unsafe.Pointer(segsPtr)), segsLen)
vadSegments := []*pb.VADSegment{}
for i := range len(segs) >> 1 {
s := segs[2*i] / 100
t := segs[2*i+1] / 100
vadSegments = append(vadSegments, &pb.VADSegment{
Start: s,
End: t,
})
}
return pb.VADResponse{
Segments: vadSegments,
}, nil
}
func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
dir, err := os.MkdirTemp("", "whisper")
if err != nil {
return pb.TranscriptResult{}, err
}
defer os.RemoveAll(dir)
convertedPath := filepath.Join(dir, "converted.wav")
if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
return pb.TranscriptResult{}, err
}
// Open samples
fh, err := os.Open(convertedPath)
if err != nil {
return pb.TranscriptResult{}, err
}
defer fh.Close()
// Read samples
d := wav.NewDecoder(fh)
buf, err := d.FullPCMBuffer()
if err != nil {
return pb.TranscriptResult{}, err
}
data := buf.AsFloat32Buffer().Data
segsLen := uintptr(0xdeadbeef)
segsLenPtr := unsafe.Pointer(&segsLen)
if ret := CppTranscribe(opts.Threads, opts.Language, opts.Translate, opts.Diarize, data, uintptr(len(data)), segsLenPtr); ret != 0 {
return pb.TranscriptResult{}, fmt.Errorf("Failed Transcribe")
}
segments := []*pb.TranscriptSegment{}
text := ""
for i := range int(segsLen) {
s := CppGetSegmentStart(i)
t := CppGetSegmentEnd(i)
txt := strings.Clone(CppGetSegmentText(i))
tokens := make([]int32, CppNTokens(i))
if opts.Diarize && CppGetSegmentSpeakerTurnNext(i) {
txt += " [SPEAKER_TURN]"
}
for j := range tokens {
tokens[j] = int32(CppGetTokenID(i, j))
}
segment := &pb.TranscriptSegment{
Id: int32(i),
Text: txt,
Start: s, End: t,
Tokens: tokens,
}
segments = append(segments, segment)
text += " " + strings.TrimSpace(txt)
}
return pb.TranscriptResult{
Segments: segments,
Text: strings.TrimSpace(text),
}, nil
}

View File

@@ -1,17 +0,0 @@
#include <cstddef>
#include <cstdint>
extern "C" {
int load_model(const char *const model_path);
int load_model_vad(const char *const model_path);
int vad(float pcmf32[], size_t pcmf32_size, float **segs_out,
size_t *segs_out_len);
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len);
const char *get_segment_text(int i);
int64_t get_segment_t0(int i);
int64_t get_segment_t1(int i);
int n_tokens(int i);
int32_t get_token_id(int i, int j);
bool get_segment_speaker_turn_next(int i);
}

View File

@@ -1,10 +1,10 @@
package main package main
// Note: this is started internally by LocalAI and a server is allocated for each model // Note: this is started internally by LocalAI and a server is allocated for each model
import ( import (
"flag" "flag"
"github.com/ebitengine/purego"
grpc "github.com/mudler/LocalAI/pkg/grpc" grpc "github.com/mudler/LocalAI/pkg/grpc"
) )
@@ -12,34 +12,7 @@ var (
addr = flag.String("addr", "localhost:50051", "the address to connect to") addr = flag.String("addr", "localhost:50051", "the address to connect to")
) )
type LibFuncs struct {
FuncPtr any
Name string
}
func main() { func main() {
gosd, err := purego.Dlopen("./libgowhisper.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
if err != nil {
panic(err)
}
libFuncs := []LibFuncs{
{&CppLoadModel, "load_model"},
{&CppLoadModelVAD, "load_model_vad"},
{&CppVAD, "vad"},
{&CppTranscribe, "transcribe"},
{&CppGetSegmentText, "get_segment_text"},
{&CppGetSegmentStart, "get_segment_t0"},
{&CppGetSegmentEnd, "get_segment_t1"},
{&CppNTokens, "n_tokens"},
{&CppGetTokenID, "get_token_id"},
{&CppGetSegmentSpeakerTurnNext, "get_segment_speaker_turn_next"},
}
for _, lf := range libFuncs {
purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
}
flag.Parse() flag.Parse()
if err := grpc.StartServer(*addr, &Whisper{}); err != nil { if err := grpc.StartServer(*addr, &Whisper{}); err != nil {

View File

@@ -10,8 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
# Create lib directory # Create lib directory
mkdir -p $CURDIR/package/lib mkdir -p $CURDIR/package/lib
cp -avf $CURDIR/whisper $CURDIR/libgowhisper.so $CURDIR/package/ cp -avrf $CURDIR/whisper $CURDIR/package/
cp -fv $CURDIR/run.sh $CURDIR/package/ cp -rfv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries # Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -42,13 +42,11 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2 cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1 cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0 cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
elif [ $(uname -s) = "Darwin" ]; then
echo "Detected Darwin"
else else
echo "Error: Could not detect architecture" echo "Error: Could not detect architecture"
exit 1 exit 1
fi fi
echo "Packaging completed successfully" echo "Packaging completed successfully"
ls -liah $CURDIR/package/ ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/ ls -liah $CURDIR/package/lib/

View File

@@ -0,0 +1,105 @@
package main
// This is a wrapper to satisfy the GRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"os"
"path/filepath"
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
"github.com/go-audio/wav"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/utils"
)
type Whisper struct {
base.SingleThread
whisper whisper.Model
}
func (sd *Whisper) Load(opts *pb.ModelOptions) error {
// Note: the Model here is a path to a directory containing the model files
w, err := whisper.New(opts.ModelFile)
sd.whisper = w
return err
}
func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
dir, err := os.MkdirTemp("", "whisper")
if err != nil {
return pb.TranscriptResult{}, err
}
defer os.RemoveAll(dir)
convertedPath := filepath.Join(dir, "converted.wav")
if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
return pb.TranscriptResult{}, err
}
// Open samples
fh, err := os.Open(convertedPath)
if err != nil {
return pb.TranscriptResult{}, err
}
defer fh.Close()
// Read samples
d := wav.NewDecoder(fh)
buf, err := d.FullPCMBuffer()
if err != nil {
return pb.TranscriptResult{}, err
}
data := buf.AsFloat32Buffer().Data
// Process samples
context, err := sd.whisper.NewContext()
if err != nil {
return pb.TranscriptResult{}, err
}
context.SetThreads(uint(opts.Threads))
if opts.Language != "" {
context.SetLanguage(opts.Language)
} else {
context.SetLanguage("auto")
}
if opts.Translate {
context.SetTranslate(true)
}
if err := context.Process(data, nil, nil, nil); err != nil {
return pb.TranscriptResult{}, err
}
segments := []*pb.TranscriptSegment{}
text := ""
for {
s, err := context.NextSegment()
if err != nil {
break
}
var tokens []int32
for _, t := range s.Tokens {
tokens = append(tokens, int32(t.Id))
}
segment := &pb.TranscriptSegment{Id: int32(s.Num), Text: s.Text, Start: int64(s.Start), End: int64(s.End), Tokens: tokens}
segments = append(segments, segment)
text += s.Text
}
return pb.TranscriptResult{
Segments: segments,
Text: text,
}, nil
}

View File

File diff suppressed because it is too large

View File

@@ -1,190 +1,38 @@
# Python Backends for LocalAI # Common commands about conda environment
This directory contains Python-based AI backends for LocalAI, providing support for various AI models and hardware acceleration targets. ## Create a new empty conda environment
## Overview ```
conda create --name <env-name> python=<your version> -y
The Python backends use a unified build system based on `libbackend.sh` that provides: conda create --name autogptq python=3.11 -y
- **Automatic virtual environment management** with support for both `uv` and `pip`
- **Hardware-specific dependency installation** (CPU, CUDA, Intel, MLX, etc.)
- **Portable Python support** for standalone deployments
- **Consistent backend execution** across different environments
## Available Backends
### Core AI Models
- **transformers** - Hugging Face Transformers framework (PyTorch-based)
- **vllm** - High-performance LLM inference engine
- **mlx** - Apple Silicon optimized ML framework
- **exllama2** - ExLlama2 quantized models
### Audio & Speech
- **bark** - Text-to-speech synthesis
- **coqui** - Coqui TTS models
- **faster-whisper** - Fast Whisper speech recognition
- **kitten-tts** - Lightweight TTS
- **mlx-audio** - Apple Silicon audio processing
- **chatterbox** - TTS model
- **kokoro** - TTS models
### Computer Vision
- **diffusers** - Stable Diffusion and image generation
- **mlx-vlm** - Vision-language models for Apple Silicon
- **rfdetr** - Object detection models
### Specialized
- **rerankers** - Text reranking models
## Quick Start
### Prerequisites
- Python 3.10+ (default: 3.10.18)
- `uv` package manager (recommended) or `pip`
- Appropriate hardware drivers for your target (CUDA, Intel, etc.)
### Installation
Each backend can be installed individually:
```bash
# Navigate to a specific backend
cd backend/python/transformers
# Install dependencies
make transformers
# or
bash install.sh
# Run the backend
make run
# or
bash run.sh
``` ```
### Using the Unified Build System ## To activate the environment
The `libbackend.sh` script provides consistent commands across all backends: As of conda 4.4
```
```bash conda activate autogptq
# Source the library in your backend script
source $(dirname $0)/../common/libbackend.sh
# Install requirements (automatically handles hardware detection)
installRequirements
# Start the backend server
startBackend $@
# Run tests
runUnittests
``` ```
## Hardware Targets The conda version older than 4.4
The build system automatically detects and configures for different hardware: ```
source activate autogptq
- **CPU** - Standard CPU-only builds
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 11/12)
- **Intel** - Intel XPU/GPU optimization
- **MLX** - Apple Silicon (M1/M2/M3) optimization
- **HIP** - AMD GPU acceleration
### Target-Specific Requirements
Backends can specify hardware-specific dependencies (a resolution example follows this list):
- `requirements.txt` - Base requirements
- `requirements-cpu.txt` - CPU-specific packages
- `requirements-cublas11.txt` - CUDA 11 packages
- `requirements-cublas12.txt` - CUDA 12 packages
- `requirements-intel.txt` - Intel-optimized packages
- `requirements-mps.txt` - Apple Silicon packages
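As a rough illustration of how `installRequirements` in `libbackend.sh` resolves these files: the sketch below assumes a CUDA 12 build (`BUILD_TYPE=cublas`, `CUDA_MAJOR_VERSION=12`, so the build profile becomes `cublas12`) and a backend whose `install.sh` calls `installRequirements`; only files that actually exist in the backend directory are installed.
```bash
# Hypothetical CUDA 12 install for the transformers backend (sketch)
cd backend/python/transformers
BUILD_TYPE=cublas CUDA_MAJOR_VERSION=12 bash install.sh
# installRequirements then tries, in order, skipping files that don't exist:
#   requirements-install.txt
#   requirements.txt
#   requirements-cublas.txt         # requirements-${BUILD_TYPE}.txt
#   requirements-cublas12.txt       # requirements-${BUILD_PROFILE}.txt
#   requirements-after.txt
#   requirements-cublas12-after.txt # requirements-${BUILD_PROFILE}-after.txt
```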
## Configuration Options
### Environment Variables
- `PYTHON_VERSION` - Python version (default: 3.10)
- `PYTHON_PATCH` - Python patch version (default: 18)
- `BUILD_TYPE` - Force specific build target
- `USE_PIP` - Use pip instead of uv (default: false)
- `PORTABLE_PYTHON` - Enable portable Python builds
- `LIMIT_TARGETS` - Restrict backend to specific targets
### Example: CUDA 12 Only Backend
```bash
# In your backend script
LIMIT_TARGETS="cublas12"
source $(dirname $0)/../common/libbackend.sh
``` ```
### Example: Intel-Optimized Backend ## Install the packages to your environment
```bash Sometimes you need to install the packages from the conda-forge channel
# In your backend script
LIMIT_TARGETS="intel" By using `conda`
source $(dirname $0)/../common/libbackend.sh ```
conda install <your-package-name>
conda install -c conda-forge <your package-name>
``` ```
## Development Or by using `pip`
### Adding a New Backend
1. Create a new directory in `backend/python/`
2. Copy the template structure from `common/template/`
3. Implement your `backend.py` with the required gRPC interface
4. Add appropriate requirements files for your target hardware
5. Use `libbackend.sh` for consistent build and execution (see the sketches below)
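For steps 4–5, the wrapper scripts are usually thin shims over `libbackend.sh`. The sketches below are minimal, hedged examples (exact contents vary per backend, and `backend.py` itself still has to implement the gRPC `BackendServicer` generated from `backend.proto`):
```bash
#!/usr/bin/env bash
# install.sh (sketch): install dependencies for this backend
source $(dirname $0)/../common/libbackend.sh

installRequirements
```
```bash
#!/usr/bin/env bash
# run.sh (sketch): start the gRPC server for this backend
source $(dirname $0)/../common/libbackend.sh

startBackend $@
```
`startBackend` picks up `backend.py` (or `server.py` / `${BACKEND_NAME}.py`) automatically from the backend directory.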
### Testing
```bash
# Run backend tests
make test
# or
bash test.sh
``` ```
pip install <your-package-name>
### Building
```bash
# Install dependencies
make <backend-name>
# Clean build artifacts
make clean
``` ```
## Architecture
Each backend follows a consistent structure:
```
backend-name/
├── backend.py # Main backend implementation
├── requirements.txt # Base dependencies
├── requirements-*.txt # Hardware-specific dependencies
├── install.sh # Installation script
├── run.sh # Execution script
├── test.sh # Test script
├── Makefile # Build targets
└── test.py # Unit tests
```
## Troubleshooting
### Common Issues
1. **Missing dependencies**: Ensure all requirements files are properly configured
2. **Hardware detection**: Check that `BUILD_TYPE` matches your system
3. **Python version**: Verify Python 3.10+ is available
4. **Virtual environment**: Use `ensureVenv` to create/activate environments (see the quick check below)
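For issues 2–4, a quick manual check from inside a backend directory can help. This is only a sketch; `transformers` is used as a placeholder backend name:
```bash
cd backend/python/transformers                        # any backend directory
echo "BUILD_TYPE=${BUILD_TYPE:-<empty: CPU build>}"   # 2. hardware detection
ls -d /opt/intel >/dev/null 2>&1 && echo "Intel build profile will be selected"
python3 --version                                     # 3. needs Python 3.10+
ls -d venv >/dev/null 2>&1 || echo "no venv yet: run 'bash install.sh' (installRequirements calls ensureVenv)"
```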
## Contributing
When adding new backends or modifying existing ones:
1. Follow the established directory structure
2. Use `libbackend.sh` for consistent behavior
3. Include appropriate requirements files for all target hardware
4. Add comprehensive tests
5. Update this README if adding new backend types

View File

@@ -1,23 +1,29 @@
.PHONY: ttsbark .PHONY: ttsbark
ttsbark: ttsbark: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: ttsbark run: protogen
@echo "Running bark..." @echo "Running bark..."
bash run.sh bash run.sh
@echo "bark run." @echo "bark run."
.PHONY: test .PHONY: test
test: ttsbark test: protogen
@echo "Testing bark..." @echo "Testing bark..."
bash test.sh bash test.sh
@echo "bark tested." @echo "bark tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -1,5 +1,5 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi torchaudio==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu oneccl_bind_pt==2.3.100+xpu

View File

@@ -1,4 +1,4 @@
bark==0.1.5 bark==0.1.5
grpcio==1.74.0 grpcio==1.71.0
protobuf protobuf
certifi certifi

View File

@@ -1,23 +1,29 @@
.PHONY: chatterbox .PHONY: coqui
chatterbox: coqui: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: chatterbox run: protogen
@echo "Running coqui..." @echo "Running coqui..."
bash run.sh bash run.sh
@echo "coqui run." @echo "coqui run."
.PHONY: test .PHONY: test
test: chatterbox test: protogen
@echo "Testing coqui..." @echo "Testing coqui..."
bash test.sh bash test.sh
@echo "coqui tested." @echo "coqui tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -41,9 +41,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
else: else:
print("CUDA is not available", file=sys.stderr) print("CUDA is not available", file=sys.stderr)
device = "cpu" device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA: if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available") return backend_pb2.Result(success=False, message="CUDA is not available")

View File

@@ -1,6 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cpu
accelerate accelerate
torch==2.6.0 torch==2.6.0
torchaudio==2.6.0 torchaudio==2.6.0
transformers==4.46.3 transformers==4.46.3
chatterbox-tts==0.1.2 chatterbox-tts

View File

@@ -2,5 +2,5 @@
torch==2.6.0+cu118 torch==2.6.0+cu118
torchaudio==2.6.0+cu118 torchaudio==2.6.0+cu118
transformers==4.46.3 transformers==4.46.3
chatterbox-tts==0.1.2 chatterbox-tts
accelerate accelerate

View File

@@ -1,5 +1,5 @@
torch==2.6.0 torch==2.6.0
torchaudio==2.6.0 torchaudio==2.6.0
transformers==4.46.3 transformers==4.46.3
chatterbox-tts==0.1.2 chatterbox-tts
accelerate accelerate

View File

@@ -2,5 +2,5 @@
torch==2.6.0+rocm6.1 torch==2.6.0+rocm6.1
torchaudio==2.6.0+rocm6.1 torchaudio==2.6.0+rocm6.1
transformers==4.46.3 transformers==4.46.3
chatterbox-tts==0.1.2 chatterbox-tts
accelerate accelerate

View File

@@ -3,8 +3,9 @@ intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi torchaudio==2.3.1+cxx11.abi
transformers==4.46.3 transformers==4.46.3
chatterbox-tts==0.1.2 chatterbox-tts
accelerate accelerate
oneccl_bind_pt==2.3.100+xpu oneccl_bind_pt==2.3.100+xpu
optimum[openvino] optimum[openvino]
setuptools setuptools
accelerate

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail
# init handles the setup of the library
# #
# use the library by adding the following line to a script: # use the library by adding the following line to a script:
# source $(dirname $0)/../common/libbackend.sh # source $(dirname $0)/../common/libbackend.sh
@@ -17,182 +17,29 @@ set -euo pipefail
# LIMIT_TARGETS="cublas12" # LIMIT_TARGETS="cublas12"
# source $(dirname $0)/../common/libbackend.sh # source $(dirname $0)/../common/libbackend.sh
# #
# You can switch between uv (conda-like) and pip installation methods by setting USE_PIP:
# USE_PIP=true source $(dirname $0)/../common/libbackend.sh
#
# ===================== user-configurable defaults =====================
PYTHON_VERSION="${PYTHON_VERSION:-3.10}" # e.g. 3.10 / 3.11 / 3.12 / 3.13
PYTHON_PATCH="${PYTHON_PATCH:-18}" # e.g. 18 -> 3.10.18 ; 13 -> 3.11.13
PY_STANDALONE_TAG="${PY_STANDALONE_TAG:-20250818}" # release tag date
# Enable/disable bundling of a portable Python build
PORTABLE_PYTHON="${PORTABLE_PYTHON:-false}"
# If you want to fully pin the filename (including tuned CPU targets), set: PYTHON_VERSION="3.10"
# PORTABLE_PY_FILENAME="cpython-3.10.18+20250818-x86_64_v3-unknown-linux-gnu-install_only.tar.gz"
: "${PORTABLE_PY_FILENAME:=}"
: "${PORTABLE_PY_SHA256:=}" # optional; if set we verify the download
# =====================================================================
# Default to uv if USE_PIP is not set
if [ "x${USE_PIP:-}" == "x" ]; then
USE_PIP=false
fi
# ----------------------- helpers -----------------------
function _is_musl() {
# detect musl (Alpine, etc)
if command -v ldd >/dev/null 2>&1; then
ldd --version 2>&1 | grep -qi musl && return 0
fi
# busybox-ish fallback
if command -v getconf >/dev/null 2>&1; then
getconf GNU_LIBC_VERSION >/dev/null 2>&1 || return 0
fi
return 1
}
function _triple() {
local os="" arch="" libc="gnu"
case "$(uname -s)" in
Linux*) os="unknown-linux" ;;
Darwin*) os="apple-darwin" ;;
MINGW*|MSYS*|CYGWIN*) os="pc-windows-msvc" ;; # best-effort for Git Bash
*) echo "Unsupported OS $(uname -s)"; exit 1;;
esac
case "$(uname -m)" in
x86_64) arch="x86_64" ;;
aarch64|arm64) arch="aarch64" ;;
armv7l) arch="armv7" ;;
i686|i386) arch="i686" ;;
ppc64le) arch="ppc64le" ;;
s390x) arch="s390x" ;;
riscv64) arch="riscv64" ;;
*) echo "Unsupported arch $(uname -m)"; exit 1;;
esac
if [[ "$os" == "unknown-linux" ]]; then
if _is_musl; then
libc="musl"
else
libc="gnu"
fi
echo "${arch}-${os}-${libc}"
else
echo "${arch}-${os}"
fi
}
function _portable_dir() {
echo "${EDIR}/python"
}
function _portable_bin() {
# python-build-standalone puts python in ./bin
echo "$(_portable_dir)/bin"
}
function _portable_python() {
if [ -x "$(_portable_bin)/python3" ]; then
echo "$(_portable_bin)/python3"
else
echo "$(_portable_bin)/python"
fi
}
# macOS loader env for the portable CPython
_macosPortableEnv() {
if [ "$(uname -s)" = "Darwin" ]; then
export DYLD_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}"
export DYLD_FALLBACK_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_FALLBACK_LIBRARY_PATH:+:${DYLD_FALLBACK_LIBRARY_PATH}}"
fi
}
# Good hygiene on macOS for downloaded/extracted trees
_unquarantinePortablePython() {
if [ "$(uname -s)" = "Darwin" ]; then
command -v xattr >/dev/null 2>&1 && xattr -dr com.apple.quarantine "$(_portable_dir)" || true
fi
}
# ------------------ ### PORTABLE PYTHON ------------------
function ensurePortablePython() {
local pdir="$(_portable_dir)"
local pbin="$(_portable_bin)"
local pyexe
if [ -x "${pbin}/python3" ] || [ -x "${pbin}/python" ]; then
_macosPortableEnv
return 0
fi
mkdir -p "${pdir}"
local triple="$(_triple)"
local full_ver="${PYTHON_VERSION}.${PYTHON_PATCH}"
local fn=""
if [ -n "${PORTABLE_PY_FILENAME}" ]; then
fn="${PORTABLE_PY_FILENAME}"
else
# generic asset name: cpython-<full_ver>+<tag>-<triple>-install_only.tar.gz
fn="cpython-${full_ver}+${PY_STANDALONE_TAG}-${triple}-install_only.tar.gz"
fi
local url="https://github.com/astral-sh/python-build-standalone/releases/download/${PY_STANDALONE_TAG}/${fn}"
local tmp="${pdir}/${fn}"
echo "Downloading portable Python: ${fn}"
# curl with retries; fall back to wget if needed
if command -v curl >/dev/null 2>&1; then
curl -L --fail --retry 3 --retry-delay 1 -o "${tmp}" "${url}"
else
wget -O "${tmp}" "${url}"
fi
if [ -n "${PORTABLE_PY_SHA256}" ]; then
echo "${PORTABLE_PY_SHA256} ${tmp}" | sha256sum -c -
fi
echo "Extracting ${fn} -> ${pdir}"
# always a .tar.gz (we purposely choose install_only)
tar -xzf "${tmp}" -C "${pdir}"
rm -f "${tmp}"
# Some archives nest a directory; if so, flatten to ${pdir}
# Find the first dir with a 'bin/python*'
local inner
inner="$(find "${pdir}" -type f -path "*/bin/python*" -maxdepth 3 2>/dev/null | head -n1 || true)"
if [ -n "${inner}" ]; then
local inner_root
inner_root="$(dirname "$(dirname "${inner}")")" # .../bin -> root
if [ "${inner_root}" != "${pdir}" ]; then
# move contents up one level
shopt -s dotglob
mv "${inner_root}/"* "${pdir}/"
rm -rf "${inner_root}"
shopt -u dotglob
fi
fi
_unquarantinePortablePython
_macosPortableEnv
# Make sure it's runnable
pyexe="$(_portable_python)"
"${pyexe}" -V
}
# init handles the setup of the library
function init() { function init() {
# Name of the backend (directory name)
BACKEND_NAME=${PWD##*/} BACKEND_NAME=${PWD##*/}
MY_DIR=$(realpath "$(dirname "$0")")
# Path where all backends files are
MY_DIR=$(realpath `dirname $0`)
# Build type
BUILD_PROFILE=$(getBuildProfile) BUILD_PROFILE=$(getBuildProfile)
# Environment directory
EDIR=${MY_DIR} EDIR=${MY_DIR}
if [ "x${ENV_DIR:-}" != "x" ]; then
# Allow to specify a custom env dir for shared environments
if [ "x${ENV_DIR}" != "x" ]; then
EDIR=${ENV_DIR} EDIR=${ENV_DIR}
fi fi
if [ ! -z "${LIMIT_TARGETS:-}" ]; then # If a backend has defined a list of valid build profiles...
if [ ! -z "${LIMIT_TARGETS}" ]; then
isValidTarget=$(checkTargets ${LIMIT_TARGETS}) isValidTarget=$(checkTargets ${LIMIT_TARGETS})
if [ ${isValidTarget} != true ]; then if [ ${isValidTarget} != true ]; then
echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}" echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}"
@@ -203,7 +50,6 @@ function init() {
echo "Initializing libbackend for ${BACKEND_NAME}" echo "Initializing libbackend for ${BACKEND_NAME}"
} }
# getBuildProfile will inspect the system to determine which build profile is appropriate: # getBuildProfile will inspect the system to determine which build profile is appropriate:
# returns one of the following: # returns one of the following:
# - cublas11 # - cublas11
@@ -211,140 +57,53 @@ function init() {
# - hipblas # - hipblas
# - intel # - intel
function getBuildProfile() { function getBuildProfile() {
if [ x"${BUILD_TYPE:-}" == "xcublas" ]; then # First check if we are a cublas build, and if so report the correct build profile
if [ ! -z "${CUDA_MAJOR_VERSION:-}" ]; then if [ x"${BUILD_TYPE}" == "xcublas" ]; then
if [ ! -z ${CUDA_MAJOR_VERSION} ]; then
# If we have been given a CUDA version, we trust it
echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION} echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION}
else else
# We don't know what version of cuda we are, so we report ourselves as a generic cublas
echo ${BUILD_TYPE} echo ${BUILD_TYPE}
fi fi
return 0 return 0
fi fi
# If /opt/intel exists, then we are doing an intel/ARC build
if [ -d "/opt/intel" ]; then if [ -d "/opt/intel" ]; then
echo "intel" echo "intel"
return 0 return 0
fi fi
if [ -n "${BUILD_TYPE:-}" ]; then # If for any other values of BUILD_TYPE, we don't need any special handling/discovery
if [ ! -z ${BUILD_TYPE} ]; then
echo ${BUILD_TYPE} echo ${BUILD_TYPE}
return 0 return 0
fi fi
# If there is no BUILD_TYPE set at all, set a build-profile value of CPU, we aren't building for any GPU targets
echo "cpu" echo "cpu"
} }
# Make the venv relocatable:
# - rewrite venv/bin/python{,3} to relative symlinks into $(_portable_dir)
# - normalize entrypoint shebangs to /usr/bin/env python3
_makeVenvPortable() {
local venv_dir="${EDIR}/venv"
local vbin="${venv_dir}/bin"
[ -d "${vbin}" ] || return 0
# 1) Replace python symlinks with relative ones to ../../python/bin/python3
# (venv/bin -> venv -> EDIR -> python/bin)
local rel_py='../../python/bin/python3'
for name in python3 python; do
if [ -e "${vbin}/${name}" ] || [ -L "${vbin}/${name}" ]; then
rm -f "${vbin}/${name}"
fi
done
ln -s "${rel_py}" "${vbin}/python3"
ln -s "python3" "${vbin}/python"
# 2) Rewrite shebangs of entry points to use env, so the venv is relocatable
# Only touch text files that start with #! and reference the current venv.
local ve_abs="${vbin}/python"
local sed_i=(sed -i)
# macOS/BSD sed needs a backup suffix; GNU sed doesn't. Make it portable:
if sed --version >/dev/null 2>&1; then
sed_i=(sed -i)
else
sed_i=(sed -i '')
fi
for f in "${vbin}"/*; do
[ -f "$f" ] || continue
# Fast path: check first two bytes (#!)
head -c2 "$f" 2>/dev/null | grep -q '^#!' || continue
# Only rewrite if the shebang mentions the (absolute) venv python
if head -n1 "$f" | grep -Fq "${ve_abs}"; then
"${sed_i[@]}" '1s|^#!.*$|#!/usr/bin/env python3|' "$f"
chmod +x "$f" 2>/dev/null || true
fi
done
}
# ensureVenv makes sure that the venv for the backend both exists, and is activated. # ensureVenv makes sure that the venv for the backend both exists, and is activated.
# #
# This function is idempotent, so you can call it as many times as you want and it will # This function is idempotent, so you can call it as many times as you want and it will
# always result in an activated virtual environment # always result in an activated virtual environment
function ensureVenv() { function ensureVenv() {
local interpreter=""
if [ "x${PORTABLE_PYTHON}" == "xtrue" ] || [ -e "$(_portable_python)" ]; then
echo "Using portable Python"
ensurePortablePython
interpreter="$(_portable_python)"
else
# Prefer system python${PYTHON_VERSION}, else python3, else fall back to bundled
if command -v python${PYTHON_VERSION} >/dev/null 2>&1; then
interpreter="python${PYTHON_VERSION}"
elif command -v python3 >/dev/null 2>&1; then
interpreter="python3"
else
echo "No suitable system Python found, bootstrapping portable build..."
ensurePortablePython
interpreter="$(_portable_python)"
fi
fi
if [ ! -d "${EDIR}/venv" ]; then if [ ! -d "${EDIR}/venv" ]; then
if [ "x${USE_PIP}" == "xtrue" ]; then uv venv --python ${PYTHON_VERSION} ${EDIR}/venv
"${interpreter}" -m venv --copies "${EDIR}/venv" echo "virtualenv created"
source "${EDIR}/venv/bin/activate"
"${interpreter}" -m pip install --upgrade pip
else
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
uv venv --python "${interpreter}" "${EDIR}/venv"
else
uv venv --python "${PYTHON_VERSION}" "${EDIR}/venv"
fi
fi
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
_makeVenvPortable
fi
fi fi
# We call it here to make sure that when we source a venv we can still use python as expected # Source if we are not already in a Virtual env
if [ -x "$(_portable_python)" ]; then if [ "x${VIRTUAL_ENV}" != "x${EDIR}/venv" ]; then
_macosPortableEnv source ${EDIR}/venv/bin/activate
echo "virtualenv activated"
fi fi
if [ "x${VIRTUAL_ENV:-}" != "x${EDIR}/venv" ]; then echo "activated virtualenv has been ensured"
source "${EDIR}/venv/bin/activate"
fi
} }
function runProtogen() {
ensureVenv
if [ "x${USE_PIP}" == "xtrue" ]; then
pip install grpcio-tools
else
uv pip install grpcio-tools
fi
pushd "${EDIR}" >/dev/null
# use the venv python (ensures correct interpreter & sys.path)
python -m grpc_tools.protoc -I../../ -I./ --python_out=. --grpc_python_out=. backend.proto
popd >/dev/null
}
# installRequirements looks for several requirements files and if they exist runs the install for them in order # installRequirements looks for several requirements files and if they exist runs the install for them in order
# #
# - requirements-install.txt # - requirements-install.txt
@@ -352,7 +111,7 @@ function runProtogen() {
# - requirements-${BUILD_TYPE}.txt # - requirements-${BUILD_TYPE}.txt
# - requirements-${BUILD_PROFILE}.txt # - requirements-${BUILD_PROFILE}.txt
# #
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12 # BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda11 or cuda12
# it can also include some options that we do not have BUILD_TYPES for, ex: intel # it can also include some options that we do not have BUILD_TYPES for, ex: intel
# #
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index. # NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
@@ -368,41 +127,36 @@ function runProtogen() {
# installRequirements # installRequirements
function installRequirements() { function installRequirements() {
ensureVenv ensureVenv
# These are the requirements files we will attempt to install, in order
declare -a requirementFiles=( declare -a requirementFiles=(
"${EDIR}/requirements-install.txt" "${EDIR}/requirements-install.txt"
"${EDIR}/requirements.txt" "${EDIR}/requirements.txt"
"${EDIR}/requirements-${BUILD_TYPE:-}.txt" "${EDIR}/requirements-${BUILD_TYPE}.txt"
) )
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt") requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt")
fi fi
if [ "x${BUILD_TYPE:-}" == "x" ]; then
# if BUILD_TYPE is empty, we are a CPU build, so we should try to install the CPU requirements
if [ "x${BUILD_TYPE}" == "x" ]; then
requirementFiles+=("${EDIR}/requirements-cpu.txt") requirementFiles+=("${EDIR}/requirements-cpu.txt")
fi fi
requirementFiles+=("${EDIR}/requirements-after.txt") requirementFiles+=("${EDIR}/requirements-after.txt")
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt") requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt")
fi fi
# This is needed to build wheels that, e.g., depend on Python.h
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
export C_INCLUDE_PATH="${C_INCLUDE_PATH:-}:$(_portable_dir)/include/python${PYTHON_VERSION}"
fi
for reqFile in ${requirementFiles[@]}; do for reqFile in ${requirementFiles[@]}; do
if [ -f "${reqFile}" ]; then if [ -f ${reqFile} ]; then
echo "starting requirements install for ${reqFile}" echo "starting requirements install for ${reqFile}"
if [ "x${USE_PIP}" == "xtrue" ]; then uv pip install ${EXTRA_PIP_INSTALL_FLAGS} --requirement ${reqFile}
pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
else
uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
fi
echo "finished requirements install for ${reqFile}" echo "finished requirements install for ${reqFile}"
fi fi
done done
runProtogen
} }
# startBackend discovers and runs the backend GRPC server # startBackend discovers and runs the backend GRPC server
@@ -420,18 +174,18 @@ function installRequirements() {
# - ${BACKEND_NAME}.py # - ${BACKEND_NAME}.py
function startBackend() { function startBackend() {
ensureVenv ensureVenv
if [ ! -z "${BACKEND_FILE:-}" ]; then
exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@" if [ ! -z ${BACKEND_FILE} ]; then
exec ${EDIR}/venv/bin/python ${BACKEND_FILE} $@
elif [ -e "${MY_DIR}/server.py" ]; then elif [ -e "${MY_DIR}/server.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/server.py" "$@" exec ${EDIR}/venv/bin/python ${MY_DIR}/server.py $@
elif [ -e "${MY_DIR}/backend.py" ]; then elif [ -e "${MY_DIR}/backend.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/backend.py" "$@" exec ${EDIR}/venv/bin/python ${MY_DIR}/backend.py $@
elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/${BACKEND_NAME}.py" "$@" exec ${EDIR}/venv/bin/python ${MY_DIR}/${BACKEND_NAME}.py $@
fi fi
} }
# runUnittests discovers and runs python unittests # runUnittests discovers and runs python unittests
# #
# You can specify a specific test file to use by setting TEST_FILE before calling runUnittests. # You can specify a specific test file to use by setting TEST_FILE before calling runUnittests.
@@ -444,36 +198,41 @@ function startBackend() {
# be default a file named test.py in the backends directory will be used # be default a file named test.py in the backends directory will be used
function runUnittests() { function runUnittests() {
ensureVenv ensureVenv
if [ ! -z "${TEST_FILE:-}" ]; then
testDir=$(dirname "$(realpath "${TEST_FILE}")") if [ ! -z ${TEST_FILE} ]; then
testFile=$(basename "${TEST_FILE}") testDir=$(dirname `realpath ${TEST_FILE}`)
pushd "${testDir}" >/dev/null testFile=$(basename ${TEST_FILE})
python -m unittest "${testFile}" pushd ${testDir}
popd >/dev/null python -m unittest ${testFile}
popd
elif [ -f "${MY_DIR}/test.py" ]; then elif [ -f "${MY_DIR}/test.py" ]; then
pushd "${MY_DIR}" >/dev/null pushd ${MY_DIR}
python -m unittest test.py python -m unittest test.py
popd >/dev/null popd
else else
echo "no tests defined for ${BACKEND_NAME}" echo "no tests defined for ${BACKEND_NAME}"
fi fi
} }
################################################################################## ##################################################################################
# Below here are helper functions not intended to be used outside of the library # # Below here are helper functions not intended to be used outside of the library #
################################################################################## ##################################################################################
# checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets # checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets
function checkTargets() { function checkTargets() {
# Collect all provided targets into a variable and...
targets=$@ targets=$@
# ...convert it into an array
declare -a targets=($targets) declare -a targets=($targets)
for target in ${targets[@]}; do for target in ${targets[@]}; do
if [ "x${BUILD_TYPE:-}" == "x${target}" ]; then if [ "x${BUILD_TYPE}" == "x${target}" ]; then
echo true; return 0 echo true
return 0
fi fi
if [ "x${BUILD_PROFILE}" == "x${target}" ]; then if [ "x${BUILD_PROFILE}" == "x${target}" ]; then
echo true; return 0 echo true
return 0
fi fi
done done
echo false echo false

View File

@@ -3,11 +3,18 @@
.PHONY: install .PHONY: install
install: install:
bash install.sh bash install.sh
$(MAKE) protogen
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
bash protogen.sh
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -8,4 +8,4 @@ else
source $backend_dir/../common/libbackend.sh source $backend_dir/../common/libbackend.sh
fi fi
runProtogen python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto

View File

@@ -1,5 +1,5 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu intel-extension-for-pytorch==2.3.110+xpu
torch==2.8.0 torch==2.3.1+cxx11.abi
oneccl_bind_pt==2.8.0+xpu oneccl_bind_pt==2.3.100+xpu
optimum[openvino] optimum[openvino]

View File

@@ -1,3 +1,3 @@
grpcio==1.74.0 grpcio==1.71.0
protobuf protobuf
grpcio-tools grpcio-tools

View File

@@ -1,23 +1,29 @@
.PHONY: coqui .PHONY: coqui
coqui: coqui: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: coqui run: protogen
@echo "Running coqui..." @echo "Running coqui..."
bash run.sh bash run.sh
@echo "coqui run." @echo "coqui run."
.PHONY: test .PHONY: test
test: coqui test: protogen
@echo "Testing coqui..." @echo "Testing coqui..."
bash test.sh bash test.sh
@echo "coqui tested." @echo "coqui tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -40,9 +40,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
else: else:
print("CUDA is not available", file=sys.stderr) print("CUDA is not available", file=sys.stderr)
device = "cpu" device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA: if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available") return backend_pb2.Result(success=False, message="CUDA is not available")

View File

@@ -1,4 +1,4 @@
grpcio==1.74.0 grpcio==1.71.0
protobuf protobuf
certifi certifi
packaging==24.1 packaging==24.1

View File

@@ -12,22 +12,28 @@ export SKIP_CONDA=1
endif endif
.PHONY: diffusers .PHONY: diffusers
diffusers: diffusers: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: diffusers run: protogen
@echo "Running diffusers..." @echo "Running diffusers..."
bash run.sh bash run.sh
@echo "Diffusers run." @echo "Diffusers run."
test: diffusers test: protogen
bash test.sh bash test.sh
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -18,7 +18,7 @@ import backend_pb2_grpc
import grpc import grpc
from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \ from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, QwenImageEditPipeline, AutoencoderKLWan, WanPipeline, WanImageToVideoPipeline EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel
from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
from diffusers.pipelines.stable_diffusion import safety_checker from diffusers.pipelines.stable_diffusion import safety_checker
from diffusers.utils import load_image, export_to_video from diffusers.utils import load_image, export_to_video
@@ -65,21 +65,6 @@ from diffusers.schedulers import (
UniPCMultistepScheduler, UniPCMultistepScheduler,
) )
def is_float(s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False
# The scheduler list mapping was taken from here: https://github.com/neggles/animatediff-cli/blob/6f336f5f4b5e38e85d7f06f1744ef42d0a45f2a7/src/animatediff/schedulers.py#L39 # The scheduler list mapping was taken from here: https://github.com/neggles/animatediff-cli/blob/6f336f5f4b5e38e85d7f06f1744ef42d0a45f2a7/src/animatediff/schedulers.py#L39
# Credits to https://github.com/neggles # Credits to https://github.com/neggles
@@ -184,26 +169,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if ":" not in opt: if ":" not in opt:
continue continue
key, value = opt.split(":") key, value = opt.split(":")
# if value is a number, convert it to the appropriate type
if is_float(value):
value = float(value)
elif is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value self.options[key] = value
# From options, extract if present "torch_dtype" and set it to the appropriate type
if "torch_dtype" in self.options:
if self.options["torch_dtype"] == "fp16":
torchType = torch.float16
elif self.options["torch_dtype"] == "bf16":
torchType = torch.bfloat16
elif self.options["torch_dtype"] == "fp32":
torchType = torch.float32
# remove it from options
del self.options["torch_dtype"]
print(f"Options: {self.options}", file=sys.stderr) print(f"Options: {self.options}", file=sys.stderr)
local = False local = False
@@ -267,9 +234,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
elif request.PipelineType == "DiffusionPipeline": elif request.PipelineType == "DiffusionPipeline":
self.pipe = DiffusionPipeline.from_pretrained(request.Model, self.pipe = DiffusionPipeline.from_pretrained(request.Model,
torch_dtype=torchType) torch_dtype=torchType)
elif request.PipelineType == "QwenImageEditPipeline":
self.pipe = QwenImageEditPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "VideoDiffusionPipeline": elif request.PipelineType == "VideoDiffusionPipeline":
self.txt2vid = True self.txt2vid = True
self.pipe = DiffusionPipeline.from_pretrained(request.Model, self.pipe = DiffusionPipeline.from_pretrained(request.Model,
@@ -338,32 +302,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
torch_dtype=torch.bfloat16) torch_dtype=torch.bfloat16)
self.pipe.vae.to(torch.bfloat16) self.pipe.vae.to(torch.bfloat16)
self.pipe.text_encoder.to(torch.bfloat16) self.pipe.text_encoder.to(torch.bfloat16)
elif request.PipelineType == "WanPipeline":
# WAN2.2 pipeline requires special VAE handling
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
self.pipe = WanPipeline.from_pretrained(
request.Model,
vae=vae,
torch_dtype=torchType
)
self.txt2vid = True # WAN2.2 is a text-to-video pipeline
elif request.PipelineType == "WanImageToVideoPipeline":
# WAN2.2 image-to-video pipeline
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
self.pipe = WanImageToVideoPipeline.from_pretrained(
request.Model,
vae=vae,
torch_dtype=torchType
)
self.img2vid = True # WAN2.2 image-to-video pipeline
if CLIPSKIP and request.CLIPSkip != 0: if CLIPSKIP and request.CLIPSkip != 0:
self.clip_skip = request.CLIPSkip self.clip_skip = request.CLIPSkip
@@ -398,9 +336,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
device = "cpu" if not request.CUDA else "cuda" device = "cpu" if not request.CUDA else "cuda"
if XPU: if XPU:
device = "xpu" device = "xpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
self.device = device self.device = device
if request.LoraAdapter: if request.LoraAdapter:
# Check if its a local file and not a directory ( we load lora differently for a safetensor file ) # Check if its a local file and not a directory ( we load lora differently for a safetensor file )
@@ -505,24 +440,11 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
"num_inference_steps": steps, "num_inference_steps": steps,
} }
# Handle image source: prioritize RefImages over request.src if request.src != "" and not self.controlnet and not self.img2vid:
image_src = None image = Image.open(request.src)
if hasattr(request, 'ref_images') and request.ref_images and len(request.ref_images) > 0:
# Use the first reference image if available
image_src = request.ref_images[0]
print(f"Using reference image: {image_src}", file=sys.stderr)
elif request.src != "":
# Fall back to request.src if no ref_images
image_src = request.src
print(f"Using source image: {image_src}", file=sys.stderr)
else:
print("No image source provided", file=sys.stderr)
if image_src and not self.controlnet and not self.img2vid:
image = Image.open(image_src)
options["image"] = image options["image"] = image
elif self.controlnet and image_src: elif self.controlnet and request.src:
pose_image = load_image(image_src) pose_image = load_image(request.src)
options["image"] = pose_image options["image"] = pose_image
if CLIPSKIP and self.clip_skip != 0: if CLIPSKIP and self.clip_skip != 0:
@@ -564,11 +486,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if self.img2vid: if self.img2vid:
# Load the conditioning image # Load the conditioning image
if image_src: image = load_image(request.src)
image = load_image(image_src)
else:
# Fallback to request.src for img2vid if no ref_images
image = load_image(request.src)
image = image.resize((1024, 576)) image = image.resize((1024, 576))
generator = torch.manual_seed(request.seed) generator = torch.manual_seed(request.seed)
@@ -605,96 +523,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
return backend_pb2.Result(message="Media generated", success=True) return backend_pb2.Result(message="Media generated", success=True)
    def GenerateVideo(self, request, context):
        try:
            prompt = request.prompt
            if not prompt:
                return backend_pb2.Result(success=False, message="No prompt provided for video generation")

            # Set default values from request or use defaults
            num_frames = request.num_frames if request.num_frames > 0 else 81
            fps = request.fps if request.fps > 0 else 16
            cfg_scale = request.cfg_scale if request.cfg_scale > 0 else 4.0
            num_inference_steps = request.step if request.step > 0 else 40

            # Prepare generation parameters
            kwargs = {
                "prompt": prompt,
                "negative_prompt": request.negative_prompt if request.negative_prompt else "",
                "height": request.height if request.height > 0 else 720,
                "width": request.width if request.width > 0 else 1280,
                "num_frames": num_frames,
                "guidance_scale": cfg_scale,
                "num_inference_steps": num_inference_steps,
            }

            # Add custom options from self.options (including guidance_scale_2 if specified)
            kwargs.update(self.options)

            # Set seed if provided
            if request.seed > 0:
                kwargs["generator"] = torch.Generator(device=self.device).manual_seed(request.seed)

            # Handle start and end images for video generation
            if request.start_image:
                kwargs["start_image"] = load_image(request.start_image)
            if request.end_image:
                kwargs["end_image"] = load_image(request.end_image)

            print(f"Generating video with {kwargs=}", file=sys.stderr)

            # Generate video frames based on pipeline type
            if self.PipelineType == "WanPipeline":
                # WAN2.2 text-to-video generation
                output = self.pipe(**kwargs)
                frames = output.frames[0]  # WAN2.2 returns frames in this format
            elif self.PipelineType == "WanImageToVideoPipeline":
                # WAN2.2 image-to-video generation
                if request.start_image:
                    # Load and resize the input image according to WAN2.2 requirements
                    image = load_image(request.start_image)
                    # Use request dimensions or defaults, but respect WAN2.2 constraints
                    request_height = request.height if request.height > 0 else 480
                    request_width = request.width if request.width > 0 else 832
                    max_area = request_height * request_width
                    aspect_ratio = image.height / image.width
                    mod_value = self.pipe.vae_scale_factor_spatial * self.pipe.transformer.config.patch_size[1]
                    height = round((max_area * aspect_ratio) ** 0.5 / mod_value) * mod_value
                    width = round((max_area / aspect_ratio) ** 0.5 / mod_value) * mod_value
                    image = image.resize((width, height))
                    kwargs["image"] = image
                    kwargs["height"] = height
                    kwargs["width"] = width
                output = self.pipe(**kwargs)
                frames = output.frames[0]
            elif self.img2vid:
                # Generic image-to-video generation
                if request.start_image:
                    image = load_image(request.start_image)
                    image = image.resize((request.width if request.width > 0 else 1024,
                                          request.height if request.height > 0 else 576))
                    kwargs["image"] = image
                output = self.pipe(**kwargs)
                frames = output.frames[0]
            elif self.txt2vid:
                # Generic text-to-video generation
                output = self.pipe(**kwargs)
                frames = output.frames[0]
            else:
                return backend_pb2.Result(success=False, message=f"Pipeline {self.PipelineType} does not support video generation")

            # Export video
            export_to_video(frames, request.dst, fps=fps)
            return backend_pb2.Result(message="Video generated successfully", success=True)
        except Exception as err:
            print(f"Error generating video: {err}", file=sys.stderr)
            traceback.print_exc()
            return backend_pb2.Result(success=False, message=f"Error generating video: {err}")
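The WanImageToVideoPipeline branch above picks the output resolution by keeping the source aspect ratio, targeting roughly the requested pixel area, and snapping both sides to the pipeline's spatial stride (vae_scale_factor_spatial * patch_size). A minimal standalone sketch of that arithmetic; the helper name and the stride of 16 are illustrative assumptions, not values taken from the diff:

# Illustrative only: reproduces the resize rule from the WanImageToVideoPipeline branch.
def wan_target_size(img_w: int, img_h: int, req_w: int = 832, req_h: int = 480, mod_value: int = 16):
    """Return (width, height) near req_w*req_h pixels, keeping the source aspect
    ratio and making both sides divisible by mod_value."""
    max_area = req_w * req_h
    aspect_ratio = img_h / img_w
    height = round((max_area * aspect_ratio) ** 0.5 / mod_value) * mod_value
    width = round((max_area / aspect_ratio) ** 0.5 / mod_value) * mod_value
    return width, height

# Example: a 1280x720 input targeted at the 832x480 default area -> (848, 480).
print(wan_target_size(1280, 720))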
def serve(address):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),

View File

@@ -1,12 +1,9 @@
---extra-index-url https://download.pytorch.org/whl/cpu
-git+https://github.com/huggingface/diffusers
+diffusers
 opencv-python
 transformers
-torchvision==0.22.1
 accelerate
 compel
 peft
 sentencepiece
-torch==2.7.1
+torch==2.4.1
 optimum-quanto
-ftfy

View File

@@ -1,12 +1,10 @@
 --extra-index-url https://download.pytorch.org/whl/cu118
-git+https://github.com/huggingface/diffusers
+torch==2.4.1+cu118
+diffusers
 opencv-python
 transformers
-torchvision==0.22.1
 accelerate
 compel
 peft
 sentencepiece
-torch==2.7.1
 optimum-quanto
-ftfy

View File

@@ -1,12 +1,9 @@
---extra-index-url https://download.pytorch.org/whl/cu121
-git+https://github.com/huggingface/diffusers
+torch==2.4.1
+diffusers
 opencv-python
 transformers
-torchvision
 accelerate
 compel
 peft
 sentencepiece
-torch
-ftfy
 optimum-quanto

View File

@@ -1,12 +1,11 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.3
-torch==2.7.1+rocm6.3
-torchvision==0.22.1+rocm6.3
-git+https://github.com/huggingface/diffusers
+--extra-index-url https://download.pytorch.org/whl/rocm6.0
+torch==2.3.1+rocm6.0
+torchvision==0.18.1+rocm6.0
+diffusers
 opencv-python
 transformers
 accelerate
 compel
 peft
 sentencepiece
 optimum-quanto
-ftfy

View File

@@ -1,16 +1,15 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 intel-extension-for-pytorch==2.3.110+xpu
-torch==2.5.1+cxx11.abi
-torchvision==0.20.1+cxx11.abi
-oneccl_bind_pt==2.8.0+xpu
+torch==2.3.1+cxx11.abi
+torchvision==0.18.1+cxx11.abi
+oneccl_bind_pt==2.3.100+xpu
 optimum[openvino]
 setuptools
-git+https://github.com/huggingface/diffusers
+diffusers
 opencv-python
 transformers
 accelerate
 compel
 peft
 sentencepiece
 optimum-quanto
-ftfy

View File

@@ -1,12 +0,0 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
torch
diffusers
transformers
accelerate
compel
peft
optimum-quanto
numpy<2
sentencepiece
torchvision
ftfy

View File

@@ -1,11 +0,0 @@
torch==2.7.1
torchvision==0.22.1
git+https://github.com/huggingface/diffusers
opencv-python
transformers
accelerate
compel
peft
sentencepiece
optimum-quanto
ftfy

View File

@@ -1,5 +1,5 @@
 setuptools
-grpcio==1.74.0
+grpcio==1.71.0
 pillow
 protobuf
 certifi

View File

@@ -12,6 +12,4 @@ if [ -d "/opt/intel" ]; then
     export XPU=1
 fi
-export PYTORCH_ENABLE_MPS_FALLBACK=1
 startBackend $@

View File

@@ -1,17 +1,23 @@
 .PHONY: exllama2
-exllama2:
+exllama2: protogen
 	bash install.sh
 .PHONY: run
-run: exllama2
+run: protogen
 	@echo "Running exllama2..."
 	bash run.sh
 	@echo "exllama2 run."
+.PHONY: protogen
+protogen: backend_pb2_grpc.py backend_pb2.py
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
+backend_pb2_grpc.py backend_pb2.py:
+	python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
 .PHONY: clean
 clean: protogen-clean
 	$(RM) -r venv source __pycache__
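For reference, the protogen recipe added above shells out to grpc_tools.protoc; the same generation step can also be driven from Python via grpcio-tools. A hedged sketch, assuming backend.proto is reachable through the same -I../.. include path used in the Makefile:

# Sketch only: programmatic equivalent of the Makefile's protogen rule.
from grpc_tools import protoc

ret = protoc.main([
    "grpc_tools.protoc",
    "-I../..",
    "-I./",
    "--python_out=.",
    "--grpc_python_out=.",
    "backend.proto",
])
if ret != 0:
    raise SystemExit("protoc failed")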

View File

@@ -1,4 +1,4 @@
-grpcio==1.74.0
+grpcio==1.71.0
 protobuf
 certifi
 wheel

View File

@@ -3,11 +3,18 @@
 .PHONY: install
 install:
 	bash install.sh
+	$(MAKE) protogen
+.PHONY: protogen
+protogen: backend_pb2_grpc.py backend_pb2.py
 .PHONY: protogen-clean
 protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
+backend_pb2_grpc.py backend_pb2.py:
+	bash protogen.sh
 .PHONY: clean
 clean: protogen-clean
 	rm -rf venv __pycache__

View File

@@ -10,7 +10,7 @@ import sys
 import os
 import backend_pb2
 import backend_pb2_grpc
-import torch
 from faster_whisper import WhisperModel
 import grpc
@@ -35,9 +35,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         # device = "cuda" if request.CUDA else "cpu"
         if request.CUDA:
             device = "cuda"
-        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
-        if mps_available:
-            device = "mps"
         try:
             print("Preparing models, please wait", file=sys.stderr)
             self.model = WhisperModel(request.Model, device=device, compute_type="float16")
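The dropped branch above selected "mps" via torch, but faster-whisper runs on CTranslate2, whose device argument accepts only "cpu", "cuda", or "auto"; passing "mps" would fail at load time, which is presumably why the check is removed along with the torch import. A minimal sketch of the resulting device and compute-type choice; the helper name and the int8 fallback are illustrative, not part of this diff:

# Sketch, not LocalAI's actual loader: pick a device faster-whisper understands.
from faster_whisper import WhisperModel

def load_whisper(model_name: str, use_cuda: bool) -> WhisperModel:
    device = "cuda" if use_cuda else "cpu"
    # float16 needs a GPU; int8 keeps CPU loading from failing.
    compute_type = "float16" if device == "cuda" else "int8"
    return WhisperModel(model_name, device=device, compute_type=compute_type)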

View File

@@ -1,23 +0,0 @@
.PHONY: kitten-tts
kitten-tts:
	bash install.sh

.PHONY: run
run: kitten-tts
	@echo "Running kitten-tts..."
	bash run.sh
	@echo "kitten-tts run."

.PHONY: test
test: kitten-tts
	@echo "Testing kitten-tts..."
	bash test.sh
	@echo "kitten-tts tested."

.PHONY: protogen-clean
protogen-clean:
	$(RM) backend_pb2_grpc.py backend_pb2.py

.PHONY: clean
clean: protogen-clean
	rm -rf venv __pycache__

View File

@@ -1,109 +0,0 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for Kitten TTS
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os

import backend_pb2
import backend_pb2_grpc

import torch
from kittentts import KittenTTS
import soundfile as sf

import grpc

_ONE_DAY_IN_SECONDS = 60 * 60 * 24

# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
KITTEN_LANGUAGE = os.environ.get('KITTEN_LANGUAGE', None)

# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
    """
    BackendServicer is the class that implements the gRPC service
    """
    def Health(self, request, context):
        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))

    def LoadModel(self, request, context):
        self.AudioPath = None

        # List the available KittenTTS voices
        print("Available KittenTTS voices: expr-voice-2-m, expr-voice-2-f, expr-voice-3-m, expr-voice-3-f, expr-voice-4-m, expr-voice-4-f, expr-voice-5-m, expr-voice-5-f")

        if os.path.isabs(request.AudioPath):
            self.AudioPath = request.AudioPath
        elif request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
            # get base path of modelFile
            modelFileBase = os.path.dirname(request.ModelFile)
            # make AudioPath relative to modelFileBase
            self.AudioPath = os.path.join(modelFileBase, request.AudioPath)

        try:
            print("Preparing KittenTTS model, please wait", file=sys.stderr)
            # Use the model name from request.Model, defaulting to "KittenML/kitten-tts-nano-0.1" if not specified
            model_name = request.Model if request.Model else "KittenML/kitten-tts-nano-0.1"
            self.tts = KittenTTS(model_name)
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")

        return backend_pb2.Result(message="Model loaded successfully", success=True)

    def TTS(self, request, context):
        try:
            # KittenTTS doesn't use language parameter like TTS, so we ignore it
            # For multi-speaker models, use voice parameter
            voice = request.voice if request.voice else "expr-voice-2-f"

            # Generate audio using KittenTTS
            audio = self.tts.generate(request.text, voice=voice)

            # Save the audio using soundfile
            sf.write(request.dst, audio, 24000)
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
        return backend_pb2.Result(success=True)

def serve(address):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
                         options=[
                             ('grpc.max_message_length', 50 * 1024 * 1024),  # 50MB
                             ('grpc.max_send_message_length', 50 * 1024 * 1024),  # 50MB
                             ('grpc.max_receive_message_length', 50 * 1024 * 1024),  # 50MB
                         ])
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    print("Server started. Listening on: " + address, file=sys.stderr)

    # Define the signal handler function
    def signal_handler(sig, frame):
        print("Received termination signal. Shutting down...")
        server.stop(0)
        sys.exit(0)

    # Set the signal handlers for SIGINT and SIGTERM
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the gRPC server.")
    parser.add_argument(
        "--addr", default="localhost:50051", help="The address to bind the server to."
    )
    args = parser.parse_args()

    serve(args.addr)
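For context, the removed backend was driven over gRPC like the other Python backends; a hedged client sketch using the message types visible in this file and in the test below (the model name, voice, and output path are illustrative values, not ones mandated by LocalAI):

# Hypothetical client for the removed Kitten TTS backend.
import grpc
import backend_pb2
import backend_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    load = stub.LoadModel(backend_pb2.ModelOptions(Model="KittenML/kitten-tts-nano-0.1"))
    assert load.success, load.message
    stub.TTS(backend_pb2.TTSRequest(
        text="Hello from Kitten TTS",
        voice="expr-voice-2-f",  # one of the voices listed by LoadModel
        dst="/tmp/kitten.wav",   # backend writes 24 kHz audio here via soundfile
    ))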

View File

@@ -1,19 +0,0 @@
#!/bin/bash
set -e

backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi

installRequirements

View File

@@ -1,5 +0,0 @@
grpcio==1.71.0
protobuf
certifi
packaging==24.1
https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl

View File

@@ -1,9 +0,0 @@
#!/bin/bash
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
    source $backend_dir/common/libbackend.sh
else
    source $backend_dir/../common/libbackend.sh
fi

startBackend $@

View File

@@ -1,82 +0,0 @@
"""
A test script to test the gRPC service
"""
import unittest
import subprocess
import time
import backend_pb2
import backend_pb2_grpc
import grpc
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service
"""
def setUp(self):
"""
This method sets up the gRPC service by starting the server
"""
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
time.sleep(30)
def tearDown(self) -> None:
"""
This method tears down the gRPC service by terminating the server
"""
self.service.terminate()
self.service.wait()
def test_server_startup(self):
"""
This method tests if the server starts up successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.Health(backend_pb2.HealthMessage())
self.assertEqual(response.message, b'OK')
except Exception as err:
print(err)
self.fail("Server failed to start")
finally:
self.tearDown()
def test_load_model(self):
"""
This method tests if the model is loaded successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
print(response)
self.assertTrue(response.success)
self.assertEqual(response.message, "Model loaded successfully")
except Exception as err:
print(err)
self.fail("LoadModel service failed")
finally:
self.tearDown()
def test_tts(self):
"""
This method tests if the embeddings are generated successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="tts_models/en/vctk/vits"))
self.assertTrue(response.success)
tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
tts_response = stub.TTS(tts_request)
self.assertIsNotNone(tts_response)
except Exception as err:
print(err)
self.fail("TTS service failed")
finally:
self.tearDown()

Some files were not shown because too many files have changed in this diff.