Compare commits


1 Commit

Author: Ettore Di Giacinto
SHA1: 9c40d9bbed
Message: feat(diffusers): add builds for nvidia-l4t
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Date: 2025-08-08 22:50:40 +02:00
219 changed files with 2044 additions and 7229 deletions

View File

@@ -2,7 +2,6 @@
name: 'build backend container images'
on:
pull_request:
push:
branches:
- master
@@ -64,18 +63,6 @@ jobs:
backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp"
context: "./"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-transformers'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
@@ -103,7 +90,7 @@ jobs:
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-l4t-diffusers'
runs-on: 'ubuntu-24.04-arm'
@@ -112,18 +99,6 @@ jobs:
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-diffusers'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# CUDA 11 additional backends
- build-type: 'cublas'
cuda-major-version: "11"
@@ -216,7 +191,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm'
runs-on: 'arc-runner-set'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "vllm"
@@ -315,7 +290,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-rerankers'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "rerankers"
dockerfile: "./backend/Dockerfile.python"
@@ -327,7 +302,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-llama-cpp'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp"
@@ -338,8 +313,8 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vllm'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "vllm"
dockerfile: "./backend/Dockerfile.python"
@@ -351,7 +326,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-transformers'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
@@ -363,7 +338,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-diffusers'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
@@ -376,7 +351,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-kokoro'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "kokoro"
dockerfile: "./backend/Dockerfile.python"
@@ -388,7 +363,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-faster-whisper'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python"
@@ -400,7 +375,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
@@ -412,7 +387,7 @@ jobs:
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-bark'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
@@ -460,7 +435,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-vllm'
runs-on: 'arc-runner-set'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "vllm"
@@ -777,7 +752,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-whisper'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'ubuntu-latest'
skip-drivers: 'false'
backend: "whisper"
@@ -939,7 +914,7 @@ jobs:
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-exllama2'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'ubuntu-latest'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
@@ -951,7 +926,7 @@ jobs:
# platforms: 'linux/amd64'
# tag-latest: 'auto'
# tag-suffix: '-gpu-hipblas-rfdetr'
# base-image: "rocm/dev-ubuntu-22.04:6.4.3"
# base-image: "rocm/dev-ubuntu-22.04:6.1"
# runs-on: 'ubuntu-latest'
# skip-drivers: 'false'
# backend: "rfdetr"
@@ -970,60 +945,6 @@ jobs:
backend: "kitten-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
transformers-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "transformers"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-transformers"
use-pip: true
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
diffusers-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "diffusers"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-diffusers"
use-pip: true
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
mlx-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "mlx"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-mlx"
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
mlx-vlm-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
with:
backend: "mlx-vlm"
build-type: "mps"
go-version: "1.24.x"
tag-suffix: "-metal-darwin-arm64-mlx-vlm"
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
llama-cpp-darwin:
runs-on: macOS-14
strategy:
@@ -1031,7 +952,7 @@ jobs:
go-version: ['1.21.x']
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -1048,19 +969,22 @@ jobs:
- name: Build llama-cpp-darwin
run: |
make protogen-go
make backends/llama-cpp-darwin
make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
- name: Upload llama-cpp.tar
uses: actions/upload-artifact@v4
with:
name: llama-cpp-tar
path: backend-images/llama-cpp.tar
path: build/llama-cpp.tar
llama-cpp-darwin-publish:
needs: llama-cpp-darwin
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download llama-cpp.tar
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: llama-cpp-tar
path: .
@@ -1117,7 +1041,7 @@ jobs:
go-version: ['1.21.x']
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -1136,19 +1060,21 @@ jobs:
make protogen-go
make build
export PLATFORMARCH=darwin/amd64
make backends/llama-cpp-darwin
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
- name: Upload llama-cpp.tar
uses: actions/upload-artifact@v4
with:
name: llama-cpp-tar-x86
path: backend-images/llama-cpp.tar
path: build/llama-cpp.tar
llama-cpp-darwin-x86-publish:
if: github.event_name != 'pull_request'
needs: llama-cpp-darwin-x86
runs-on: ubuntu-latest
steps:
- name: Download llama-cpp.tar
uses: actions/download-artifact@v5
uses: actions/download-artifact@v4
with:
name: llama-cpp-tar-x86
path: .

View File

@@ -55,9 +55,9 @@ on:
type: string
secrets:
dockerUsername:
required: false
required: true
dockerPassword:
required: false
required: true
quayUsername:
required: true
quayPassword:
@@ -66,8 +66,6 @@ on:
jobs:
backend-build:
runs-on: ${{ inputs.runs-on }}
env:
quay_username: ${{ secrets.quayUsername }}
steps:
@@ -97,7 +95,7 @@ jobs:
&& sudo apt-get install -y git
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'
@@ -189,7 +187,7 @@ jobs:
password: ${{ secrets.dockerPassword }}
- name: Login to Quay.io
if: ${{ env.quay_username != '' }}
# if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
registry: quay.io
@@ -232,7 +230,7 @@ jobs:
file: ${{ inputs.dockerfile }}
cache-from: type=gha
platforms: ${{ inputs.platforms }}
push: ${{ env.quay_username != '' }}
push: true
tags: ${{ steps.meta_pull_request.outputs.tags }}
labels: ${{ steps.meta_pull_request.outputs.labels }}
@@ -240,4 +238,4 @@ jobs:
- name: job summary
run: |
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,140 +0,0 @@
---
name: 'build darwin python backend container images (reusable)'
on:
workflow_call:
inputs:
backend:
description: 'Backend to build'
required: true
type: string
build-type:
description: 'Build type (e.g., mps)'
default: ''
type: string
use-pip:
description: 'Use pip to install dependencies'
default: false
type: boolean
go-version:
description: 'Go version to use'
default: '1.24.x'
type: string
tag-suffix:
description: 'Tag suffix for the built image'
required: true
type: string
runs-on:
description: 'Runner to use'
default: 'macOS-14'
type: string
secrets:
dockerUsername:
required: false
dockerPassword:
required: false
quayUsername:
required: true
quayPassword:
required: true
jobs:
darwin-backend-build:
runs-on: ${{ inputs.runs-on }}
strategy:
matrix:
go-version: ['${{ inputs.go-version }}']
steps:
- name: Clone
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
# You can test your matrix by printing the current Go version
- name: Display Go version
run: go version
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
- name: Build ${{ inputs.backend }}-darwin
run: |
make protogen-go
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-python-backend
- name: Upload ${{ inputs.backend }}.tar
uses: actions/upload-artifact@v4
with:
name: ${{ inputs.backend }}-tar
path: backend-images/${{ inputs.backend }}.tar
darwin-backend-publish:
needs: darwin-backend-build
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download ${{ inputs.backend }}.tar
uses: actions/download-artifact@v5
with:
name: ${{ inputs.backend }}-tar
path: .
- name: Install crane
run: |
curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
sudo mv crane /usr/local/bin/
- name: Log in to DockerHub
run: |
echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin
- name: Log in to quay.io
run: |
echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
localai/localai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta
id: quaymeta
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/local-ai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Push Docker image (DockerHub)
run: |
for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
- name: Push Docker image (Quay)
run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done

View File

@@ -1,58 +0,0 @@
name: 'build backend container images (PR-filtered)'
on:
pull_request:
concurrency:
group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
has-backends: ${{ steps.set-matrix.outputs.has-backends }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Setup Bun
uses: oven-sh/setup-bun@v2
- name: Install dependencies
run: |
bun add js-yaml
bun add @octokit/core
# filters the matrix in backend.yml
- name: Filter matrix for changed backends
id: set-matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_EVENT_PATH: ${{ github.event_path }}
run: bun run scripts/changed-backends.js
backend-jobs:
needs: generate-matrix
uses: ./.github/workflows/backend_build.yml
if: needs.generate-matrix.outputs.has-backends == 'true'
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
backend: ${{ matrix.backend }}
dockerfile: ${{ matrix.dockerfile }}
skip-drivers: ${{ matrix.skip-drivers }}
context: ${{ matrix.context }}
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}

View File

@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go

View File

@@ -31,7 +31,7 @@ jobs:
file: "backend/go/piper/Makefile"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Bump dependencies 🔧
id: bump
run: |

View File

@@ -12,7 +12,7 @@ jobs:
- repository: "mudler/LocalAI"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Bump dependencies 🔧
run: |
bash .github/bump_docs.sh ${{ matrix.repository }}

View File

@@ -15,7 +15,7 @@ jobs:
&& sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \
&& sudo apt-get install -y git
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- name: Install dependencies
run: |
sudo apt-get update

View File

@@ -20,7 +20,7 @@ jobs:
skip-commit-verification: true
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Approve a PR if not already approved
run: |

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- uses: actions/setup-go@v5

View File

@@ -73,7 +73,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Cache GRPC
uses: docker/build-push-action@v6

View File

@@ -43,7 +43,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Cache Intel images
uses: docker/build-push-action@v6

View File

@@ -47,7 +47,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"

View File

@@ -39,7 +39,7 @@ jobs:
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"

View File

@@ -94,7 +94,7 @@ jobs:
&& sudo apt-get update \
&& sudo apt-get install -y git
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'

View File

@@ -13,7 +13,7 @@ jobs:
if: ${{ github.actor == 'localai-bot' }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
uses: actions/checkout@v4
- name: Approve a PR if not already approved
run: |

View File

@@ -11,7 +11,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
- uses: mudler/localai-github-action@v1
@@ -90,7 +90,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
- name: Start LocalAI

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go

View File

@@ -14,11 +14,11 @@ jobs:
GO111MODULE: on
steps:
- name: Checkout Source
uses: actions/checkout@v5
uses: actions/checkout@v4
if: ${{ github.actor != 'dependabot[bot]' }}
- name: Run Gosec Security Scanner
if: ${{ github.actor != 'dependabot[bot]' }}
uses: securego/gosec@v2.22.8
uses: securego/gosec@v2.22.7
with:
# we let the report trigger content trigger a failure using the GitHub Security features.
args: '-no-fail -fmt sarif -out results.sarif ./...'

View File

@@ -19,7 +19,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v5
# uses: actions/checkout@v4
# with:
# submodules: true
# - name: Dependencies
@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Dependencies
@@ -61,7 +61,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Dependencies
@@ -83,7 +83,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Dependencies
@@ -104,7 +104,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v5
# uses: actions/checkout@v4
# with:
# submodules: true
# - name: Dependencies
@@ -124,7 +124,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v5
# uses: actions/checkout@v4
# with:
# submodules: true
# - name: Dependencies
@@ -186,7 +186,7 @@ jobs:
# sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
# df -h
# - name: Clone
# uses: actions/checkout@v5
# uses: actions/checkout@v4
# with:
# submodules: true
# - name: Dependencies
@@ -211,7 +211,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v5
# uses: actions/checkout@v4
# with:
# submodules: true
# - name: Dependencies
@@ -232,7 +232,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Dependencies

View File

@@ -23,20 +23,6 @@ jobs:
matrix:
go-version: ['1.21.x']
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Release space from worker
run: |
echo "Listing top largest packages"
@@ -70,7 +56,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -166,7 +152,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Dependencies
@@ -196,7 +182,7 @@ jobs:
go-version: ['1.21.x']
steps:
- name: Clone
uses: actions/checkout@v5
uses: actions/checkout@v4
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -214,7 +200,11 @@ jobs:
- name: Build llama-cpp-darwin
run: |
make protogen-go
make backends/llama-cpp-darwin
make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
./local-ai backends install "ocifile://$PWD/build/llama-cpp.tar"
- name: Test
run: |
export C_INCLUDE_PATH=/usr/local/include

View File

@@ -9,7 +9,7 @@ jobs:
fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: 'stable'

View File

@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas-base libopenblas-dev && \
python3 python-is-python3 ffmpeg libopenblas-base libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*

View File

@@ -132,6 +132,39 @@ test: test-models/testmodel.ggml protogen-go
$(MAKE) test-tts
$(MAKE) test-stablediffusion
backends/diffusers: docker-build-diffusers docker-save-diffusers build
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
backends/kokoro: docker-build-kokoro docker-save-kokoro build
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
########################################################
## AIO tests
########################################################
@@ -324,59 +357,6 @@ docker-image-intel:
## Backends
########################################################
backends/diffusers: docker-build-diffusers docker-save-diffusers build
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
backends/kokoro: docker-build-kokoro docker-save-kokoro build
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
backends/llama-cpp-darwin: build
bash ./scripts/build/llama-cpp-darwin.sh
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
build-darwin-python-backend: build
bash ./scripts/build/python-darwin.sh
backends/mlx:
BACKEND=mlx $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx.tar)"
backends/diffuser-darwin:
BACKEND=diffusers $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/mlx-vlm:
BACKEND=mlx-vlm $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-vlm.tar)"
backend-images:
mkdir -p backend-images

View File

@@ -191,7 +191,6 @@ For more information, see [💻 Getting started](https://localai.io/basics/getti
## 📰 Latest project news
- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).

View File

@@ -23,7 +23,7 @@ RUN apt-get update && \
libssl-dev \
git \
git-lfs \
unzip clang \
unzip \
upx-ucl \
curl python3-pip \
python-is-python3 \
@@ -116,7 +116,7 @@ COPY python/${BACKEND} /${BACKEND}
COPY backend.proto /${BACKEND}/backend.proto
COPY python/common/ /${BACKEND}/common
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make
RUN cd /${BACKEND} && make
FROM scratch
ARG BACKEND=rerankers

View File

@@ -1,5 +1,5 @@
LLAMA_VERSION?=710dfc465a68f7443b87d9f792cffba00ed739fe
LLAMA_VERSION?=a0552c8beef74e843bb085c8ef0c63f9ed7a2b27
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?=
@@ -32,8 +32,10 @@ else ifeq ($(BUILD_TYPE),hipblas)
ROCM_PATH ?= /opt/rocm
export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
# GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
# AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=1
else ifeq ($(OS),Darwin)

View File

@@ -53,9 +53,9 @@ static void start_llama_server(server_context& ctx_server) {
LOG_INF("%s: model loaded\n", __func__);
// print sample chat example to make it clear which template is used
// LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
// common_chat_templates_source(ctx_server.chat_templates.get()),
// common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str(), ctx_server.params_base.default_template_kwargs);
LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
common_chat_templates_source(ctx_server.chat_templates.get()),
common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());
// Reset the chat templates
// TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM
@@ -437,7 +437,24 @@ public:
}
}
// process files
mtmd::bitmaps bitmaps;
const bool has_mtmd = ctx_server.mctx != nullptr;
{
if (!has_mtmd && !files.empty()) {
throw std::runtime_error("This server does not support multimodal");
}
for (auto & file : files) {
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
if (!bmp.ptr) {
throw std::runtime_error("Failed to load image/audio");
}
// calculate bitmap hash (for KV caching)
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
bmp.set_id(hash.c_str());
bitmaps.entries.push_back(std::move(bmp));
}
}
// process prompt
std::vector<server_tokens> inputs;
@@ -447,10 +464,32 @@ public:
if (has_mtmd) {
// multimodal
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
std::string prompt_str = prompt.get<std::string>();
mtmd_input_text inp_txt = {
prompt_str.c_str(),
/* add_special */ true,
/* parse_special */ true,
};
mtmd::input_chunks chunks(mtmd_input_chunks_init());
auto bitmaps_c_ptr = bitmaps.c_ptr();
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
chunks.ptr.get(),
&inp_txt,
bitmaps_c_ptr.data(),
bitmaps_c_ptr.size());
if (tokenized != 0) {
throw std::runtime_error("Failed to tokenize prompt");
}
server_tokens tmp(chunks, true);
inputs.push_back(std::move(tmp));
} else {
// Everything else, including multimodal completions.
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
// non-multimodal version
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (auto & p : tokenized_prompts) {
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
inputs.push_back(std::move(tmp));
}
}
tasks.reserve(inputs.size());
@@ -591,7 +630,23 @@ public:
}
// process files
mtmd::bitmaps bitmaps;
const bool has_mtmd = ctx_server.mctx != nullptr;
{
if (!has_mtmd && !files.empty()) {
throw std::runtime_error("This server does not support multimodal");
}
for (auto & file : files) {
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
if (!bmp.ptr) {
throw std::runtime_error("Failed to load image/audio");
}
// calculate bitmap hash (for KV caching)
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
bmp.set_id(hash.c_str());
bitmaps.entries.push_back(std::move(bmp));
}
}
// process prompt
std::vector<server_tokens> inputs;
@@ -602,10 +657,33 @@ public:
if (has_mtmd) {
// multimodal
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files));
std::string prompt_str = prompt.get<std::string>();
mtmd_input_text inp_txt = {
prompt_str.c_str(),
/* add_special */ true,
/* parse_special */ true,
};
mtmd::input_chunks chunks(mtmd_input_chunks_init());
auto bitmaps_c_ptr = bitmaps.c_ptr();
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
chunks.ptr.get(),
&inp_txt,
bitmaps_c_ptr.data(),
bitmaps_c_ptr.size());
if (tokenized != 0) {
std::cout << "[PREDICT] Failed to tokenize prompt" << std::endl;
throw std::runtime_error("Failed to tokenize prompt");
}
server_tokens tmp(chunks, true);
inputs.push_back(std::move(tmp));
} else {
// Everything else, including multimodal completions.
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
// non-multimodal version
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (auto & p : tokenized_prompts) {
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
inputs.push_back(std::move(tmp));
}
}
tasks.reserve(inputs.size());
@@ -696,7 +774,7 @@ public:
json prompt = body.at("prompt");
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true);
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (const auto & tokens : tokenized_prompts) {
// this check is necessary for models that do not add BOS token to the input
if (tokens.empty()) {
@@ -715,7 +793,7 @@ public:
task.id = ctx_server.queue_tasks.get_new_id();
task.index = i;
task.prompt_tokens = std::move(tokenized_prompts[i]);
task.prompt_tokens = server_tokens(tokenized_prompts[i], ctx_server.mctx != nullptr);
// OAI-compat
task.params.oaicompat = OAICOMPAT_TYPE_EMBEDDING;
@@ -771,10 +849,8 @@ public:
}
// Tokenize the query
auto tokenized_query = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, request->query(), /* add_special */ false, true);
if (tokenized_query.size() != 1) {
return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"query\" must contain only a single prompt");
}
llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.vocab, request->query(), /* add_special */ false, true)[0];
// Create and queue the task
json responses = json::array();
bool error = false;
@@ -786,14 +862,14 @@ public:
documents.push_back(request->documents(i));
}
auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, documents, /* add_special */ false, true);
auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, documents, /* add_special */ false, true);
tasks.reserve(tokenized_docs.size());
for (size_t i = 0; i < tokenized_docs.size(); i++) {
auto tmp = format_rerank(ctx_server.vocab, tokenized_query[0], tokenized_docs[i]);
auto tmp = format_rerank(ctx_server.vocab, tokenized_query, tokenized_docs[i]);
server_task task = server_task(SERVER_TASK_TYPE_RERANK);
task.id = ctx_server.queue_tasks.get_new_id();
task.index = i;
task.prompt_tokens = std::move(tmp);
task.prompt_tokens = server_tokens(tmp, ctx_server.mctx != nullptr);
tasks.push_back(std::move(task));
}

View File

@@ -42,8 +42,7 @@ fi
# Extend ld library path with the dir where this script is located/lib
if [ "$(uname)" == "Darwin" ]; then
export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH
#export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
else
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
fi
@@ -58,5 +57,5 @@ fi
echo "Using binary: $BINARY"
exec $CURDIR/$BINARY "$@"
# We should never reach this point, however just in case we do, run fallback
# In case we fail execing, just run fallback
exec $CURDIR/llama-cpp-fallback "$@"

View File

@@ -1,16 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(gosd LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
add_subdirectory(./sources/stablediffusion-ggml.cpp)
add_library(gosd MODULE gosd.cpp)
target_link_libraries(gosd PRIVATE stable-diffusion ggml stdc++fs)
target_include_directories(gosd PUBLIC
stable-diffusion.cpp
stable-diffusion.cpp/thirdparty
)
set_property(TARGET gosd PROPERTY CXX_STANDARD 17)
set_target_properties(gosd PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -1,16 +1,28 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
CMAKE_ARGS?=
BUILD_TYPE?=
NATIVE?=false
CUDA_LIBPATH?=/usr/local/cuda/lib64/
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC
GOCMD?=go
CGO_LDFLAGS?=
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
CGO_LDFLAGS_SYCL=
GO_TAGS?=
JOBS?=$(shell nproc --ignore=1)
LD_FLAGS?=
# stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=5900ef6605c6fbf7934239f795c13c97bc993853
CMAKE_ARGS+=-DGGML_MAX_NAME=128
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DGGML_MAX_NAME=128 -DSD_USE_SYSTEM_GGML=OFF
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
@@ -19,6 +31,7 @@ endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
@@ -33,12 +46,14 @@ else ifeq ($(BUILD_TYPE),hipblas)
# But if it's OSX without metal, disable it here
else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
CGO_LDFLAGS+=-lvulkan
else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF
else
CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
TARGET+=--target ggml-metal
endif
endif
@@ -48,6 +63,12 @@ ifeq ($(BUILD_TYPE),sycl_f16)
-DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON \
-DGGML_SYCL_F16=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif
ifeq ($(BUILD_TYPE),sycl_f32)
@@ -55,24 +76,73 @@ ifeq ($(BUILD_TYPE),sycl_f32)
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif
# warnings
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
# Find all .a archives in ARCHIVE_DIR
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
GGML_ARCHIVE_DIR := build/ggml/src/
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
ALL_OBJS := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.o')
# Name of the single merged library
COMBINED_LIB := libggmlall.a
# Instead of using the archives generated by GGML, use the object files directly to avoid overwriting objects with the same base name
$(COMBINED_LIB): $(ALL_ARCHIVES)
@echo "Merging all .o into $(COMBINED_LIB): $(ALL_OBJS)"
rm -f $@
ar -qc $@ $(ALL_OBJS)
# Ensure we have a proper index
ranlib $@
build/libstable-diffusion.a:
@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release"
else
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release
endif
$(MAKE) $(COMBINED_LIB)
gosd.o:
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
else
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
endif
## stablediffusion (ggml)
sources/stablediffusion-ggml.cpp:
git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
cd sources/stablediffusion-ggml.cpp && \
git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
libgosd.so: sources/stablediffusion-ggml.cpp CMakeLists.txt gosd.cpp gosd.h
mkdir -p build && \
cd build && \
cmake .. $(CMAKE_ARGS) && \
cmake --build . --config Release -j$(JOBS) && \
cd .. && \
mv build/libgosd.so ./
libsd.a: sources/stablediffusion-ggml.cpp build/libstable-diffusion.a gosd.o
cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
$(AR) rcs libsd.a gosd.o
stablediffusion-ggml: main.go gosd.go libgosd.so
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
stablediffusion-ggml: libsd.a
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
package:
bash package.sh
@@ -80,4 +150,4 @@ package:
build: stablediffusion-ggml package
clean:
rm -rf libgosd.o build stablediffusion-ggml
rm -rf gosd.o libsd.a build $(COMBINED_LIB)

View File

@@ -1,4 +1,3 @@
#include <cstdint>
#define GGML_MAX_NAME 128
#include <stdio.h>
@@ -58,7 +57,7 @@ sd_ctx_t* sd_c;
sample_method_t sample_method;
// Copied from the upstream CLI
static void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
//SDParams* params = (SDParams*)data;
const char* level_str;
@@ -89,33 +88,33 @@ static void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
fflush(stderr);
}
int load_model(const char *model, char *model_path, char* options[], int threads, int diff) {
fprintf (stderr, "Loading model: %p=%s\n", model, model);
int load_model(char *model, char *model_path, char* options[], int threads, int diff) {
fprintf (stderr, "Loading model!\n");
sd_set_log_callback(sd_log_cb, NULL);
const char *stableDiffusionModel = "";
char *stableDiffusionModel = "";
if (diff == 1 ) {
stableDiffusionModel = model;
model = "";
}
// decode options. Options are in form optname:optvale, or if booleans only optname.
const char *clip_l_path = "";
const char *clip_g_path = "";
const char *t5xxl_path = "";
const char *vae_path = "";
const char *scheduler = "";
const char *sampler = "";
char *clip_l_path = "";
char *clip_g_path = "";
char *t5xxl_path = "";
char *vae_path = "";
char *scheduler = "";
char *sampler = "";
char *lora_dir = model_path;
bool lora_dir_allocated = false;
fprintf(stderr, "parsing options: %p\n", options);
fprintf(stderr, "parsing options\n");
// If options is not NULL, parse options
for (int i = 0; options[i] != NULL; i++) {
const char *optname = strtok(options[i], ":");
const char *optval = strtok(NULL, ":");
char *optname = strtok(options[i], ":");
char *optval = strtok(NULL, ":");
if (optval == NULL) {
optval = "true";
}
@@ -148,8 +147,7 @@ int load_model(const char *model, char *model_path, char* options[], int threads
lora_dir_allocated = true;
fprintf(stderr, "Lora dir resolved to: %s\n", lora_dir);
} else {
lora_dir = strdup(optval);
lora_dir_allocated = true;
lora_dir = optval;
fprintf(stderr, "No model path provided, using lora dir as-is: %s\n", lora_dir);
}
}
@@ -228,7 +226,7 @@ int load_model(const char *model, char *model_path, char* options[], int threads
return 0;
}
int gen_image(char *text, char *negativeText, int width, int height, int steps, int64_t seed, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count) {
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed , char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count) {
sd_image_t* results;
@@ -254,14 +252,14 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
// Handle input image for img2img
bool has_input_image = (src_image != NULL && strlen(src_image) > 0);
bool has_mask_image = (mask_image != NULL && strlen(mask_image) > 0);
uint8_t* input_image_buffer = NULL;
uint8_t* mask_image_buffer = NULL;
std::vector<uint8_t> default_mask_image_vec;
if (has_input_image) {
fprintf(stderr, "Loading input image: %s\n", src_image);
int c = 0;
int img_width = 0;
int img_height = 0;
@@ -275,29 +273,29 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
free(input_image_buffer);
return 1;
}
// Resize input image if dimensions don't match
if (img_width != width || img_height != height) {
fprintf(stderr, "Resizing input image from %dx%d to %dx%d\n", img_width, img_height, width, height);
uint8_t* resized_image_buffer = (uint8_t*)malloc(height * width * 3);
if (resized_image_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized image\n");
free(input_image_buffer);
return 1;
}
stbir_resize(input_image_buffer, img_width, img_height, 0,
resized_image_buffer, width, height, 0, STBIR_TYPE_UINT8,
3, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(input_image_buffer);
input_image_buffer = resized_image_buffer;
}
p.init_image = {(uint32_t)width, (uint32_t)height, 3, input_image_buffer};
p.strength = strength;
fprintf(stderr, "Using img2img with strength: %.2f\n", strength);
@@ -306,11 +304,11 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
p.init_image = {(uint32_t)width, (uint32_t)height, 3, NULL};
p.strength = 0.0f;
}
// Handle mask image for inpainting
if (has_mask_image) {
fprintf(stderr, "Loading mask image: %s\n", mask_image);
int c = 0;
int mask_width = 0;
int mask_height = 0;
@@ -320,11 +318,11 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
if (input_image_buffer) free(input_image_buffer);
return 1;
}
// Resize mask if dimensions don't match
if (mask_width != width || mask_height != height) {
fprintf(stderr, "Resizing mask image from %dx%d to %dx%d\n", mask_width, mask_height, width, height);
uint8_t* resized_mask_buffer = (uint8_t*)malloc(height * width);
if (resized_mask_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized mask\n");
@@ -332,18 +330,18 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
if (input_image_buffer) free(input_image_buffer);
return 1;
}
stbir_resize(mask_image_buffer, mask_width, mask_height, 0,
resized_mask_buffer, width, height, 0, STBIR_TYPE_UINT8,
1, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(mask_image_buffer);
mask_image_buffer = resized_mask_buffer;
}
p.mask_image = {(uint32_t)width, (uint32_t)height, 1, mask_image_buffer};
fprintf(stderr, "Using inpainting with mask\n");
} else {
@@ -355,17 +353,17 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
// Handle reference images
std::vector<sd_image_t> ref_images_vec;
std::vector<uint8_t*> ref_image_buffers;
if (ref_images_count > 0 && ref_images != NULL) {
fprintf(stderr, "Loading %d reference images\n", ref_images_count);
for (int i = 0; i < ref_images_count; i++) {
if (ref_images[i] == NULL || strlen(ref_images[i]) == 0) {
continue;
}
fprintf(stderr, "Loading reference image %d: %s\n", i + 1, ref_images[i]);
int c = 0;
int ref_width = 0;
int ref_height = 0;
@@ -379,33 +377,33 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
free(ref_image_buffer);
continue;
}
// Resize reference image if dimensions don't match
if (ref_width != width || ref_height != height) {
fprintf(stderr, "Resizing reference image from %dx%d to %dx%d\n", ref_width, ref_height, width, height);
uint8_t* resized_ref_buffer = (uint8_t*)malloc(height * width * 3);
if (resized_ref_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized reference image\n");
free(ref_image_buffer);
continue;
}
stbir_resize(ref_image_buffer, ref_width, ref_height, 0,
resized_ref_buffer, width, height, 0, STBIR_TYPE_UINT8,
3, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(ref_image_buffer);
ref_image_buffer = resized_ref_buffer;
}
ref_image_buffers.push_back(ref_image_buffer);
ref_images_vec.push_back({(uint32_t)width, (uint32_t)height, 3, ref_image_buffer});
}
if (!ref_images_vec.empty()) {
p.ref_images = ref_images_vec.data();
p.ref_images_count = ref_images_vec.size();
@@ -456,12 +454,12 @@ int gen_image(char *text, char *negativeText, int width, int height, int steps,
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
fprintf (stderr, "gen_image is done: %s", dst);
fprintf (stderr, "gen_image is done", dst);
return 0;
}
int unload() {
free_sd_ctx(sd_c);
return 0;
}

View File

@@ -1,10 +1,15 @@
package main
// #cgo CXXFLAGS: -I${SRCDIR}/sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/sources/stablediffusion-ggml.cpp -I${SRCDIR}/sources/stablediffusion-ggml.cpp/ggml/include
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
// #include <gosd.h>
// #include <stdlib.h>
import "C"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"unsafe"
@@ -20,34 +25,25 @@ type SDGGML struct {
cfgScale float32
}
var (
LoadModel func(model, model_apth string, options []uintptr, threads int32, diff int) int
GenImage func(text, negativeText string, width, height, steps int, seed int64, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []string, refImagesCount int) int
)
// Copied from Purego internal/strings
// TODO: We should upstream sending []string
func hasSuffix(s, suffix string) bool {
return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
func CString(name string) *byte {
if hasSuffix(name, "\x00") {
return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
}
b := make([]byte, len(name)+1)
copy(b, name)
return &b[0]
}
func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
sd.threads = int(opts.Threads)
modelPath := opts.ModelPath
modelFile := opts.ModelFile
modelPathC := modelPath
modelFile := C.CString(opts.ModelFile)
defer C.free(unsafe.Pointer(modelFile))
modelPathC := C.CString(modelPath)
defer C.free(unsafe.Pointer(modelPathC))
var options **C.char
// prepare the options array to pass to C
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
length := C.size_t(len(opts.Options))
options = (**C.char)(C.malloc((length + 1) * size))
view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0 : len(opts.Options)+1 : len(opts.Options)+1]
var diffusionModel int
@@ -72,55 +68,81 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
// At the time of writing Purego doesn't recurse into slices and convert Go strings to pointers so we need to do that
var keepAlive []any
options := make([]uintptr, len(oo), len(oo)+1)
for i, op := range oo {
bytep := CString(op)
options[i] = uintptr(unsafe.Pointer(bytep))
keepAlive = append(keepAlive, bytep)
for i, x := range oo {
view[i] = C.CString(x)
}
view[len(oo)] = nil
sd.cfgScale = opts.CFGScale
ret := LoadModel(modelFile, modelPathC, options, opts.Threads, diffusionModel)
ret := C.load_model(modelFile, modelPathC, options, C.int(opts.Threads), C.int(diffusionModel))
if ret != 0 {
return fmt.Errorf("could not load model")
}
runtime.KeepAlive(keepAlive)
return nil
}
func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
t := opts.PositivePrompt
dst := opts.Dst
negative := opts.NegativePrompt
srcImage := opts.Src
t := C.CString(opts.PositivePrompt)
defer C.free(unsafe.Pointer(t))
var maskImage string
dst := C.CString(opts.Dst)
defer C.free(unsafe.Pointer(dst))
negative := C.CString(opts.NegativePrompt)
defer C.free(unsafe.Pointer(negative))
// Handle source image path
var srcImage *C.char
if opts.Src != "" {
srcImage = C.CString(opts.Src)
defer C.free(unsafe.Pointer(srcImage))
}
// Handle mask image path
var maskImage *C.char
if opts.EnableParameters != "" {
// Parse EnableParameters for mask path if provided
// This is a simple approach - in a real implementation you might want to parse JSON
if strings.Contains(opts.EnableParameters, "mask:") {
parts := strings.Split(opts.EnableParameters, "mask:")
if len(parts) > 1 {
maskPath := strings.TrimSpace(parts[1])
if maskPath != "" {
maskImage = maskPath
maskImage = C.CString(maskPath)
defer C.free(unsafe.Pointer(maskImage))
}
}
}
}
refImagesCount := len(opts.RefImages)
refImages := make([]string, refImagesCount, refImagesCount+1)
copy(refImages, opts.RefImages)
*(*uintptr)(unsafe.Add(unsafe.Pointer(&refImages), refImagesCount)) = 0
// Handle reference images
var refImages **C.char
var refImagesCount C.int
if len(opts.RefImages) > 0 {
refImagesCount = C.int(len(opts.RefImages))
// Allocate array of C strings
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
refImages = (**C.char)(C.malloc((C.size_t(len(opts.RefImages)) + 1) * size))
view := (*[1 << 30]*C.char)(unsafe.Pointer(refImages))[0 : len(opts.RefImages)+1 : len(opts.RefImages)+1]
for i, refImagePath := range opts.RefImages {
view[i] = C.CString(refImagePath)
defer C.free(unsafe.Pointer(view[i]))
}
view[len(opts.RefImages)] = nil
}
// Default strength for img2img (0.75 is a good default)
strength := float32(0.75)
strength := C.float(0.75)
if opts.Src != "" {
// If we have a source image, use img2img mode
// You could also parse strength from EnableParameters if needed
strength = C.float(0.75)
}
ret := GenImage(t, negative, int(opts.Width), int(opts.Height), int(opts.Step), int64(opts.Seed), dst, sd.cfgScale, srcImage, strength, maskImage, refImages, refImagesCount)
ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale), srcImage, strength, maskImage, refImages, refImagesCount)
if ret != 0 {
return fmt.Errorf("inference failed")
}

View File

@@ -1,8 +1,8 @@
#ifdef __cplusplus
extern "C" {
#endif
int load_model(const char *model, char *model_path, char* options[], int threads, int diffusionModel);
int gen_image(char *text, char *negativeText, int width, int height, int steps, int64_t seed, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count);
int load_model(char *model, char *model_path, char* options[], int threads, int diffusionModel);
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -1,9 +1,9 @@
package main
// Note: this is started internally by LocalAI and a server is allocated for each model
import (
"flag"
"github.com/ebitengine/purego"
grpc "github.com/mudler/LocalAI/pkg/grpc"
)
@@ -12,14 +12,6 @@ var (
)
func main() {
gosd, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
if err != nil {
panic(err)
}
purego.RegisterLibFunc(&LoadModel, gosd, "load_model")
purego.RegisterLibFunc(&GenImage, gosd, "gen_image")
flag.Parse()
if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {

View File

@@ -10,7 +10,6 @@ CURDIR=$(dirname "$(realpath $0)")
# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avrf $CURDIR/libgosd.so $CURDIR/package/
cp -avrf $CURDIR/stablediffusion-ggml $CURDIR/package/
cp -rfv $CURDIR/run.sh $CURDIR/package/
@@ -48,6 +47,6 @@ else
exit 1
fi
echo "Packaging completed successfully"
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/
ls -liah $CURDIR/package/lib/

View File

@@ -6,7 +6,7 @@ CMAKE_ARGS?=
# whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=fc45bb86251f774ef817e89878bb4c2636c8a58f
WHISPER_CPP_VERSION?=4245c77b654cd384ad9f53a4a302be716b3e5861
export WHISPER_CMAKE_ARGS?=-DBUILD_SHARED_LIBS=OFF
export WHISPER_DIR=$(abspath ./sources/whisper.cpp)

View File

@@ -127,38 +127,6 @@
nvidia: "cuda12-vllm"
amd: "rocm-vllm"
intel: "intel-vllm"
- &mlx
name: "mlx"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx"
icon: https://avatars.githubusercontent.com/u/102832242?s=200&v=4
urls:
- https://github.com/ml-explore/mlx-lm
mirrors:
- localai/localai-backends:latest-metal-darwin-arm64-mlx
license: MIT
description: |
Run LLMs with MLX
tags:
- text-to-text
- LLM
- MLX
- &mlx-vlm
name: "mlx-vlm"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx-vlm"
icon: https://avatars.githubusercontent.com/u/102832242?s=200&v=4
urls:
- https://github.com/ml-explore/mlx-vlm
mirrors:
- localai/localai-backends:latest-metal-darwin-arm64-mlx-vlm
license: MIT
description: |
Run Vision-Language Models with MLX
tags:
- text-to-text
- multimodal
- vision-language
- LLM
- MLX
- &rerankers
name: "rerankers"
alias: "rerankers"
@@ -183,8 +151,6 @@
nvidia: "cuda12-transformers"
intel: "intel-transformers"
amd: "rocm-transformers"
metal: "metal-transformers"
default: "cpu-transformers"
- &diffusers
name: "diffusers"
icon: https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/en/imgs/diffusers_library.jpg
@@ -203,8 +169,6 @@
intel: "intel-diffusers"
amd: "rocm-diffusers"
nvidia-l4t: "nvidia-l4t-diffusers"
metal: "metal-diffusers"
default: "cpu-diffusers"
- &exllama2
name: "exllama2"
urls:
@@ -407,16 +371,6 @@
- text-to-speech
- TTS
license: apache-2.0
- !!merge <<: *mlx
name: "mlx-development"
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-mlx"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-mlx
- !!merge <<: *mlx-vlm
name: "mlx-vlm-development"
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-mlx-vlm"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-mlx-vlm
- !!merge <<: *kitten-tts
name: "kitten-tts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-kitten-tts"
@@ -853,28 +807,6 @@
nvidia: "cuda12-transformers-development"
intel: "intel-transformers-development"
amd: "rocm-transformers-development"
default: "cpu-transformers-development"
metal: "metal-transformers-development"
- !!merge <<: *transformers
name: "cpu-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-transformers"
mirrors:
- localai/localai-backends:latest-cpu-transformers
- !!merge <<: *transformers
name: "cpu-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-transformers"
mirrors:
- localai/localai-backends:master-cpu-transformers
- !!merge <<: *transformers
name: "metal-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-transformers"
mirrors:
- localai/localai-backends:latest-metal-darwin-arm64-transformers
- !!merge <<: *transformers
name: "metal-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-transformers"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-transformers
- !!merge <<: *transformers
name: "cuda12-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-transformers"
@@ -923,18 +855,6 @@
intel: "intel-diffusers-development"
amd: "rocm-diffusers-development"
nvidia-l4t: "nvidia-l4t-diffusers-development"
metal: "metal-diffusers-development"
default: "cpu-diffusers-development"
- !!merge <<: *diffusers
name: "cpu-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-diffusers"
mirrors:
- localai/localai-backends:latest-cpu-diffusers
- !!merge <<: *diffusers
name: "cpu-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-diffusers"
mirrors:
- localai/localai-backends:master-cpu-diffusers
- !!merge <<: *diffusers
name: "nvidia-l4t-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-diffusers"
@@ -985,16 +905,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-diffusers"
mirrors:
- localai/localai-backends:master-gpu-intel-diffusers
- !!merge <<: *diffusers
name: "metal-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-diffusers"
mirrors:
- localai/localai-backends:latest-metal-darwin-arm64-diffusers
- !!merge <<: *diffusers
name: "metal-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-diffusers"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-diffusers
## exllama2
- !!merge <<: *exllama2
name: "exllama2-development"

View File

@@ -1,23 +1,29 @@
.PHONY: ttsbark
ttsbark:
ttsbark: protogen
bash install.sh
.PHONY: run
run: ttsbark
run: protogen
@echo "Running bark..."
bash run.sh
@echo "bark run."
.PHONY: test
test: ttsbark
test: protogen
@echo "Testing bark..."
bash test.sh
@echo "bark tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,5 +1,5 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu

View File

@@ -1,4 +1,4 @@
bark==0.1.5
grpcio==1.74.0
grpcio==1.71.0
protobuf
certifi

View File

@@ -1,23 +1,29 @@
.PHONY: chatterbox
chatterbox:
.PHONY: coqui
coqui: protogen
bash install.sh
.PHONY: run
run: chatterbox
run: protogen
@echo "Running coqui..."
bash run.sh
@echo "coqui run."
.PHONY: test
test: chatterbox
test: protogen
@echo "Testing coqui..."
bash test.sh
@echo "coqui tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -41,9 +41,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
else:
print("CUDA is not available", file=sys.stderr)
device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available")

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail
# init handles the setup of the library
#
# use the library by adding the following line to a script:
# source $(dirname $0)/../common/libbackend.sh
@@ -17,182 +17,29 @@ set -euo pipefail
# LIMIT_TARGETS="cublas12"
# source $(dirname $0)/../common/libbackend.sh
#
# You can switch between uv (conda-like) and pip installation methods by setting USE_PIP:
# USE_PIP=true source $(dirname $0)/../common/libbackend.sh
#
# ===================== user-configurable defaults =====================
PYTHON_VERSION="${PYTHON_VERSION:-3.10}" # e.g. 3.10 / 3.11 / 3.12 / 3.13
PYTHON_PATCH="${PYTHON_PATCH:-18}" # e.g. 18 -> 3.10.18 ; 13 -> 3.11.13
PY_STANDALONE_TAG="${PY_STANDALONE_TAG:-20250818}" # release tag date
# Enable/disable bundling of a portable Python build
PORTABLE_PYTHON="${PORTABLE_PYTHON:-false}"
# If you want to fully pin the filename (including tuned CPU targets), set:
# PORTABLE_PY_FILENAME="cpython-3.10.18+20250818-x86_64_v3-unknown-linux-gnu-install_only.tar.gz"
: "${PORTABLE_PY_FILENAME:=}"
: "${PORTABLE_PY_SHA256:=}" # optional; if set we verify the download
# =====================================================================
PYTHON_VERSION="3.10"
# Default to uv if USE_PIP is not set
if [ "x${USE_PIP:-}" == "x" ]; then
USE_PIP=false
fi
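# Example: a minimal, hypothetical backend script consuming this library.
# All knobs are optional and must be set before sourcing; the paths, targets
# and filenames below are illustrative, not taken from any real backend.
#
#   #!/bin/bash
#   set -e
#   LIMIT_TARGETS="cublas12 hipblas"   # restrict to these build profiles
#   USE_PIP=true                       # install with pip instead of uv
#   PORTABLE_PYTHON=true               # portable-interpreter variant only
#   backend_dir=$(dirname "$0")
#   if [ -d "$backend_dir/common" ]; then
#     source "$backend_dir/common/libbackend.sh"
#   else
#     source "$backend_dir/../common/libbackend.sh"
#   fi
#   installRequirements                # create/activate the venv and install requirements*.txt
#   startBackend "$@"                  # locate server.py / backend.py / ${BACKEND_NAME}.py and exec it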
# ----------------------- helpers -----------------------
function _is_musl() {
# detect musl (Alpine, etc)
if command -v ldd >/dev/null 2>&1; then
ldd --version 2>&1 | grep -qi musl && return 0
fi
# busybox-ish fallback
if command -v getconf >/dev/null 2>&1; then
getconf GNU_LIBC_VERSION >/dev/null 2>&1 || return 0
fi
return 1
}
function _triple() {
local os="" arch="" libc="gnu"
case "$(uname -s)" in
Linux*) os="unknown-linux" ;;
Darwin*) os="apple-darwin" ;;
MINGW*|MSYS*|CYGWIN*) os="pc-windows-msvc" ;; # best-effort for Git Bash
*) echo "Unsupported OS $(uname -s)"; exit 1;;
esac
case "$(uname -m)" in
x86_64) arch="x86_64" ;;
aarch64|arm64) arch="aarch64" ;;
armv7l) arch="armv7" ;;
i686|i386) arch="i686" ;;
ppc64le) arch="ppc64le" ;;
s390x) arch="s390x" ;;
riscv64) arch="riscv64" ;;
*) echo "Unsupported arch $(uname -m)"; exit 1;;
esac
if [[ "$os" == "unknown-linux" ]]; then
if _is_musl; then
libc="musl"
else
libc="gnu"
fi
echo "${arch}-${os}-${libc}"
else
echo "${arch}-${os}"
fi
}
function _portable_dir() {
echo "${EDIR}/python"
}
function _portable_bin() {
# python-build-standalone puts python in ./bin
echo "$(_portable_dir)/bin"
}
function _portable_python() {
if [ -x "$(_portable_bin)/python3" ]; then
echo "$(_portable_bin)/python3"
else
echo "$(_portable_bin)/python"
fi
}
# macOS loader env for the portable CPython
_macosPortableEnv() {
if [ "$(uname -s)" = "Darwin" ]; then
export DYLD_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}"
export DYLD_FALLBACK_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_FALLBACK_LIBRARY_PATH:+:${DYLD_FALLBACK_LIBRARY_PATH}}"
fi
}
# Good hygiene on macOS for downloaded/extracted trees
_unquarantinePortablePython() {
if [ "$(uname -s)" = "Darwin" ]; then
command -v xattr >/dev/null 2>&1 && xattr -dr com.apple.quarantine "$(_portable_dir)" || true
fi
}
# ------------------ ### PORTABLE PYTHON ------------------
function ensurePortablePython() {
local pdir="$(_portable_dir)"
local pbin="$(_portable_bin)"
local pyexe
if [ -x "${pbin}/python3" ] || [ -x "${pbin}/python" ]; then
_macosPortableEnv
return 0
fi
mkdir -p "${pdir}"
local triple="$(_triple)"
local full_ver="${PYTHON_VERSION}.${PYTHON_PATCH}"
local fn=""
if [ -n "${PORTABLE_PY_FILENAME}" ]; then
fn="${PORTABLE_PY_FILENAME}"
else
# generic asset name: cpython-<full_ver>+<tag>-<triple>-install_only.tar.gz
fn="cpython-${full_ver}+${PY_STANDALONE_TAG}-${triple}-install_only.tar.gz"
fi
local url="https://github.com/astral-sh/python-build-standalone/releases/download/${PY_STANDALONE_TAG}/${fn}"
local tmp="${pdir}/${fn}"
echo "Downloading portable Python: ${fn}"
# curl with retries; fall back to wget if needed
if command -v curl >/dev/null 2>&1; then
curl -L --fail --retry 3 --retry-delay 1 -o "${tmp}" "${url}"
else
wget -O "${tmp}" "${url}"
fi
if [ -n "${PORTABLE_PY_SHA256}" ]; then
echo "${PORTABLE_PY_SHA256} ${tmp}" | sha256sum -c -
fi
echo "Extracting ${fn} -> ${pdir}"
# always a .tar.gz (we purposely choose install_only)
tar -xzf "${tmp}" -C "${pdir}"
rm -f "${tmp}"
# Some archives nest a directory; if so, flatten to ${pdir}
# Find the first dir with a 'bin/python*'
local inner
inner="$(find "${pdir}" -type f -path "*/bin/python*" -maxdepth 3 2>/dev/null | head -n1 || true)"
if [ -n "${inner}" ]; then
local inner_root
inner_root="$(dirname "$(dirname "${inner}")")" # .../bin -> root
if [ "${inner_root}" != "${pdir}" ]; then
# move contents up one level
shopt -s dotglob
mv "${inner_root}/"* "${pdir}/"
rm -rf "${inner_root}"
shopt -u dotglob
fi
fi
_unquarantinePortablePython
_macosPortableEnv
# Make sure it's runnable
pyexe="$(_portable_python)"
"${pyexe}" -V
}
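# Example: fully pin the portable interpreter (values are illustrative; the
# checksum is a placeholder, not a verified digest):
#   PORTABLE_PYTHON=true \
#   PORTABLE_PY_FILENAME="cpython-3.10.18+20250818-x86_64_v3-unknown-linux-gnu-install_only.tar.gz" \
#   PORTABLE_PY_SHA256="<expected sha256>" \
#   source "$backend_dir/../common/libbackend.sh"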
# init handles the setup of the library
function init() {
# Name of the backend (directory name)
BACKEND_NAME=${PWD##*/}
MY_DIR=$(realpath "$(dirname "$0")")
# Path where all backends files are
MY_DIR=$(realpath `dirname $0`)
# Build type
BUILD_PROFILE=$(getBuildProfile)
# Environment directory
EDIR=${MY_DIR}
if [ "x${ENV_DIR:-}" != "x" ]; then
# Allow to specify a custom env dir for shared environments
if [ "x${ENV_DIR}" != "x" ]; then
EDIR=${ENV_DIR}
fi
if [ ! -z "${LIMIT_TARGETS:-}" ]; then
# If a backend has defined a list of valid build profiles...
if [ ! -z "${LIMIT_TARGETS}" ]; then
isValidTarget=$(checkTargets ${LIMIT_TARGETS})
if [ ${isValidTarget} != true ]; then
echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}"
@@ -203,7 +50,6 @@ function init() {
echo "Initializing libbackend for ${BACKEND_NAME}"
}
# getBuildProfile will inspect the system to determine which build profile is appropriate:
# returns one of the following:
# - cublas11
@@ -211,139 +57,58 @@ function init() {
# - hipblas
# - intel
function getBuildProfile() {
if [ x"${BUILD_TYPE:-}" == "xcublas" ]; then
if [ ! -z "${CUDA_MAJOR_VERSION:-}" ]; then
if [ "x${BUILD_TYPE}" == "xl4t" ]; then
echo "l4t"
return 0
fi
# First check if we are a cublas build, and if so report the correct build profile
if [ x"${BUILD_TYPE}" == "xcublas" ]; then
if [ ! -z ${CUDA_MAJOR_VERSION} ]; then
# If we have been given a CUDA version, we trust it
echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION}
else
# We don't know what version of cuda we are, so we report ourselves as a generic cublas
echo ${BUILD_TYPE}
fi
return 0
fi
# If /opt/intel exists, then we are doing an intel/ARC build
if [ -d "/opt/intel" ]; then
echo "intel"
return 0
fi
if [ -n "${BUILD_TYPE:-}" ]; then
# If for any other values of BUILD_TYPE, we don't need any special handling/discovery
if [ ! -z ${BUILD_TYPE} ]; then
echo ${BUILD_TYPE}
return 0
fi
# If there is no BUILD_TYPE set at all, set a build-profile value of CPU, we aren't building for any GPU targets
echo "cpu"
}
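# Illustrative mapping, assuming the library has been sourced (follows the branches above):
#   BUILD_TYPE=l4t                                -> l4t
#   BUILD_TYPE=cublas, CUDA_MAJOR_VERSION=12      -> cublas12
#   BUILD_TYPE=cublas, CUDA_MAJOR_VERSION unset   -> cublas
#   /opt/intel present (non-cublas build)         -> intel
#   BUILD_TYPE=hipblas, no /opt/intel             -> hipblas
#   BUILD_TYPE unset, no /opt/intel               -> cpu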
# Make the venv relocatable:
# - rewrite venv/bin/python{,3} to relative symlinks into $(_portable_dir)
# - normalize entrypoint shebangs to /usr/bin/env python3
_makeVenvPortable() {
local venv_dir="${EDIR}/venv"
local vbin="${venv_dir}/bin"
[ -d "${vbin}" ] || return 0
# 1) Replace python symlinks with relative ones to ../../python/bin/python3
# (venv/bin -> venv -> EDIR -> python/bin)
local rel_py='../../python/bin/python3'
for name in python3 python; do
if [ -e "${vbin}/${name}" ] || [ -L "${vbin}/${name}" ]; then
rm -f "${vbin}/${name}"
fi
done
ln -s "${rel_py}" "${vbin}/python3"
ln -s "python3" "${vbin}/python"
# 2) Rewrite shebangs of entry points to use env, so the venv is relocatable
# Only touch text files that start with #! and reference the current venv.
local ve_abs="${vbin}/python"
local sed_i=(sed -i)
# macOS/BSD sed needs a backup suffix; GNU sed doesn't. Make it portable:
if sed --version >/dev/null 2>&1; then
sed_i=(sed -i)
else
sed_i=(sed -i '')
fi
for f in "${vbin}"/*; do
[ -f "$f" ] || continue
# Fast path: check first two bytes (#!)
head -c2 "$f" 2>/dev/null | grep -q '^#!' || continue
# Only rewrite if the shebang mentions the (absolute) venv python
if head -n1 "$f" | grep -Fq "${ve_abs}"; then
"${sed_i[@]}" '1s|^#!.*$|#!/usr/bin/env python3|' "$f"
chmod +x "$f" 2>/dev/null || true
fi
done
}
# ensureVenv makes sure that the venv for the backend both exists, and is activated.
#
# This function is idempotent, so you can call it as many times as you want and it will
# always result in an activated virtual environment
function ensureVenv() {
local interpreter=""
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
ensurePortablePython
interpreter="$(_portable_python)"
else
# Prefer system python${PYTHON_VERSION}, else python3, else fall back to bundled
if command -v python${PYTHON_VERSION} >/dev/null 2>&1; then
interpreter="python${PYTHON_VERSION}"
elif command -v python3 >/dev/null 2>&1; then
interpreter="python3"
else
echo "No suitable system Python found, bootstrapping portable build..."
ensurePortablePython
interpreter="$(_portable_python)"
fi
fi
if [ ! -d "${EDIR}/venv" ]; then
if [ "x${USE_PIP}" == "xtrue" ]; then
"${interpreter}" -m venv --copies "${EDIR}/venv"
source "${EDIR}/venv/bin/activate"
"${interpreter}" -m pip install --upgrade pip
else
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
uv venv --python "${interpreter}" "${EDIR}/venv"
else
uv venv --python "${PYTHON_VERSION}" "${EDIR}/venv"
fi
fi
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
_makeVenvPortable
fi
uv venv --python ${PYTHON_VERSION} ${EDIR}/venv
echo "virtualenv created"
fi
# We call it here to make sure that when we source a venv we can still use python as expected
if [ -x "$(_portable_python)" ]; then
_macosPortableEnv
# Source if we are not already in a Virtual env
if [ "x${VIRTUAL_ENV}" != "x${EDIR}/venv" ]; then
source ${EDIR}/venv/bin/activate
echo "virtualenv activated"
fi
if [ "x${VIRTUAL_ENV:-}" != "x${EDIR}/venv" ]; then
source "${EDIR}/venv/bin/activate"
fi
echo "activated virtualenv has been ensured"
}
function runProtogen() {
ensureVenv
if [ "x${USE_PIP}" == "xtrue" ]; then
pip install grpcio-tools
else
uv pip install grpcio-tools
fi
pushd "${EDIR}" >/dev/null
# use the venv python (ensures correct interpreter & sys.path)
python -m grpc_tools.protoc -I../../ -I./ --python_out=. --grpc_python_out=. backend.proto
popd >/dev/null
}
# installRequirements looks for several requirements files and if they exist runs the install for them in order
#
# - requirements-install.txt
@@ -351,7 +116,7 @@ function runProtogen() {
# - requirements-${BUILD_TYPE}.txt
# - requirements-${BUILD_PROFILE}.txt
#
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
# BUILD_PROFILE is a pore specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
# it can also include some options that we do not have BUILD_TYPES for, ex: intel
#
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
@@ -367,36 +132,36 @@ function runProtogen() {
# installRequirements
function installRequirements() {
ensureVenv
# These are the requirements files we will attempt to install, in order
declare -a requirementFiles=(
"${EDIR}/requirements-install.txt"
"${EDIR}/requirements.txt"
"${EDIR}/requirements-${BUILD_TYPE:-}.txt"
"${EDIR}/requirements-${BUILD_TYPE}.txt"
)
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt")
fi
if [ "x${BUILD_TYPE:-}" == "x" ]; then
# if BUILD_TYPE is empty, we are a CPU build, so we should try to install the CPU requirements
if [ "x${BUILD_TYPE}" == "x" ]; then
requirementFiles+=("${EDIR}/requirements-cpu.txt")
fi
requirementFiles+=("${EDIR}/requirements-after.txt")
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt")
fi
for reqFile in ${requirementFiles[@]}; do
if [ -f "${reqFile}" ]; then
if [ -f ${reqFile} ]; then
echo "starting requirements install for ${reqFile}"
if [ "x${USE_PIP}" == "xtrue" ]; then
pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
else
uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
fi
uv pip install ${EXTRA_PIP_INSTALL_FLAGS} --requirement ${reqFile}
echo "finished requirements install for ${reqFile}"
fi
done
runProtogen
}
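# For illustration, with BUILD_TYPE=cublas and BUILD_PROFILE=cublas12 the list above
# resolves to the following order (only files that actually exist are installed):
#   ${EDIR}/requirements-install.txt
#   ${EDIR}/requirements.txt
#   ${EDIR}/requirements-cublas.txt
#   ${EDIR}/requirements-cublas12.txt
#   ${EDIR}/requirements-after.txt
#   ${EDIR}/requirements-cublas12-after.txt
# A CPU build (empty BUILD_TYPE) additionally appends ${EDIR}/requirements-cpu.txt
# before the -after files.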
# startBackend discovers and runs the backend GRPC server
@@ -414,18 +179,18 @@ function installRequirements() {
# - ${BACKEND_NAME}.py
function startBackend() {
ensureVenv
if [ ! -z "${BACKEND_FILE:-}" ]; then
exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@"
if [ ! -z ${BACKEND_FILE} ]; then
exec ${EDIR}/venv/bin/python ${BACKEND_FILE} $@
elif [ -e "${MY_DIR}/server.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/server.py" "$@"
exec ${EDIR}/venv/bin/python ${MY_DIR}/server.py $@
elif [ -e "${MY_DIR}/backend.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/backend.py" "$@"
exec ${EDIR}/venv/bin/python ${MY_DIR}/backend.py $@
elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/${BACKEND_NAME}.py" "$@"
exec ${EDIR}/venv/bin/python ${MY_DIR}/${BACKEND_NAME}.py $@
fi
}
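# Example invocations (hypothetical paths; extra arguments are passed straight
# through to the python entrypoint):
#   startBackend --addr localhost:50051
#   BACKEND_FILE="${MY_DIR}/my_server.py" startBackend --addr localhost:50051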
# runUnittests discovers and runs python unittests
#
# You can specify a specific test file to use by setting TEST_FILE before calling runUnittests.
@@ -438,36 +203,41 @@ function startBackend() {
# be default a file named test.py in the backends directory will be used
function runUnittests() {
ensureVenv
if [ ! -z "${TEST_FILE:-}" ]; then
testDir=$(dirname "$(realpath "${TEST_FILE}")")
testFile=$(basename "${TEST_FILE}")
pushd "${testDir}" >/dev/null
python -m unittest "${testFile}"
popd >/dev/null
if [ ! -z ${TEST_FILE} ]; then
testDir=$(dirname `realpath ${TEST_FILE}`)
testFile=$(basename ${TEST_FILE})
pushd ${testDir}
python -m unittest ${testFile}
popd
elif [ -f "${MY_DIR}/test.py" ]; then
pushd "${MY_DIR}" >/dev/null
pushd ${MY_DIR}
python -m unittest test.py
popd >/dev/null
popd
else
echo "no tests defined for ${BACKEND_NAME}"
fi
}
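# Example: run the default ${MY_DIR}/test.py, or point at a specific module
# (hypothetical path) via TEST_FILE:
#   runUnittests
#   TEST_FILE="./tests/test_backend.py" runUnittests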
##################################################################################
# Below here are helper functions not intended to be used outside of the library #
##################################################################################
# checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets
function checkTargets() {
# Collect all provided targets into a variable and...
targets=$@
# ...convert it into an array
declare -a targets=($targets)
for target in ${targets[@]}; do
if [ "x${BUILD_TYPE:-}" == "x${target}" ]; then
echo true; return 0
if [ "x${BUILD_TYPE}" == "x${target}" ]; then
echo true
return 0
fi
if [ "x${BUILD_PROFILE}" == "x${target}" ]; then
echo true; return 0
echo true
return 0
fi
done
echo false

View File

@@ -3,11 +3,18 @@
.PHONY: install
install:
bash install.sh
$(MAKE) protogen
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
bash protogen.sh
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -8,4 +8,6 @@ else
source $backend_dir/../common/libbackend.sh
fi
runProtogen
ensureVenv
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto

View File

@@ -1,5 +1,5 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu
torch==2.8.0
oneccl_bind_pt==2.8.0+xpu
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu
optimum[openvino]

View File

@@ -1,3 +1,3 @@
grpcio==1.74.0
grpcio==1.71.0
protobuf
grpcio-tools

View File

@@ -1,23 +1,29 @@
.PHONY: coqui
coqui:
coqui: protogen
bash install.sh
.PHONY: run
run: coqui
run: protogen
@echo "Running coqui..."
bash run.sh
@echo "coqui run."
.PHONY: test
test: coqui
test: protogen
@echo "Testing coqui..."
bash test.sh
@echo "coqui tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -40,9 +40,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
else:
print("CUDA is not available", file=sys.stderr)
device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available")

View File

@@ -1,4 +1,4 @@
grpcio==1.74.0
grpcio==1.71.0
protobuf
certifi
packaging==24.1

View File

@@ -12,22 +12,28 @@ export SKIP_CONDA=1
endif
.PHONY: diffusers
diffusers:
diffusers: protogen
bash install.sh
.PHONY: run
run: diffusers
run: protogen
@echo "Running diffusers..."
bash run.sh
@echo "Diffusers run."
test: diffusers
test: protogen
bash test.sh
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -18,7 +18,7 @@ import backend_pb2_grpc
import grpc
from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, QwenImageEditPipeline
EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel
from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
from diffusers.pipelines.stable_diffusion import safety_checker
from diffusers.utils import load_image, export_to_video
@@ -263,9 +263,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
elif request.PipelineType == "DiffusionPipeline":
self.pipe = DiffusionPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "QwenImageEditPipeline":
self.pipe = QwenImageEditPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "VideoDiffusionPipeline":
self.txt2vid = True
self.pipe = DiffusionPipeline.from_pretrained(request.Model,
@@ -368,9 +365,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
device = "cpu" if not request.CUDA else "cuda"
if XPU:
device = "xpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
self.device = device
if request.LoraAdapter:
# Check if its a local file and not a directory ( we load lora differently for a safetensor file )

View File

@@ -1,8 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/cpu
git+https://github.com/huggingface/diffusers
opencv-python
transformers
torchvision==0.22.1
accelerate
compel
peft

View File

@@ -1,6 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.7.1+cu118
torchvision==0.22.1+cu118
git+https://github.com/huggingface/diffusers
opencv-python
transformers

View File

@@ -1,5 +1,4 @@
torch==2.7.1
torchvision==0.22.1
git+https://github.com/huggingface/diffusers
opencv-python
transformers

View File

@@ -6,6 +6,4 @@ accelerate
compel
peft
optimum-quanto
numpy<2
sentencepiece
torchvision
numpy<2

View File

@@ -1,10 +0,0 @@
torch==2.7.1
torchvision==0.22.1
git+https://github.com/huggingface/diffusers
opencv-python
transformers
accelerate
compel
peft
sentencepiece
optimum-quanto

View File

@@ -1,5 +1,5 @@
setuptools
grpcio==1.74.0
grpcio==1.71.0
pillow
protobuf
certifi

View File

@@ -12,6 +12,4 @@ if [ -d "/opt/intel" ]; then
export XPU=1
fi
export PYTORCH_ENABLE_MPS_FALLBACK=1
startBackend $@

View File

@@ -1,17 +1,23 @@
.PHONY: exllama2
exllama2:
exllama2: protogen
bash install.sh
.PHONY: run
run: exllama2
run: protogen
@echo "Running exllama2..."
bash run.sh
@echo "exllama2 run."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
$(RM) -r venv source __pycache__

View File

@@ -1,4 +1,4 @@
grpcio==1.74.0
grpcio==1.71.0
protobuf
certifi
wheel

View File

@@ -3,11 +3,18 @@
.PHONY: install
install:
bash install.sh
$(MAKE) protogen
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
bash protogen.sh
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -10,7 +10,7 @@ import sys
import os
import backend_pb2
import backend_pb2_grpc
import torch
from faster_whisper import WhisperModel
import grpc
@@ -35,9 +35,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
# device = "cuda" if request.CUDA else "cpu"
if request.CUDA:
device = "cuda"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
try:
print("Preparing models, please wait", file=sys.stderr)
self.model = WhisperModel(request.Model, device=device, compute_type="float16")

View File

@@ -1,23 +1,29 @@
.PHONY: kitten-tts
kitten-tts:
kitten-tts: protogen
bash install.sh
.PHONY: run
run: kitten-tts
run: protogen
@echo "Running kitten-tts..."
bash run.sh
@echo "kitten-tts run."
.PHONY: test
test: kitten-tts
test: protogen
@echo "Testing kitten-tts..."
bash test.sh
@echo "kitten-tts tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -33,6 +33,18 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
def LoadModel(self, request, context):
# Get device
# device = "cuda" if request.CUDA else "cpu"
if torch.cuda.is_available():
print("CUDA is available", file=sys.stderr)
device = "cuda"
else:
print("CUDA is not available", file=sys.stderr)
device = "cpu"
if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available")
self.AudioPath = None
# List available KittenTTS models
print("Available KittenTTS voices: expr-voice-2-m, expr-voice-2-f, expr-voice-3-m, expr-voice-3-f, expr-voice-4-m, expr-voice-4-f, expr-voice-5-m, expr-voice-5-f")

View File

@@ -1,23 +1,29 @@
.PHONY: kokoro
kokoro:
kokoro: protogen
bash install.sh
.PHONY: run
run: kokoro
run: protogen
@echo "Running kokoro..."
bash run.sh
@echo "kokoro run."
.PHONY: test
test: kokoro
test: protogen
@echo "Testing kokoro..."
bash test.sh
@echo "kokoro tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -33,6 +33,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
def LoadModel(self, request, context):
# Get device
if torch.cuda.is_available():
print("CUDA is available", file=sys.stderr)
device = "cuda"
else:
print("CUDA is not available", file=sys.stderr)
device = "cpu"
if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available")
try:
print("Preparing Kokoro TTS pipeline, please wait", file=sys.stderr)
# empty dict

View File

@@ -1,23 +0,0 @@
.PHONY: mlx-vlm
mlx-vlm:
bash install.sh
.PHONY: run
run: mlx-vlm
@echo "Running mlx-vlm..."
bash run.sh
@echo "mlx run."
.PHONY: test
test: mlx-vlm
@echo "Testing mlx-vlm..."
bash test.sh
@echo "mlx tested."
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,477 +0,0 @@
#!/usr/bin/env python3
import asyncio
from concurrent import futures
import argparse
import signal
import sys
import os
from typing import List
import time
import backend_pb2
import backend_pb2_grpc
import grpc
from mlx_vlm import load, generate, stream_generate
from mlx_vlm.prompt_utils import apply_chat_template
from mlx_vlm.utils import load_config, load_image
import mlx.core as mx
import base64
import io
from PIL import Image
import tempfile
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
A gRPC servicer that implements the Backend service defined in backend.proto.
"""
def _is_float(self, s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def _is_int(self, s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False
def Health(self, request, context):
"""
Returns a health check message.
Args:
request: The health check request.
context: The gRPC context.
Returns:
backend_pb2.Reply: The health check reply.
"""
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
async def LoadModel(self, request, context):
"""
Loads a multimodal vision-language model using MLX-VLM.
Args:
request: The load model request.
context: The gRPC context.
Returns:
backend_pb2.Result: The load model result.
"""
try:
print(f"Loading MLX-VLM model: {request.Model}", file=sys.stderr)
print(f"Request: {request}", file=sys.stderr)
# Parse options like in the diffusers backend
options = request.Options
self.options = {}
# The options are a list of strings in this form optname:optvalue
# We store all the options in a dict for later use
for opt in options:
if ":" not in opt:
continue
key, value = opt.split(":", 1) # Split only on first colon to handle values with colons
# Convert numeric values to appropriate types
if self._is_float(value):
value = float(value)
elif self._is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value
print(f"Options: {self.options}", file=sys.stderr)
# Load model and processor using MLX-VLM
# mlx-vlm load function returns (model, processor) instead of (model, tokenizer)
self.model, self.processor = load(request.Model)
# Load model config for chat template support
self.config = load_config(request.Model)
except Exception as err:
print(f"Error loading MLX-VLM model {err=}, {type(err)=}", file=sys.stderr)
return backend_pb2.Result(success=False, message=f"Error loading MLX-VLM model: {err}")
print("MLX-VLM model loaded successfully", file=sys.stderr)
return backend_pb2.Result(message="MLX-VLM model loaded successfully", success=True)
async def Predict(self, request, context):
"""
Generates text based on the given prompt and sampling parameters using MLX-VLM with multimodal support.
Args:
request: The predict request.
context: The gRPC context.
Returns:
backend_pb2.Reply: The predict result.
"""
temp_files = []
try:
# Process images and audios from request
image_paths = []
audio_paths = []
# Process images
if request.Images:
for img_data in request.Images:
img_path = self.load_image_from_base64(img_data)
if img_path:
image_paths.append(img_path)
temp_files.append(img_path)
# Process audios
if request.Audios:
for audio_data in request.Audios:
audio_path = self.load_audio_from_base64(audio_data)
if audio_path:
audio_paths.append(audio_path)
temp_files.append(audio_path)
# Prepare the prompt with multimodal information
prompt = self._prepare_prompt(request, num_images=len(image_paths), num_audios=len(audio_paths))
# Build generation parameters using request attributes and options
max_tokens, generation_params = self._build_generation_params(request)
print(f"Generating text with MLX-VLM - max_tokens: {max_tokens}, params: {generation_params}", file=sys.stderr)
print(f"Images: {len(image_paths)}, Audios: {len(audio_paths)}", file=sys.stderr)
# Generate text using MLX-VLM with multimodal inputs
response = generate(
model=self.model,
processor=self.processor,
prompt=prompt,
image=image_paths if image_paths else None,
audio=audio_paths if audio_paths else None,
max_tokens=max_tokens,
temperature=generation_params.get('temp', 0.6),
top_p=generation_params.get('top_p', 1.0),
verbose=False
)
return backend_pb2.Reply(message=bytes(response, encoding='utf-8'))
except Exception as e:
print(f"Error in MLX-VLM Predict: {e}", file=sys.stderr)
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details(f"Generation failed: {str(e)}")
return backend_pb2.Reply(message=bytes("", encoding='utf-8'))
finally:
# Clean up temporary files
self.cleanup_temp_files(temp_files)
def Embedding(self, request, context):
"""
A gRPC method that calculates embeddings for a given sentence.
Note: MLX-VLM doesn't support embeddings directly. This method returns an error.
Args:
request: An EmbeddingRequest object that contains the request parameters.
context: A grpc.ServicerContext object that provides information about the RPC.
Returns:
An EmbeddingResult object that contains the calculated embeddings.
"""
print("Embeddings not supported in MLX-VLM backend", file=sys.stderr)
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Embeddings are not supported in the MLX-VLM backend.")
return backend_pb2.EmbeddingResult()
async def PredictStream(self, request, context):
"""
Generates text based on the given prompt and sampling parameters, and streams the results using MLX-VLM with multimodal support.
Args:
request: The predict stream request.
context: The gRPC context.
Yields:
backend_pb2.Reply: Streaming predict results.
"""
temp_files = []
try:
# Process images and audios from request
image_paths = []
audio_paths = []
# Process images
if request.Images:
for img_data in request.Images:
img_path = self.load_image_from_base64(img_data)
if img_path:
image_paths.append(img_path)
temp_files.append(img_path)
# Process audios
if request.Audios:
for audio_data in request.Audios:
audio_path = self.load_audio_from_base64(audio_data)
if audio_path:
audio_paths.append(audio_path)
temp_files.append(audio_path)
# Prepare the prompt with multimodal information
prompt = self._prepare_prompt(request, num_images=len(image_paths), num_audios=len(audio_paths))
# Build generation parameters using request attributes and options
max_tokens, generation_params = self._build_generation_params(request, default_max_tokens=512)
print(f"Streaming text with MLX-VLM - max_tokens: {max_tokens}, params: {generation_params}", file=sys.stderr)
print(f"Images: {len(image_paths)}, Audios: {len(audio_paths)}", file=sys.stderr)
# Stream text generation using MLX-VLM with multimodal inputs
for response in stream_generate(
model=self.model,
processor=self.processor,
prompt=prompt,
image=image_paths if image_paths else None,
audio=audio_paths if audio_paths else None,
max_tokens=max_tokens,
temperature=generation_params.get('temp', 0.6),
top_p=generation_params.get('top_p', 1.0),
):
yield backend_pb2.Reply(message=bytes(response.text, encoding='utf-8'))
except Exception as e:
print(f"Error in MLX-VLM PredictStream: {e}", file=sys.stderr)
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details(f"Streaming generation failed: {str(e)}")
yield backend_pb2.Reply(message=bytes("", encoding='utf-8'))
finally:
# Clean up temporary files
self.cleanup_temp_files(temp_files)
def _prepare_prompt(self, request, num_images=0, num_audios=0):
"""
Prepare the prompt for MLX-VLM generation, handling chat templates and multimodal inputs.
Args:
request: The gRPC request containing prompt and message information.
num_images: Number of images in the request.
num_audios: Number of audio files in the request.
Returns:
str: The prepared prompt.
"""
# If tokenizer template is enabled and messages are provided instead of prompt, apply the tokenizer template
if not request.Prompt and request.UseTokenizerTemplate and request.Messages:
# Convert gRPC messages to the format expected by apply_chat_template
messages = []
for msg in request.Messages:
messages.append({"role": msg.role, "content": msg.content})
# Use mlx-vlm's apply_chat_template which handles multimodal inputs
prompt = apply_chat_template(
self.processor,
self.config,
messages,
num_images=num_images,
num_audios=num_audios
)
return prompt
elif request.Prompt:
# If we have a direct prompt but also have images/audio, we need to format it properly
if num_images > 0 or num_audios > 0:
# Create a simple message structure for multimodal prompt
messages = [{"role": "user", "content": request.Prompt}]
prompt = apply_chat_template(
self.processor,
self.config,
messages,
num_images=num_images,
num_audios=num_audios
)
return prompt
else:
return request.Prompt
else:
# Fallback to empty prompt with multimodal template if we have media
if num_images > 0 or num_audios > 0:
messages = [{"role": "user", "content": ""}]
prompt = apply_chat_template(
self.processor,
self.config,
messages,
num_images=num_images,
num_audios=num_audios
)
return prompt
else:
return ""
def _build_generation_params(self, request, default_max_tokens=200):
"""
Build generation parameters from request attributes and options for MLX-VLM.
Args:
request: The gRPC request.
default_max_tokens: Default max_tokens if not specified.
Returns:
tuple: (max_tokens, generation_params dict)
"""
# Extract max_tokens
max_tokens = getattr(request, 'Tokens', default_max_tokens)
if max_tokens == 0:
max_tokens = default_max_tokens
# Extract generation parameters from request attributes
temp = getattr(request, 'Temperature', 0.0)
if temp == 0.0:
temp = 0.6 # Default temperature
top_p = getattr(request, 'TopP', 0.0)
if top_p == 0.0:
top_p = 1.0 # Default top_p
# Initialize generation parameters for MLX-VLM
generation_params = {
'temp': temp,
'top_p': top_p,
}
# Add seed if specified
seed = getattr(request, 'Seed', 0)
if seed != 0:
mx.random.seed(seed)
# Override with options if available
if hasattr(self, 'options'):
# Max tokens from options
if 'max_tokens' in self.options:
max_tokens = self.options['max_tokens']
# Generation parameters from options
param_option_mapping = {
'temp': 'temp',
'temperature': 'temp', # alias
'top_p': 'top_p',
}
for option_key, param_key in param_option_mapping.items():
if option_key in self.options:
generation_params[param_key] = self.options[option_key]
# Handle seed from options
if 'seed' in self.options:
mx.random.seed(self.options['seed'])
return max_tokens, generation_params
def load_image_from_base64(self, image_data: str):
"""
Load an image from base64 encoded data.
Args:
image_data (str): Base64 encoded image data.
Returns:
PIL.Image or str: The loaded image or path to the image.
"""
try:
decoded_data = base64.b64decode(image_data)
image = Image.open(io.BytesIO(decoded_data))
# Save to temporary file for mlx-vlm
with tempfile.NamedTemporaryFile(delete=False, suffix='.jpg') as tmp_file:
image.save(tmp_file.name, format='JPEG')
return tmp_file.name
except Exception as e:
print(f"Error loading image from base64: {e}", file=sys.stderr)
return None
def load_audio_from_base64(self, audio_data: str):
"""
Load audio from base64 encoded data.
Args:
audio_data (str): Base64 encoded audio data.
Returns:
str: Path to the loaded audio file.
"""
try:
decoded_data = base64.b64decode(audio_data)
# Save to temporary file for mlx-vlm
with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as tmp_file:
tmp_file.write(decoded_data)
return tmp_file.name
except Exception as e:
print(f"Error loading audio from base64: {e}", file=sys.stderr)
return None
def cleanup_temp_files(self, file_paths: List[str]):
"""
Clean up temporary files.
Args:
file_paths (List[str]): List of file paths to clean up.
"""
for file_path in file_paths:
try:
if file_path and os.path.exists(file_path):
os.remove(file_path)
except Exception as e:
print(f"Error removing temporary file {file_path}: {e}", file=sys.stderr)
async def serve(address):
# Start asyncio gRPC server
server = grpc.aio.server(migration_thread_pool=futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
])
# Add the servicer to the server
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
# Bind the server to the address
server.add_insecure_port(address)
# Gracefully shutdown the server on SIGTERM or SIGINT
loop = asyncio.get_event_loop()
for sig in (signal.SIGINT, signal.SIGTERM):
loop.add_signal_handler(
sig, lambda: asyncio.ensure_future(server.stop(5))
)
# Start the server
await server.start()
print("Server started. Listening on: " + address, file=sys.stderr)
# Wait for the server to be terminated
await server.wait_for_termination()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the gRPC server.")
parser.add_argument(
"--addr", default="localhost:50051", help="The address to bind the server to."
)
args = parser.parse_args()
asyncio.run(serve(args.addr))

View File

@@ -1,14 +0,0 @@
#!/bin/bash
set -e
USE_PIP=true
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
installRequirements

View File

@@ -1 +0,0 @@
git+https://github.com/Blaizzy/mlx-vlm

View File

@@ -1,4 +0,0 @@
grpcio==1.71.0
protobuf
certifi
setuptools

View File

@@ -1,11 +0,0 @@
#!/bin/bash
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
startBackend $@

View File

@@ -1,146 +0,0 @@
import unittest
import subprocess
import time
import backend_pb2
import backend_pb2_grpc
import grpc
import unittest
import subprocess
import time
import grpc
import backend_pb2_grpc
import backend_pb2
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service.
This class contains methods to test the startup and shutdown of the gRPC service.
"""
def setUp(self):
self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"])
time.sleep(10)
def tearDown(self) -> None:
self.service.terminate()
self.service.wait()
def test_server_startup(self):
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.Health(backend_pb2.HealthMessage())
self.assertEqual(response.message, b'OK')
except Exception as err:
print(err)
self.fail("Server failed to start")
finally:
self.tearDown()
def test_load_model(self):
"""
This method tests if the model is loaded successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
self.assertEqual(response.message, "Model loaded successfully")
except Exception as err:
print(err)
self.fail("LoadModel service failed")
finally:
self.tearDown()
def test_text(self):
"""
This method tests if the embeddings are generated successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
req = backend_pb2.PredictOptions(Prompt="The capital of France is")
resp = stub.Predict(req)
self.assertIsNotNone(resp.message)
except Exception as err:
print(err)
self.fail("text service failed")
finally:
self.tearDown()
def test_sampling_params(self):
"""
This method tests if all sampling parameters are correctly processed
NOTE: this does NOT test for correctness, just that we received a compatible response
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
req = backend_pb2.PredictOptions(
Prompt="The capital of France is",
TopP=0.8,
Tokens=50,
Temperature=0.7,
TopK=40,
PresencePenalty=0.1,
FrequencyPenalty=0.2,
RepetitionPenalty=1.1,
MinP=0.05,
Seed=42,
StopPrompts=["\n"],
StopTokenIds=[50256],
BadWords=["badword"],
IncludeStopStrInOutput=True,
IgnoreEOS=True,
MinTokens=5,
Logprobs=5,
PromptLogprobs=5,
SkipSpecialTokens=True,
SpacesBetweenSpecialTokens=True,
TruncatePromptTokens=10,
GuidedDecoding=True,
N=2,
)
resp = stub.Predict(req)
self.assertIsNotNone(resp.message)
self.assertIsNotNone(resp.logprobs)
except Exception as err:
print(err)
self.fail("sampling params service failed")
finally:
self.tearDown()
def test_embedding(self):
"""
This method tests if the embeddings are generated successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="intfloat/e5-mistral-7b-instruct"))
self.assertTrue(response.success)
embedding_request = backend_pb2.PredictOptions(Embeddings="This is a test sentence.")
embedding_response = stub.Embedding(embedding_request)
self.assertIsNotNone(embedding_response.embeddings)
# assert that is a list of floats
self.assertIsInstance(embedding_response.embeddings, list)
# assert that the list is not empty
self.assertTrue(len(embedding_response.embeddings) > 0)
except Exception as err:
print(err)
self.fail("Embedding service failed")
finally:
self.tearDown()

View File

@@ -1,12 +0,0 @@
#!/bin/bash
set -e
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
runUnittests

View File

@@ -1,23 +0,0 @@
.PHONY: mlx
mlx:
bash install.sh
.PHONY: run
run:
@echo "Running mlx..."
bash run.sh
@echo "mlx run."
.PHONY: test
test:
@echo "Testing mlx..."
bash test.sh
@echo "mlx tested."
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,376 +0,0 @@
#!/usr/bin/env python3
import asyncio
from concurrent import futures
import argparse
import signal
import sys
import os
from typing import List
import time
import backend_pb2
import backend_pb2_grpc
import grpc
from mlx_lm import load, generate, stream_generate
from mlx_lm.sample_utils import make_sampler
from mlx_lm.models.cache import make_prompt_cache
import mlx.core as mx
import base64
import io
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
A gRPC servicer that implements the Backend service defined in backend.proto.
"""
def _is_float(self, s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def _is_int(self, s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False
def Health(self, request, context):
"""
Returns a health check message.
Args:
request: The health check request.
context: The gRPC context.
Returns:
backend_pb2.Reply: The health check reply.
"""
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
async def LoadModel(self, request, context):
"""
Loads a language model using MLX.
Args:
request: The load model request.
context: The gRPC context.
Returns:
backend_pb2.Result: The load model result.
"""
try:
print(f"Loading MLX model: {request.Model}", file=sys.stderr)
print(f"Request: {request}", file=sys.stderr)
# Parse options like in the diffusers backend
options = request.Options
self.options = {}
# The options are a list of strings in this form optname:optvalue
# We store all the options in a dict for later use
for opt in options:
if ":" not in opt:
continue
key, value = opt.split(":", 1) # Split only on first colon to handle values with colons
# Convert numeric values to appropriate types
if self._is_float(value):
value = float(value)
elif self._is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value
print(f"Options: {self.options}", file=sys.stderr)
# Build tokenizer config for MLX using options
tokenizer_config = {}
# Handle trust_remote_code from request or options
if request.TrustRemoteCode or self.options.get("trust_remote_code", False):
tokenizer_config["trust_remote_code"] = True
# Handle EOS token from options
if "eos_token" in self.options:
tokenizer_config["eos_token"] = self.options["eos_token"]
# Handle other tokenizer config options
for key in ["pad_token", "bos_token", "unk_token", "sep_token", "cls_token", "mask_token"]:
if key in self.options:
tokenizer_config[key] = self.options[key]
# Load model and tokenizer using MLX
if tokenizer_config:
print(f"Loading with tokenizer_config: {tokenizer_config}", file=sys.stderr)
self.model, self.tokenizer = load(request.Model, tokenizer_config=tokenizer_config)
else:
self.model, self.tokenizer = load(request.Model)
# Initialize prompt cache for efficient generation
max_kv_size = self.options.get("max_kv_size", None)
self.prompt_cache = make_prompt_cache(self.model, max_kv_size)
except Exception as err:
print(f"Error loading MLX model {err=}, {type(err)=}", file=sys.stderr)
return backend_pb2.Result(success=False, message=f"Error loading MLX model: {err}")
print("MLX model loaded successfully", file=sys.stderr)
return backend_pb2.Result(message="MLX model loaded successfully", success=True)
async def Predict(self, request, context):
"""
Generates text based on the given prompt and sampling parameters using MLX.
Args:
request: The predict request.
context: The gRPC context.
Returns:
backend_pb2.Reply: The predict result.
"""
try:
# Prepare the prompt
prompt = self._prepare_prompt(request)
# Build generation parameters using request attributes and options
max_tokens, sampler_params = self._build_generation_params(request)
print(f"Generating text with MLX - max_tokens: {max_tokens}, sampler_params: {sampler_params}", file=sys.stderr)
# Create sampler with parameters
sampler = make_sampler(**sampler_params)
# Generate text using MLX with proper parameters
response = generate(
self.model,
self.tokenizer,
prompt=prompt,
max_tokens=max_tokens,
sampler=sampler,
prompt_cache=self.prompt_cache,
verbose=False
)
return backend_pb2.Reply(message=bytes(response, encoding='utf-8'))
except Exception as e:
print(f"Error in MLX Predict: {e}", file=sys.stderr)
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details(f"Generation failed: {str(e)}")
return backend_pb2.Reply(message=bytes("", encoding='utf-8'))
def Embedding(self, request, context):
"""
A gRPC method that calculates embeddings for a given sentence.
Note: MLX-LM doesn't support embeddings directly. This method returns an error.
Args:
request: An EmbeddingRequest object that contains the request parameters.
context: A grpc.ServicerContext object that provides information about the RPC.
Returns:
An EmbeddingResult object that contains the calculated embeddings.
"""
print("Embeddings not supported in MLX backend", file=sys.stderr)
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Embeddings are not supported in the MLX backend.")
return backend_pb2.EmbeddingResult()
async def PredictStream(self, request, context):
"""
Generates text based on the given prompt and sampling parameters, and streams the results using MLX.
Args:
request: The predict stream request.
context: The gRPC context.
Yields:
backend_pb2.Reply: Streaming predict results.
"""
try:
# Prepare the prompt
prompt = self._prepare_prompt(request)
# Build generation parameters using request attributes and options
max_tokens, sampler_params = self._build_generation_params(request, default_max_tokens=512)
print(f"Streaming text with MLX - max_tokens: {max_tokens}, sampler_params: {sampler_params}", file=sys.stderr)
# Create sampler with parameters
sampler = make_sampler(**sampler_params)
# Stream text generation using MLX with proper parameters
for response in stream_generate(
self.model,
self.tokenizer,
prompt=prompt,
max_tokens=max_tokens,
sampler=sampler,
prompt_cache=self.prompt_cache,
):
yield backend_pb2.Reply(message=bytes(response.text, encoding='utf-8'))
except Exception as e:
print(f"Error in MLX PredictStream: {e}", file=sys.stderr)
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details(f"Streaming generation failed: {str(e)}")
yield backend_pb2.Reply(message=bytes("", encoding='utf-8'))
def _prepare_prompt(self, request):
"""
Prepare the prompt for MLX generation, handling chat templates if needed.
Args:
request: The gRPC request containing prompt and message information.
Returns:
str: The prepared prompt.
"""
# If tokenizer template is enabled and messages are provided instead of prompt, apply the tokenizer template
if not request.Prompt and request.UseTokenizerTemplate and request.Messages:
# Convert gRPC messages to the format expected by apply_chat_template
messages = []
for msg in request.Messages:
messages.append({"role": msg.role, "content": msg.content})
prompt = self.tokenizer.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True
)
return prompt
else:
return request.Prompt
def _build_generation_params(self, request, default_max_tokens=200):
"""
Build generation parameters from request attributes and options.
Args:
request: The gRPC request.
default_max_tokens: Default max_tokens if not specified.
Returns:
tuple: (max_tokens, sampler_params dict)
"""
# Extract max_tokens
max_tokens = getattr(request, 'Tokens', default_max_tokens)
if max_tokens == 0:
max_tokens = default_max_tokens
# Extract sampler parameters from request attributes
temp = getattr(request, 'Temperature', 0.0)
if temp == 0.0:
temp = 0.6 # Default temperature
top_p = getattr(request, 'TopP', 0.0)
if top_p == 0.0:
top_p = 1.0 # Default top_p
# Initialize sampler parameters
sampler_params = {
'temp': temp,
'top_p': top_p,
'xtc_threshold': 0.0,
'xtc_probability': 0.0,
}
# Add seed if specified
seed = getattr(request, 'Seed', 0)
if seed != 0:
mx.random.seed(seed)
# Override with options if available
if hasattr(self, 'options'):
# Max tokens from options
if 'max_tokens' in self.options:
max_tokens = self.options['max_tokens']
# Sampler parameters from options
sampler_option_mapping = {
'temp': 'temp',
'temperature': 'temp', # alias
'top_p': 'top_p',
'xtc_threshold': 'xtc_threshold',
'xtc_probability': 'xtc_probability',
}
for option_key, param_key in sampler_option_mapping.items():
if option_key in self.options:
sampler_params[param_key] = self.options[option_key]
# Handle seed from options
if 'seed' in self.options:
mx.random.seed(self.options['seed'])
# Special tokens for XTC sampling (if tokenizer has eos_token_ids)
xtc_special_tokens = []
if hasattr(self.tokenizer, 'eos_token_ids') and self.tokenizer.eos_token_ids:
xtc_special_tokens = list(self.tokenizer.eos_token_ids)
elif hasattr(self.tokenizer, 'eos_token_id') and self.tokenizer.eos_token_id is not None:
xtc_special_tokens = [self.tokenizer.eos_token_id]
# Add newline token if available
try:
newline_tokens = self.tokenizer.encode("\n")
xtc_special_tokens.extend(newline_tokens)
except Exception:
pass # Skip if encoding fails
sampler_params['xtc_special_tokens'] = xtc_special_tokens
return max_tokens, sampler_params
async def serve(address):
# Start asyncio gRPC server
server = grpc.aio.server(migration_thread_pool=futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
])
# Add the servicer to the server
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
# Bind the server to the address
server.add_insecure_port(address)
# Gracefully shutdown the server on SIGTERM or SIGINT
loop = asyncio.get_running_loop()
for sig in (signal.SIGINT, signal.SIGTERM):
loop.add_signal_handler(
sig, lambda: asyncio.ensure_future(server.stop(5))
)
# Start the server
await server.start()
print("Server started. Listening on: " + address, file=sys.stderr)
# Wait for the server to be terminated
await server.wait_for_termination()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the gRPC server.")
parser.add_argument(
"--addr", default="localhost:50051", help="The address to bind the server to."
)
args = parser.parse_args()
asyncio.run(serve(args.addr))
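For illustration, here is a minimal client sketch for exercising this backend over gRPC. It is a sketch under stated assumptions: the generated backend_pb2/backend_pb2_grpc stubs are importable, a server started as above is listening on localhost:50051, and the model name is a placeholder; the PredictOptions/ModelOptions field names mirror the usage in the tests further below.

# Hypothetical client for the MLX backend above; assumes generated stubs and a
# running server on localhost:50051. The model name is a placeholder.
import grpc

import backend_pb2
import backend_pb2_grpc


def main():
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = backend_pb2_grpc.BackendStub(channel)

        # Load a model first; Model is a placeholder identifier.
        load = stub.LoadModel(backend_pb2.ModelOptions(Model="mlx-community/example-model"))
        print("LoadModel:", load.message)

        # Blocking generation via Predict; Reply.message carries UTF-8 bytes.
        reply = stub.Predict(backend_pb2.PredictOptions(Prompt="The capital of France is", Tokens=32))
        print(reply.message.decode("utf-8"))

        # Streaming generation via PredictStream; each Reply carries the next chunk.
        for chunk in stub.PredictStream(backend_pb2.PredictOptions(Prompt="Write a haiku about MLX", Tokens=64)):
            print(chunk.message.decode("utf-8"), end="", flush=True)


if __name__ == "__main__":
    main()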

View File

@@ -1 +0,0 @@
mlx-lm

View File

@@ -1,4 +0,0 @@
grpcio==1.71.0
protobuf
certifi
setuptools

View File

@@ -1,11 +0,0 @@
#!/bin/bash
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
startBackend $@

View File

@@ -1,146 +0,0 @@
import subprocess
import time
import unittest

import grpc

import backend_pb2
import backend_pb2_grpc
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service.
This class contains methods to test the startup and shutdown of the gRPC service.
"""
def setUp(self):
self.service = subprocess.Popen(["python", "backend.py", "--addr", "localhost:50051"])
time.sleep(10)
def tearDown(self) -> None:
self.service.terminate()
self.service.wait()
def test_server_startup(self):
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.Health(backend_pb2.HealthMessage())
self.assertEqual(response.message, b'OK')
except Exception as err:
print(err)
self.fail("Server failed to start")
finally:
self.tearDown()
def test_load_model(self):
"""
This method tests if the model is loaded successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
self.assertEqual(response.message, "Model loaded successfully")
except Exception as err:
print(err)
self.fail("LoadModel service failed")
finally:
self.tearDown()
def test_text(self):
"""
This method tests if text is generated successfully from a prompt
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
req = backend_pb2.PredictOptions(Prompt="The capital of France is")
resp = stub.Predict(req)
self.assertIsNotNone(resp.message)
except Exception as err:
print(err)
self.fail("text service failed")
finally:
self.tearDown()
def test_sampling_params(self):
"""
This method tests if all sampling parameters are correctly processed
NOTE: this does NOT test for correctness, just that we received a compatible response
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
req = backend_pb2.PredictOptions(
Prompt="The capital of France is",
TopP=0.8,
Tokens=50,
Temperature=0.7,
TopK=40,
PresencePenalty=0.1,
FrequencyPenalty=0.2,
RepetitionPenalty=1.1,
MinP=0.05,
Seed=42,
StopPrompts=["\n"],
StopTokenIds=[50256],
BadWords=["badword"],
IncludeStopStrInOutput=True,
IgnoreEOS=True,
MinTokens=5,
Logprobs=5,
PromptLogprobs=5,
SkipSpecialTokens=True,
SpacesBetweenSpecialTokens=True,
TruncatePromptTokens=10,
GuidedDecoding=True,
N=2,
)
resp = stub.Predict(req)
self.assertIsNotNone(resp.message)
self.assertIsNotNone(resp.logprobs)
except Exception as err:
print(err)
self.fail("sampling params service failed")
finally:
self.tearDown()
def test_embedding(self):
"""
This method tests if the embeddings are generated successfully
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="intfloat/e5-mistral-7b-instruct"))
self.assertTrue(response.success)
embedding_request = backend_pb2.PredictOptions(Embeddings="This is a test sentence.")
embedding_response = stub.Embedding(embedding_request)
self.assertIsNotNone(embedding_response.embeddings)
# assert that is a list of floats
self.assertIsInstance(embedding_response.embeddings, list)
# assert that the list is not empty
self.assertTrue(len(embedding_response.embeddings) > 0)
except Exception as err:
print(err)
self.fail("Embedding service failed")
finally:
self.tearDown()
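A single case from this suite can also be run programmatically instead of via test.sh; a sketch, assuming the file above is saved as test.py next to backend.py and the generated stubs:

# Run only the text-generation test; assumes this file is test.py and that
# backend.py plus the generated gRPC stubs are in the same directory.
import unittest

from test import TestBackendServicer

suite = unittest.TestSuite()
suite.addTest(TestBackendServicer("test_text"))
unittest.TextTestRunner(verbosity=2).run(suite)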

View File

@@ -1,12 +0,0 @@
#!/bin/bash
set -e
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
runUnittests

View File

@@ -1,24 +1,30 @@
.PHONY: rerankers
rerankers:
rerankers: protogen
bash install.sh
.PHONY: run
run: rerankers
run: protogen
@echo "Running rerankers..."
bash run.sh
@echo "rerankers run."
# This does not work well from the command line; it only works from an IDE such as VSCode.
.PHONY: test
test: rerankers
test: protogen
@echo "Testing rerankers..."
bash test.sh
@echo "rerankers tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__
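The protogen rule above shells out to grpc_tools.protoc; the same generation step can be invoked from Python, which is occasionally convenient in CI. A sketch, assuming grpcio-tools is installed and backend.proto sits two directories up, as in the rule:

# Programmatic equivalent of the Makefile's protogen rule; assumes grpcio-tools
# is installed and backend.proto is located two directories up.
from grpc_tools import protoc

protoc.main([
    "grpc_tools.protoc",
    "-I../..",
    "-I./",
    "--python_out=.",
    "--grpc_python_out=.",
    "backend.proto",
])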

View File

@@ -3,7 +3,7 @@ intel-extension-for-pytorch==2.3.110+xpu
transformers
accelerate
torch==2.3.1+cxx11.abi
oneccl_bind_pt==2.8.0+xpu
oneccl_bind_pt==2.3.100+xpu
rerankers[transformers]
optimum[openvino]
setuptools

View File

@@ -1,3 +1,3 @@
grpcio==1.74.0
grpcio==1.71.0
protobuf
certifi

View File

@@ -3,11 +3,18 @@
.PHONY: install
install:
bash install.sh
$(MAKE) protogen
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
bash protogen.sh
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,15 +1,13 @@
#!/bin/bash
set -e
USE_PIP=true
PYTHON_VERSION=""
backend_dir=$(dirname $0)
if [ -d $backend_dir/common ]; then
source $backend_dir/common/libbackend.sh
else
source $backend_dir/../common/libbackend.sh
fi
installRequirements
ensureVenv
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto

View File

@@ -1,24 +1,30 @@
.PHONY: transformers
transformers:
transformers: protogen
bash install.sh
.PHONY: run
run: transformers
run: protogen
@echo "Running transformers..."
bash run.sh
@echo "transformers run."
# This does not work well from the command line; it only works from an IDE such as VSCode.
.PHONY: test
test: transformers
test: protogen
@echo "Testing transformers..."
bash test.sh
@echo "transformers tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -94,9 +94,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
self.SentenceTransformer = False
device_map="cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device_map = "mps"
quantization = None
autoTokenizer = True

View File

@@ -1,4 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/cpu
torch==2.7.1
llvmlite==0.43.0
numba==0.60.0
@@ -6,5 +5,5 @@ accelerate
transformers
bitsandbytes
outetts
sentence-transformers==5.1.0
protobuf==6.32.0
sentence-transformers==5.0.0
protobuf==6.31.0

View File

@@ -6,5 +6,5 @@ accelerate
transformers
bitsandbytes
outetts
sentence-transformers==5.1.0
protobuf==6.32.0
sentence-transformers==5.0.0
protobuf==6.31.0

View File

@@ -5,5 +5,5 @@ numba==0.60.0
transformers
bitsandbytes
outetts
sentence-transformers==5.1.0
protobuf==6.32.0
sentence-transformers==5.0.0
protobuf==6.31.0

View File

@@ -7,5 +7,5 @@ numba==0.60.0
bitsandbytes
outetts
bitsandbytes
sentence-transformers==5.1.0
protobuf==6.32.0
sentence-transformers==5.0.0
protobuf==6.31.0

View File

@@ -9,5 +9,5 @@ transformers
intel-extension-for-transformers
bitsandbytes
outetts
sentence-transformers==5.1.0
protobuf==6.32.0
sentence-transformers==5.0.0
protobuf==6.31.0

View File

@@ -1,9 +0,0 @@
torch==2.7.1
accelerate
llvmlite==0.43.0
numba==0.60.0
transformers
bitsandbytes
outetts
sentence-transformers==5.1.0
protobuf==6.32.0

View File

@@ -1,5 +1,5 @@
grpcio==1.74.0
protobuf==6.32.0
grpcio==1.71.0
protobuf==6.31.0
certifi
setuptools
scipy==1.15.1

View File

@@ -1,23 +1,29 @@
.PHONY: vllm
vllm:
vllm: protogen
bash install.sh
.PHONY: run
run: vllm
run: protogen
@echo "Running vllm..."
bash run.sh
@echo "vllm run."
.PHONY: test
test: vllm
test: protogen
@echo "Testing vllm..."
bash test.sh
@echo "vllm tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__

View File

@@ -1,4 +1,4 @@
grpcio==1.74.0
grpcio==1.71.0
protobuf
certifi
setuptools

View File

@@ -2,29 +2,27 @@ package application
import (
"github.com/mudler/LocalAI/core/config"
"github.com/mudler/LocalAI/core/services"
"github.com/mudler/LocalAI/core/templates"
"github.com/mudler/LocalAI/pkg/model"
)
type Application struct {
backendLoader *config.ModelConfigLoader
backendLoader *config.BackendConfigLoader
modelLoader *model.ModelLoader
applicationConfig *config.ApplicationConfig
templatesEvaluator *templates.Evaluator
galleryService *services.GalleryService
}
func newApplication(appConfig *config.ApplicationConfig) *Application {
return &Application{
backendLoader: config.NewModelConfigLoader(appConfig.SystemState.Model.ModelsPath),
modelLoader: model.NewModelLoader(appConfig.SystemState, appConfig.SingleBackend),
backendLoader: config.NewBackendConfigLoader(appConfig.ModelPath),
modelLoader: model.NewModelLoader(appConfig.ModelPath, appConfig.SingleBackend),
applicationConfig: appConfig,
templatesEvaluator: templates.NewEvaluator(appConfig.SystemState.Model.ModelsPath),
templatesEvaluator: templates.NewEvaluator(appConfig.ModelPath),
}
}
func (a *Application) ModelConfigLoader() *config.ModelConfigLoader {
func (a *Application) BackendLoader() *config.BackendConfigLoader {
return a.backendLoader
}
@@ -39,19 +37,3 @@ func (a *Application) ApplicationConfig() *config.ApplicationConfig {
func (a *Application) TemplatesEvaluator() *templates.Evaluator {
return a.templatesEvaluator
}
func (a *Application) GalleryService() *services.GalleryService {
return a.galleryService
}
func (a *Application) start() error {
galleryService := services.NewGalleryService(a.ApplicationConfig(), a.ModelLoader())
err := galleryService.Start(a.ApplicationConfig().Context, a.ModelConfigLoader(), a.ApplicationConfig().SystemState)
if err != nil {
return err
}
a.galleryService = galleryService
return nil
}

Some files were not shown because too many files have changed in this diff.