Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-07 21:22:58 -05:00)

Compare commits: feat/llama ... gosec_fix (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 63c5d843b6 |  |
|  | a9b0e264f2 |  |
@@ -9,7 +9,6 @@
 # Param 2: email
 #
 config_user() {
-    echo "Configuring git for $1 <$2>"
     local gcn=$(git config --global user.name)
     if [ -z "${gcn}" ]; then
         echo "Setting up git user / remote"
@@ -25,7 +24,6 @@ config_user() {
 # Param 2: remote url
 #
 config_remote() {
-    echo "Adding git remote and fetching $2 as $1"
    local gr=$(git remote -v | grep $1)
    if [ -z "${gr}" ]; then
        git remote add $1 $2
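Aside: the two helpers patched above take positional arguments only. A minimal usage sketch with made-up values — only the parameter order (name/email for config_user, remote name/remote URL for config_remote) comes from the comments and bodies shown in the diff:

    config_user "Jane Doe" "jane@example.com"
    config_remote upstream https://github.com/mudler/LocalAI.git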
@@ -7,7 +7,7 @@
       args:
         - FFMPEG=true
         - IMAGE_TYPE=extras
-        - GO_TAGS=p2p tts
+        - GO_TAGS=stablediffusion p2p tts
     env_file:
       - ../.env
     ports:
.env (18 changes)
@@ -29,9 +29,6 @@
 ## Enable/Disable single backend (useful if only one GPU is available)
 # LOCALAI_SINGLE_ACTIVE_BACKEND=true
 
-# Forces shutdown of the backends if busy (only if LOCALAI_SINGLE_ACTIVE_BACKEND is set)
-# LOCALAI_FORCE_BACKEND_SHUTDOWN=true
-
 ## Specify a build type. Available: cublas, openblas, clblas.
 ## cuBLAS: This is a GPU-accelerated version of the complete standard BLAS (Basic Linear Algebra Subprograms) library. It's provided by Nvidia and is part of their CUDA toolkit.
 ## OpenBLAS: This is an open-source implementation of the BLAS library that aims to provide highly optimized code for various platforms. It includes support for multi-threading and can be compiled to use hardware-specific features for additional performance. OpenBLAS can run on many kinds of hardware, including CPUs from Intel, AMD, and ARM.
@@ -41,12 +38,12 @@
 ## Uncomment and set to true to enable rebuilding from source
 # REBUILD=true
 
-## Enable go tags, available: p2p, tts
-## p2p: enable distributed inferencing
+## Enable go tags, available: stablediffusion, tts
+## stablediffusion: image generation with stablediffusion
 ## tts: enables text-to-speech with go-piper
 ## (requires REBUILD=true)
 #
-# GO_TAGS=p2p
+# GO_TAGS=stablediffusion
 
 ## Path where to store generated images
 # LOCALAI_IMAGE_PATH=/tmp/generated/images
@@ -85,15 +82,6 @@
 # Enable to allow p2p mode
 # LOCALAI_P2P=true
 
-# Enable to use federated mode
-# LOCALAI_FEDERATED=true
-
-# Enable to start federation server
-# FEDERATED_SERVER=true
-
-# Define to use federation token
-# TOKEN=""
-
 ### Watchdog settings
 ###
 # Enables watchdog to kill backends that are inactive for too much time
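Aside: the REBUILD / GO_TAGS toggles shown on the new side of this diff are plain environment variables read at build time. A minimal sketch of enabling them — the values are illustrative and the compose command is an assumption, not part of this diff:

    # .env
    REBUILD=true
    GO_TAGS=stablediffusion tts

    # then rebuild and start, e.g. with docker compose
    docker compose up -d --build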
.gitattributes (1 change, vendored)
@@ -1,2 +1 @@
 *.sh text eol=lf
-backend/cpp/llama/*.hpp linguist-vendored
.github/check_and_update.py (11 changes, vendored)
@@ -29,14 +29,9 @@ def calculate_sha256(file_path):
 def manual_safety_check_hf(repo_id):
     scanResponse = requests.get('https://huggingface.co/api/models/' + repo_id + "/scan")
     scan = scanResponse.json()
-    # Check if 'hasUnsafeFile' exists in the response
-    if 'hasUnsafeFile' in scan:
-        if scan['hasUnsafeFile']:
-            return scan
-        else:
-            return None
-    else:
-        return None
+    if scan['hasUnsafeFile']:
+        return scan
+    return None
 
 download_type, repo_id_or_url = parse_uri(uri)
 
.github/ci/modelslist.go (7 changes, vendored)
@@ -6,7 +6,6 @@ import (
 	"io/ioutil"
 	"os"
 
-	"github.com/microcosm-cc/bluemonday"
 	"gopkg.in/yaml.v3"
 )
 
@@ -280,12 +279,6 @@ func main() {
 		return
 	}
 
-	// Ensure that all arbitrary text content is sanitized before display
-	for i, m := range models {
-		models[i].Name = bluemonday.StrictPolicy().Sanitize(m.Name)
-		models[i].Description = bluemonday.StrictPolicy().Sanitize(m.Description)
-	}
-
 	// render the template
 	data := struct {
 		Models []*GalleryModel
.github/dependabot.yml (14 changes, vendored)
@@ -9,8 +9,6 @@ updates:
     directory: "/"
     schedule:
       interval: "weekly"
-    ignore:
-      - dependency-name: "github.com/mudler/LocalAI/pkg/grpc/proto"
   - package-ecosystem: "github-actions"
     # Workflow files stored in the default location of `.github/workflows`. (You don't need to specify `/.github/workflows` for `directory`. You can use `directory: "/"`.)
     directory: "/"
@@ -29,6 +27,10 @@ updates:
     schedule:
       # Check for updates to GitHub Actions every weekday
       interval: "weekly"
+  - package-ecosystem: "pip"
+    directory: "/backend/python/autogptq"
+    schedule:
+      interval: "weekly"
   - package-ecosystem: "pip"
     directory: "/backend/python/bark"
     schedule:
@@ -77,6 +79,14 @@ updates:
     directory: "/backend/python/transformers"
     schedule:
       interval: "weekly"
+  - package-ecosystem: "pip"
+    directory: "/backend/python/transformers-musicgen"
+    schedule:
+      interval: "weekly"
+  - package-ecosystem: "pip"
+    directory: "/backend/python/vall-e-x"
+    schedule:
+      interval: "weekly"
   - package-ecosystem: "pip"
     directory: "/backend/python/vllm"
     schedule:
.github/labeler.yml (11 changes, vendored)
@@ -1,15 +1,6 @@
-enhancement:
+enhancements:
   - head-branch: ['^feature', 'feature']
 
-dependencies:
-  - any:
-    - changed-files:
-      - any-glob-to-any-file: 'Makefile'
-    - changed-files:
-      - any-glob-to-any-file: '*.mod'
-    - changed-files:
-      - any-glob-to-any-file: '*.sum'
-
 kind/documentation:
   - any:
     - changed-files:
.github/workflows/bump_deps.yaml (19 changes, vendored)
@@ -9,17 +9,26 @@ jobs:
       fail-fast: false
       matrix:
         include:
-          - repository: "ggml-org/llama.cpp"
+          - repository: "ggerganov/llama.cpp"
            variable: "CPPLLAMA_VERSION"
            branch: "master"
+          - repository: "go-skynet/go-ggml-transformers.cpp"
+            variable: "GOGGMLTRANSFORMERS_VERSION"
+            branch: "master"
+          - repository: "donomii/go-rwkv.cpp"
+            variable: "RWKV_VERSION"
+            branch: "main"
           - repository: "ggerganov/whisper.cpp"
             variable: "WHISPER_CPP_VERSION"
             branch: "master"
-          - repository: "PABannier/bark.cpp"
-            variable: "BARKCPP_VERSION"
+          - repository: "go-skynet/go-bert.cpp"
+            variable: "BERT_VERSION"
+            branch: "master"
+          - repository: "go-skynet/bloomz.cpp"
+            variable: "BLOOMZ_VERSION"
            branch: "main"
-          - repository: "leejet/stable-diffusion.cpp"
-            variable: "STABLEDIFFUSION_GGML_VERSION"
+          - repository: "mudler/go-ggllm.cpp"
+            variable: "GOGGLLM_VERSION"
            branch: "master"
           - repository: "mudler/go-stable-diffusion"
             variable: "STABLEDIFFUSION_VERSION"
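Aside: each matrix entry above pairs an upstream repository with a Makefile variable that pins its revision. A hedged sketch of what a single bump amounts to — the `?=` assignment form and the Makefile location are assumptions, not taken from this diff:

    # hypothetical: pin whisper.cpp to a newer upstream commit
    sed -i 's/^WHISPER_CPP_VERSION?=.*/WHISPER_CPP_VERSION?=<new-commit-sha>/' Makefile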
.github/workflows/checksum_checker.yaml (2 changes, vendored)
@@ -23,7 +23,7 @@ jobs:
           sudo pip install --upgrade pip
           pip install huggingface_hub
       - name: 'Setup yq'
-        uses: dcarbone/install-yq-action@v1.3.1
+        uses: dcarbone/install-yq-action@v1.1.1
         with:
           version: 'v4.44.2'
           download-compressed: true
.github/workflows/dependabot_auto.yml (2 changes, vendored)
@@ -14,7 +14,7 @@ jobs:
     steps:
       - name: Dependabot metadata
         id: metadata
-        uses: dependabot/fetch-metadata@v2.3.0
+        uses: dependabot/fetch-metadata@v2.2.0
         with:
           github-token: "${{ secrets.GITHUB_TOKEN }}"
           skip-commit-verification: true
.github/workflows/deploy-explorer.yaml (4 changes, vendored)
@@ -33,7 +33,7 @@ jobs:
         run: |
           CGO_ENABLED=0 make build-api
       - name: rm
-        uses: appleboy/ssh-action@v1.2.2
+        uses: appleboy/ssh-action@v1.0.3
         with:
           host: ${{ secrets.EXPLORER_SSH_HOST }}
           username: ${{ secrets.EXPLORER_SSH_USERNAME }}
@@ -53,7 +53,7 @@ jobs:
           rm: true
           target: ./local-ai
       - name: restarting
-        uses: appleboy/ssh-action@v1.2.2
+        uses: appleboy/ssh-action@v1.0.3
         with:
           host: ${{ secrets.EXPLORER_SSH_HOST }}
           username: ${{ secrets.EXPLORER_SSH_USERNAME }}
.github/workflows/generate_grpc_cache.yaml (9 changes, vendored)
@@ -2,10 +2,9 @@ name: 'generate and publish GRPC docker caches'
 
 on:
   workflow_dispatch:
-
-  schedule:
-    # daily at midnight
-    - cron: '0 0 * * *'
+  push:
+    branches:
+      - master
 
 concurrency:
   group: grpc-cache-${{ github.head_ref || github.ref }}-${{ github.repository }}
@@ -17,7 +16,7 @@ jobs:
       matrix:
         include:
           - grpc-base-image: ubuntu:22.04
-            runs-on: 'arc-runner-set'
+            runs-on: 'ubuntu-latest'
             platforms: 'linux/amd64,linux/arm64'
     runs-on: ${{matrix.runs-on}}
     steps:
.github/workflows/generate_intel_image.yaml (2 changes, vendored)
@@ -15,7 +15,7 @@ jobs:
     strategy:
       matrix:
         include:
-          - base-image: intel/oneapi-basekit:2025.1.0-0-devel-ubuntu22.04
+          - base-image: intel/oneapi-basekit:2024.2.0-devel-ubuntu22.04
             runs-on: 'ubuntu-latest'
             platforms: 'linux/amd64'
     runs-on: ${{matrix.runs-on}}
.github/workflows/image.yml (171 changes, vendored)
@@ -13,79 +13,6 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  hipblas-jobs:
-    uses: ./.github/workflows/image_build.yml
-    with:
-      tag-latest: ${{ matrix.tag-latest }}
-      tag-suffix: ${{ matrix.tag-suffix }}
-      ffmpeg: ${{ matrix.ffmpeg }}
-      image-type: ${{ matrix.image-type }}
-      build-type: ${{ matrix.build-type }}
-      cuda-major-version: ${{ matrix.cuda-major-version }}
-      cuda-minor-version: ${{ matrix.cuda-minor-version }}
-      platforms: ${{ matrix.platforms }}
-      runs-on: ${{ matrix.runs-on }}
-      base-image: ${{ matrix.base-image }}
-      grpc-base-image: ${{ matrix.grpc-base-image }}
-      aio: ${{ matrix.aio }}
-      makeflags: ${{ matrix.makeflags }}
-      latest-image: ${{ matrix.latest-image }}
-      latest-image-aio: ${{ matrix.latest-image-aio }}
-    secrets:
-      dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
-      dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
-      quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
-      quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
-    strategy:
-      # Pushing with all jobs in parallel
-      # eats the bandwidth of all the nodes
-      max-parallel: 2
-      matrix:
-        include:
-          - build-type: 'hipblas'
-            platforms: 'linux/amd64'
-            tag-latest: 'auto'
-            tag-suffix: '-hipblas-ffmpeg'
-            ffmpeg: 'true'
-            image-type: 'extras'
-            aio: "-aio-gpu-hipblas"
-            base-image: "rocm/dev-ubuntu-22.04:6.1"
-            grpc-base-image: "ubuntu:22.04"
-            latest-image: 'latest-gpu-hipblas'
-            latest-image-aio: 'latest-aio-gpu-hipblas'
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
-          - build-type: 'hipblas'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            tag-suffix: '-hipblas'
-            ffmpeg: 'false'
-            image-type: 'extras'
-            base-image: "rocm/dev-ubuntu-22.04:6.1"
-            grpc-base-image: "ubuntu:22.04"
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
-          - build-type: 'hipblas'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            tag-suffix: '-hipblas-ffmpeg-core'
-            ffmpeg: 'true'
-            image-type: 'core'
-            base-image: "rocm/dev-ubuntu-22.04:6.1"
-            grpc-base-image: "ubuntu:22.04"
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
-            latest-image: 'latest-gpu-hipblas-core'
-          - build-type: 'hipblas'
-            platforms: 'linux/amd64'
-            tag-latest: 'false'
-            tag-suffix: '-hipblas-core'
-            ffmpeg: 'false'
-            image-type: 'core'
-            base-image: "rocm/dev-ubuntu-22.04:6.1"
-            grpc-base-image: "ubuntu:22.04"
-            runs-on: 'arc-runner-set'
-            makeflags: "--jobs=3 --output-sync=target"
   self-hosted-jobs:
     uses: ./.github/workflows/image_build.yml
     with:
@@ -112,7 +39,7 @@ jobs:
     strategy:
       # Pushing with all jobs in parallel
       # eats the bandwidth of all the nodes
-      max-parallel: ${{ github.event_name != 'pull_request' && 5 || 8 }}
+      max-parallel: ${{ github.event_name != 'pull_request' && 6 || 10 }}
       matrix:
         include:
           # Extra images
@@ -195,6 +122,29 @@ jobs:
             base-image: "ubuntu:22.04"
             runs-on: 'arc-runner-set'
             makeflags: "--jobs=3 --output-sync=target"
+          - build-type: 'hipblas'
+            platforms: 'linux/amd64'
+            tag-latest: 'auto'
+            tag-suffix: '-hipblas-ffmpeg'
+            ffmpeg: 'true'
+            image-type: 'extras'
+            aio: "-aio-gpu-hipblas"
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
+            grpc-base-image: "ubuntu:22.04"
+            latest-image: 'latest-gpu-hipblas'
+            latest-image-aio: 'latest-aio-gpu-hipblas'
+            runs-on: 'arc-runner-set'
+            makeflags: "--jobs=3 --output-sync=target"
+          - build-type: 'hipblas'
+            platforms: 'linux/amd64'
+            tag-latest: 'false'
+            tag-suffix: '-hipblas'
+            ffmpeg: 'false'
+            image-type: 'extras'
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
+            grpc-base-image: "ubuntu:22.04"
+            runs-on: 'arc-runner-set'
+            makeflags: "--jobs=3 --output-sync=target"
           - build-type: 'sycl_f16'
             platforms: 'linux/amd64'
             tag-latest: 'auto'
@@ -252,7 +202,6 @@ jobs:
             image-type: 'core'
             runs-on: 'arc-runner-set'
             makeflags: "--jobs=3 --output-sync=target"
-            latest-image: 'latest-gpu-intel-f16-core'
           - build-type: 'sycl_f32'
             platforms: 'linux/amd64'
             tag-latest: 'false'
@@ -263,7 +212,26 @@ jobs:
             image-type: 'core'
             runs-on: 'arc-runner-set'
             makeflags: "--jobs=3 --output-sync=target"
-            latest-image: 'latest-gpu-intel-f32-core'
+          - build-type: 'hipblas'
+            platforms: 'linux/amd64'
+            tag-latest: 'false'
+            tag-suffix: '-hipblas-ffmpeg-core'
+            ffmpeg: 'true'
+            image-type: 'core'
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
+            grpc-base-image: "ubuntu:22.04"
+            runs-on: 'arc-runner-set'
+            makeflags: "--jobs=3 --output-sync=target"
+          - build-type: 'hipblas'
+            platforms: 'linux/amd64'
+            tag-latest: 'false'
+            tag-suffix: '-hipblas-core'
+            ffmpeg: 'false'
+            image-type: 'core'
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
+            grpc-base-image: "ubuntu:22.04"
+            runs-on: 'arc-runner-set'
+            makeflags: "--jobs=3 --output-sync=target"
 
   core-image-build:
     uses: ./.github/workflows/image_build.yml
@@ -283,7 +251,6 @@ jobs:
       makeflags: ${{ matrix.makeflags }}
       latest-image: ${{ matrix.latest-image }}
       latest-image-aio: ${{ matrix.latest-image-aio }}
-      skip-drivers: ${{ matrix.skip-drivers }}
     secrets:
       dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
       dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
@@ -305,7 +272,6 @@ jobs:
            latest-image: 'latest-cpu'
            latest-image-aio: 'latest-aio-cpu'
            makeflags: "--jobs=4 --output-sync=target"
-            skip-drivers: 'false'
          - build-type: 'cublas'
            cuda-major-version: "11"
            cuda-minor-version: "7"
@@ -317,7 +283,6 @@ jobs:
            base-image: "ubuntu:22.04"
            runs-on: 'arc-runner-set'
            makeflags: "--jobs=4 --output-sync=target"
-            skip-drivers: 'false'
          - build-type: 'cublas'
            cuda-major-version: "12"
            cuda-minor-version: "0"
@@ -329,7 +294,6 @@ jobs:
            base-image: "ubuntu:22.04"
            runs-on: 'arc-runner-set'
            makeflags: "--jobs=4 --output-sync=target"
-            skip-drivers: 'false'
          - build-type: 'cublas'
            cuda-major-version: "11"
            cuda-minor-version: "7"
@@ -341,8 +305,6 @@ jobs:
            runs-on: 'arc-runner-set'
            base-image: "ubuntu:22.04"
            makeflags: "--jobs=4 --output-sync=target"
-            skip-drivers: 'false'
-            latest-image: 'latest-gpu-nvidia-cuda-12-core'
          - build-type: 'cublas'
            cuda-major-version: "12"
            cuda-minor-version: "0"
@@ -353,57 +315,14 @@ jobs:
            image-type: 'core'
            runs-on: 'arc-runner-set'
            base-image: "ubuntu:22.04"
-            skip-drivers: 'false'
            makeflags: "--jobs=4 --output-sync=target"
-            latest-image: 'latest-gpu-nvidia-cuda-12-core'
          - build-type: 'vulkan'
            platforms: 'linux/amd64'
            tag-latest: 'false'
            tag-suffix: '-vulkan-ffmpeg-core'
+            latest-image: 'latest-vulkan-ffmpeg-core'
            ffmpeg: 'true'
            image-type: 'core'
            runs-on: 'arc-runner-set'
            base-image: "ubuntu:22.04"
-            skip-drivers: 'false'
            makeflags: "--jobs=4 --output-sync=target"
-            latest-image: 'latest-gpu-vulkan-core'
-  gh-runner:
-    uses: ./.github/workflows/image_build.yml
-    with:
-      tag-latest: ${{ matrix.tag-latest }}
-      tag-suffix: ${{ matrix.tag-suffix }}
-      ffmpeg: ${{ matrix.ffmpeg }}
-      image-type: ${{ matrix.image-type }}
-      build-type: ${{ matrix.build-type }}
-      cuda-major-version: ${{ matrix.cuda-major-version }}
-      cuda-minor-version: ${{ matrix.cuda-minor-version }}
-      platforms: ${{ matrix.platforms }}
-      runs-on: ${{ matrix.runs-on }}
-      aio: ${{ matrix.aio }}
-      base-image: ${{ matrix.base-image }}
-      grpc-base-image: ${{ matrix.grpc-base-image }}
-      makeflags: ${{ matrix.makeflags }}
-      latest-image: ${{ matrix.latest-image }}
-      latest-image-aio: ${{ matrix.latest-image-aio }}
-      skip-drivers: ${{ matrix.skip-drivers }}
-    secrets:
-      dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
-      dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
-      quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
-      quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
-    strategy:
-      matrix:
-        include:
-          - build-type: 'cublas'
-            cuda-major-version: "12"
-            cuda-minor-version: "0"
-            platforms: 'linux/arm64'
-            tag-latest: 'false'
-            tag-suffix: '-nvidia-l4t-arm64-core'
-            latest-image: 'latest-nvidia-l4t-arm64-core'
-            ffmpeg: 'true'
-            image-type: 'core'
-            base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
-            runs-on: 'ubuntu-24.04-arm'
-            makeflags: "--jobs=4 --output-sync=target"
-            skip-drivers: 'true'
.github/workflows/image_build.yml (11 changes, vendored)
@@ -49,10 +49,6 @@ on:
       description: 'FFMPEG'
       default: ''
       type: string
-    skip-drivers:
-      description: 'Skip drivers by default'
-      default: 'false'
-      type: string
     image-type:
       description: 'Image type'
       default: ''
@@ -238,7 +234,6 @@ jobs:
            GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
            GRPC_VERSION=v1.65.0
            MAKEFLAGS=${{ inputs.makeflags }}
-            SKIP_DRIVERS=${{ inputs.skip-drivers }}
          context: .
          file: ./Dockerfile
          cache-from: type=gha
@@ -267,7 +262,6 @@ jobs:
            GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
            GRPC_VERSION=v1.65.0
            MAKEFLAGS=${{ inputs.makeflags }}
-            SKIP_DRIVERS=${{ inputs.skip-drivers }}
          context: .
          file: ./Dockerfile
          cache-from: type=gha
@@ -310,11 +304,6 @@ jobs:
          tags: ${{ steps.meta_aio_dockerhub.outputs.tags }}
          labels: ${{ steps.meta_aio_dockerhub.outputs.labels }}
 
-      - name: Cleanup
-        run: |
-          docker builder prune -f
-          docker system prune --force --volumes --all
-
       - name: Latest tag
         # run this on branches, when it is a tag and there is a latest-image defined
         if: github.event_name != 'pull_request' && inputs.latest-image != '' && github.ref_type == 'tag'
.github/workflows/notify-models.yaml (14 changes, vendored)
@@ -8,7 +8,7 @@ jobs:
   notify-discord:
     if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
     env:
-      MODEL_NAME: gemma-3-12b-it
+      MODEL_NAME: hermes-2-theta-llama-3-8b
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -16,9 +16,9 @@ jobs:
          fetch-depth: 0 # needed to checkout all branches for this Action to work
       - uses: mudler/localai-github-action@v1
         with:
-          model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
+          model: 'hermes-2-theta-llama-3-8b' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
       # Check the PR diff using the current branch and the base branch of the PR
-      - uses: GrantBirki/git-diff-action@v2.8.0
+      - uses: GrantBirki/git-diff-action@v2.7.0
         id: git-diff-action
         with:
           json_diff_file_output: diff.json
@@ -79,7 +79,7 @@ jobs:
          args: ${{ steps.summarize.outputs.message }}
       - name: Setup tmate session if fails
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
@@ -87,7 +87,7 @@ jobs:
   notify-twitter:
     if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
     env:
-      MODEL_NAME: gemma-3-12b-it
+      MODEL_NAME: hermes-2-theta-llama-3-8b
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -99,7 +99,7 @@ jobs:
          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
          until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
       # Check the PR diff using the current branch and the base branch of the PR
-      - uses: GrantBirki/git-diff-action@v2.8.0
+      - uses: GrantBirki/git-diff-action@v2.7.0
         id: git-diff-action
         with:
           json_diff_file_output: diff.json
@@ -161,7 +161,7 @@ jobs:
          TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
       - name: Setup tmate session if fails
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
.github/workflows/notify-releases.yaml (4 changes, vendored)
@@ -14,7 +14,7 @@ jobs:
     steps:
       - uses: mudler/localai-github-action@v1
         with:
-          model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
+          model: 'hermes-2-theta-llama-3-8b' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
       - name: Summarize
         id: summarize
         run: |
@@ -60,4 +60,4 @@ jobs:
           DISCORD_AVATAR: "https://avatars.githubusercontent.com/u/139863280?v=4"
         uses: Ilshidur/action-discord@master
         with:
           args: ${{ steps.summarize.outputs.message }}
.github/workflows/release.yaml (43 changes, vendored)
@@ -123,7 +123,7 @@ jobs:
            release/*
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
@@ -232,12 +232,45 @@ jobs:
            release/*
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
           limit-access-to-actor: true
+
+  build-stablediffusion:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+      - uses: actions/setup-go@v5
+        with:
+          go-version: '1.21.x'
+          cache: false
+      - name: Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y --no-install-recommends libopencv-dev protobuf-compiler ccache upx-ucl
+          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
+          go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
+      - name: Build stablediffusion
+        run: |
+          export PATH=$PATH:$GOPATH/bin
+          make backend-assets/grpc/stablediffusion
+          mkdir -p release && cp backend-assets/grpc/stablediffusion release
+        env:
+          GO_TAGS: stablediffusion
+      - uses: actions/upload-artifact@v4
+        with:
+          name: stablediffusion
+          path: release/
+      - name: Release
+        uses: softprops/action-gh-release@v2
+        if: startsWith(github.ref, 'refs/tags/')
+        with:
+          files: |
+            release/*
 
   build-macOS-x86_64:
     runs-on: macos-13
@@ -275,7 +308,7 @@ jobs:
            release/*
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
@@ -317,7 +350,7 @@ jobs:
            release/*
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
.github/workflows/secscan.yaml (2 changes, vendored)
@@ -18,7 +18,7 @@ jobs:
         if: ${{ github.actor != 'dependabot[bot]' }}
       - name: Run Gosec Security Scanner
         if: ${{ github.actor != 'dependabot[bot]' }}
-        uses: securego/gosec@v2.22.3
+        uses: securego/gosec@v2.21.0
         with:
           # we let the report trigger content trigger a failure using the GitHub Security features.
           args: '-no-fail -fmt sarif -out results.sarif ./...'
.github/workflows/test-extra.yml (128 changes, vendored)
@@ -35,6 +35,30 @@ jobs:
         run: |
           make --jobs=5 --output-sync=target -C backend/python/transformers
           make --jobs=5 --output-sync=target -C backend/python/transformers test
+
+  tests-sentencetransformers:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential ffmpeg
+          # Install UV
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
+          sudo apt-get install -y libopencv-dev
+          pip install --user --no-cache-dir grpcio-tools==1.64.1
+
+      - name: Test sentencetransformers
+        run: |
+          make --jobs=5 --output-sync=target -C backend/python/sentencetransformers
+          make --jobs=5 --output-sync=target -C backend/python/sentencetransformers test
+
   tests-rerankers:
     runs-on: ubuntu-latest
     steps:
@@ -78,27 +102,71 @@ jobs:
           make --jobs=5 --output-sync=target -C backend/python/diffusers
           make --jobs=5 --output-sync=target -C backend/python/diffusers test
 
-  # tests-transformers-musicgen:
-  #   runs-on: ubuntu-latest
-  #   steps:
-  #     - name: Clone
-  #       uses: actions/checkout@v4
-  #       with:
-  #         submodules: true
-  #     - name: Dependencies
-  #       run: |
-  #         sudo apt-get update
-  #         sudo apt-get install build-essential ffmpeg
-  #         # Install UV
-  #         curl -LsSf https://astral.sh/uv/install.sh | sh
-  #         sudo apt-get install -y ca-certificates cmake curl patch python3-pip
-  #         sudo apt-get install -y libopencv-dev
-  #         pip install --user --no-cache-dir grpcio-tools==1.64.1
-  #     - name: Test transformers-musicgen
-  #       run: |
-  #         make --jobs=5 --output-sync=target -C backend/python/transformers-musicgen
-  #         make --jobs=5 --output-sync=target -C backend/python/transformers-musicgen test
+  tests-parler-tts:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential ffmpeg
+          # Install UV
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
+          sudo apt-get install -y libopencv-dev
+          pip install --user --no-cache-dir grpcio-tools==1.64.1
+      - name: Test parler-tts
+        run: |
+          make --jobs=5 --output-sync=target -C backend/python/parler-tts
+          make --jobs=5 --output-sync=target -C backend/python/parler-tts test
+
+  tests-openvoice:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential ffmpeg
+          # Install UV
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
+          sudo apt-get install -y libopencv-dev
+          pip install --user --no-cache-dir grpcio-tools==1.64.1
+      - name: Test openvoice
+        run: |
+          make --jobs=5 --output-sync=target -C backend/python/openvoice
+          make --jobs=5 --output-sync=target -C backend/python/openvoice test
+
+  tests-transformers-musicgen:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential ffmpeg
+          # Install UV
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
+          sudo apt-get install -y libopencv-dev
+          pip install --user --no-cache-dir grpcio-tools==1.64.1
+      - name: Test transformers-musicgen
+        run: |
+          make --jobs=5 --output-sync=target -C backend/python/transformers-musicgen
+          make --jobs=5 --output-sync=target -C backend/python/transformers-musicgen test
 
   # tests-bark:
   #   runs-on: ubuntu-latest
@@ -185,6 +253,26 @@ jobs:
   #       run: |
   #         make --jobs=5 --output-sync=target -C backend/python/vllm
   #         make --jobs=5 --output-sync=target -C backend/python/vllm test
+  tests-vallex:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Clone
+        uses: actions/checkout@v4
+        with:
+          submodules: true
+      - name: Dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install build-essential ffmpeg
+          # Install UV
+          curl -LsSf https://astral.sh/uv/install.sh | sh
+          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
+          sudo apt-get install -y libopencv-dev
+          pip install --user --no-cache-dir grpcio-tools==1.64.1
+      - name: Test vall-e-x
+        run: |
+          make --jobs=5 --output-sync=target -C backend/python/vall-e-x
+          make --jobs=5 --output-sync=target -C backend/python/vall-e-x test
+
   tests-coqui:
     runs-on: ubuntu-latest
.github/workflows/test.yml (28 changes, vendored)
@@ -100,12 +100,15 @@ jobs:
          # The python3-grpc-tools package in 22.04 is too old
          pip install --user grpcio-tools
 
-          make -C backend/python/transformers
+          sudo rm -rfv /usr/bin/conda || true
+          PATH=$PATH:/opt/conda/bin make -C backend/python/sentencetransformers
 
          # Pre-build piper before we start tests in order to have shared libraries in place
          make sources/go-piper && \
          GO_TAGS="tts" make -C sources/go-piper piper.o && \
-          sudo cp -rfv sources/go-piper/piper-phonemize/pi/lib/. /usr/lib/
+          sudo cp -rfv sources/go-piper/piper-phonemize/pi/lib/. /usr/lib/ && \
+          # Pre-build stable diffusion before we install a newer version of abseil (not compatible with stablediffusion-ncn)
+          PATH="$PATH:/root/go/bin" GO_TAGS="stablediffusion tts" GRPC_BACKENDS=backend-assets/grpc/stablediffusion make build
         env:
           CUDA_VERSION: 12-4
       - name: Cache grpc
@@ -127,10 +130,10 @@ jobs:
          cd grpc && cd cmake/build && sudo make --jobs 5 install
       - name: Test
         run: |
-          PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test
+          PATH="$PATH:/root/go/bin" GO_TAGS="stablediffusion tts" make --jobs 5 --output-sync=target test
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
@@ -175,26 +178,17 @@ jobs:
         uses: actions/checkout@v4
         with:
           submodules: true
-      - name: Dependencies
-        run: |
-          # Install protoc
-          curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
-          unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
-          rm protoc.zip
-          go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
-          PATH="$PATH:$HOME/go/bin" make protogen-go
       - name: Build images
         run: |
           docker build --build-arg FFMPEG=true --build-arg IMAGE_TYPE=extras --build-arg EXTRA_BACKENDS=rerankers --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile .
           BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test make docker-aio
       - name: Test
         run: |
-          PATH="$PATH:$HOME/go/bin" LOCALAI_MODELS_DIR=$PWD/models LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio \
+          LOCALAI_MODELS_DIR=$PWD/models LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio \
           make run-e2e-aio
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
@@ -221,7 +215,7 @@ jobs:
       - name: Dependencies
         run: |
           brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
-          pip install --user --no-cache-dir grpcio-tools
+          pip install --user --no-cache-dir grpcio-tools==1.64.1
       - name: Test
         run: |
           export C_INCLUDE_PATH=/usr/local/include
@@ -232,7 +226,7 @@ jobs:
          BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
       - name: Setup tmate session if tests fail
         if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3.19
+        uses: mxschmitt/action-tmate@v3.18
         with:
           detached: true
           connect-timeout-seconds: 180
.gitignore (2 changes, vendored)
@@ -2,7 +2,6 @@
 /sources/
 __pycache__/
 *.a
-*.o
 get-sources
 prepare-sources
 /backend/cpp/llama/grpc-server
@@ -13,6 +12,7 @@ prepare-sources
 
 go-ggml-transformers
 go-gpt2
+go-rwkv
 whisper.cpp
 /bloomz
 go-bert
.vscode/launch.json (2 changes, vendored)
@@ -26,7 +26,7 @@
                 "LOCALAI_P2P": "true",
                 "LOCALAI_FEDERATED": "true"
             },
-            "buildFlags": ["-tags", "p2p tts", "-v"],
+            "buildFlags": ["-tags", "stablediffusion p2p tts", "-v"],
             "envFile": "${workspaceFolder}/.env",
             "cwd": "${workspaceRoot}"
         }
@@ -15,6 +15,8 @@ Thank you for your interest in contributing to LocalAI! We appreciate your time
 - [Documentation](#documentation)
 - [Community and Communication](#community-and-communication)
 
+
+
 ## Getting Started
 
 ### Prerequisites
@@ -52,7 +54,7 @@ If you find a bug, have a feature request, or encounter any issues, please check
 
 ## Coding Guidelines
 
-- No specific coding guidelines at the moment. Please make sure the code can be tested. The most popular lint tools like [`golangci-lint`](https://golangci-lint.run) can help you here.
+- No specific coding guidelines at the moment. Please make sure the code can be tested. The most popular lint tools like []`golangci-lint`](https://golangci-lint.run) can help you here.
 
 ## Testing
 
@@ -82,3 +84,5 @@ We are welcome the contribution of the documents, please open new PR or create a
 - You can reach out via the Github issue tracker.
 - Open a new discussion at [Discussion](https://github.com/go-skynet/LocalAI/discussions)
 - Join the Discord channel [Discord](https://discord.gg/uJAeKSAGDy)
+
+---
Dockerfile (132 changes)
@@ -9,39 +9,25 @@ FROM ${BASE_IMAGE} AS requirements-core
 USER root
 
 ARG GO_VERSION=1.22.6
-ARG CMAKE_VERSION=3.26.4
-ARG CMAKE_FROM_SOURCE=false
 ARG TARGETARCH
 ARG TARGETVARIANT
 
 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,transformers:/build/backend/python/transformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,faster-whisper:/build/backend/python/faster-whisper/run.sh,kokoro:/build/backend/python/kokoro/run.sh,vllm:/build/backend/python/vllm/run.sh,exllama2:/build/backend/python/exllama2/run.sh"
+ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,openvoice:/build/backend/python/openvoice/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
 
 
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
         build-essential \
         ccache \
         ca-certificates \
-        curl libssl-dev \
+        cmake \
+        curl \
         git \
-        git-lfs \
         unzip upx-ucl && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
-# Install CMake (the version in 22.04 is too old)
-RUN <<EOT bash
-    if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
-        curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
-    else
-        apt-get update && \
-        apt-get install -y \
-            cmake && \
-        apt-get clean && \
-        rm -rf /var/lib/apt/lists/*
-    fi
-EOT
-
 # Install Go
 RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
 ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin
@@ -69,10 +55,14 @@ ENV PATH=/opt/rocm/bin:${PATH}
 # OpenBLAS requirements and stable diffusion
 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
-        libopenblas-dev && \
+        libopenblas-dev \
+        libopencv-dev && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
+# Set up OpenCV
+RUN ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2
+
 WORKDIR /build
 
 ###################################
@@ -81,8 +71,7 @@ WORKDIR /build
 # The requirements-extras target is for any builds with IMAGE_TYPE=extras. It should not be placed in this target unless every IMAGE_TYPE=extras build will use it
 FROM requirements-core AS requirements-extras
 
-# Install uv as a system package
-RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh
 ENV PATH="/root/.cargo/bin:${PATH}"
 
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
@@ -111,13 +100,12 @@ FROM requirements-${IMAGE_TYPE} AS requirements-drivers
 ARG BUILD_TYPE
 ARG CUDA_MAJOR_VERSION=12
 ARG CUDA_MINOR_VERSION=0
-ARG SKIP_DRIVERS=false
 
 ENV BUILD_TYPE=${BUILD_TYPE}
 
 # Vulkan requirements
 RUN <<EOT bash
-    if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
+    if [ "${BUILD_TYPE}" = "vulkan" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils wget gpg-agent && \
@@ -133,7 +121,7 @@ EOT
 
 # CuBLAS requirements
 RUN <<EOT bash
-    if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
+    if [ "${BUILD_TYPE}" = "cublas" ]; then
        apt-get update && \
        apt-get install -y --no-install-recommends \
            software-properties-common pciutils
@@ -159,7 +147,7 @@ RUN <<EOT bash
 EOT
 
 # If we are building with clblas support, we need the libraries for the builds
-RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
+RUN if [ "${BUILD_TYPE}" = "clblas" ]; then \
    apt-get update && \
    apt-get install -y --no-install-recommends \
        libclblast-dev && \
@@ -167,7 +155,7 @@ RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
    rm -rf /var/lib/apt/lists/* \
    ; fi
 
-RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
+RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
    apt-get update && \
    apt-get install -y --no-install-recommends \
        hipblas-dev \
@@ -200,8 +188,6 @@ FROM ${GRPC_BASE_IMAGE} AS grpc
|
|||||||
# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
|
# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
|
||||||
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
|
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
|
||||||
ARG GRPC_VERSION=v1.65.0
|
ARG GRPC_VERSION=v1.65.0
|
||||||
ARG CMAKE_FROM_SOURCE=false
|
|
||||||
ARG CMAKE_VERSION=3.26.4
|
|
||||||
|
|
||||||
ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
|
ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
|
||||||
|
|
||||||
@@ -210,24 +196,12 @@ WORKDIR /build
|
|||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y --no-install-recommends \
|
apt-get install -y --no-install-recommends \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
build-essential curl libssl-dev \
|
build-essential \
|
||||||
|
cmake \
|
||||||
git && \
|
git && \
|
||||||
apt-get clean && \
|
apt-get clean && \
|
||||||
rm -rf /var/lib/apt/lists/*
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Install CMake (the version in 22.04 is too old)
|
|
||||||
RUN <<EOT bash
|
|
||||||
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
|
|
||||||
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
|
|
||||||
else
|
|
||||||
apt-get update && \
|
|
||||||
apt-get install -y \
|
|
||||||
cmake && \
|
|
||||||
apt-get clean && \
|
|
||||||
rm -rf /var/lib/apt/lists/*
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
|
|
||||||
# We install GRPC to a different prefix here so that we can copy in only the build artifacts later
|
# We install GRPC to a different prefix here so that we can copy in only the build artifacts later
|
||||||
# saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
|
# saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
|
||||||
# and running make install in the target container
|
# and running make install in the target container
|
||||||
@@ -247,7 +221,7 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
|
|||||||
|
|
||||||
FROM requirements-drivers AS builder-base
|
FROM requirements-drivers AS builder-base
|
||||||
|
|
||||||
ARG GO_TAGS="tts p2p"
|
ARG GO_TAGS="stablediffusion tts p2p"
|
||||||
ARG GRPC_BACKENDS
|
ARG GRPC_BACKENDS
|
||||||
ARG MAKEFLAGS
|
ARG MAKEFLAGS
|
||||||
ARG LD_FLAGS="-s -w"
|
ARG LD_FLAGS="-s -w"
|
||||||
@@ -281,12 +255,35 @@ RUN <<EOT bash
|
|||||||
fi
|
fi
|
||||||
EOT
|
EOT
|
||||||
|
|
||||||
|
|
||||||
|
###################################
|
||||||
|
###################################
|
||||||
|
|
||||||
|
# This first portion of builder holds the layers specifically used to build backend-assets/grpc/stablediffusion
|
||||||
|
# In most cases, builder is the image you should be using - however, this can save build time if one just needs to copy backend-assets/grpc/stablediffusion and nothing else.
|
||||||
|
FROM builder-base AS builder-sd
|
||||||
|
|
||||||
|
# stablediffusion does not tolerate a newer version of abseil, copy only over enough elements to build it
|
||||||
|
COPY Makefile .
|
||||||
|
COPY go.mod .
|
||||||
|
COPY go.sum .
|
||||||
|
COPY backend/backend.proto ./backend/backend.proto
|
||||||
|
COPY backend/go/image/stablediffusion ./backend/go/image/stablediffusion
|
||||||
|
COPY pkg/grpc ./pkg/grpc
|
||||||
|
COPY pkg/stablediffusion ./pkg/stablediffusion
|
||||||
|
RUN git init
|
||||||
|
RUN make sources/go-stable-diffusion
|
||||||
|
RUN touch prepare-sources
|
||||||
|
|
||||||
|
# Actually build the backend
|
||||||
|
RUN GRPC_BACKENDS=backend-assets/grpc/stablediffusion make backend-assets/grpc/stablediffusion
|
||||||
|
|
||||||
###################################
|
###################################
|
||||||
###################################
|
###################################
|
||||||
|
|
||||||
# The builder target compiles LocalAI. This target is not the target that will be uploaded to the registry.
|
# The builder target compiles LocalAI. This target is not the target that will be uploaded to the registry.
|
||||||
# Adjustments to the build process should likely be made here.
|
# Adjustments to the build process should likely be made here.
|
||||||
FROM builder-base AS builder
|
FROM builder-sd AS builder
|
||||||
|
|
||||||
# Install the pre-built GRPC
|
# Install the pre-built GRPC
|
||||||
COPY --from=grpc /opt/grpc /usr/local
|
COPY --from=grpc /opt/grpc /usr/local
|
||||||
@@ -300,11 +297,11 @@ COPY .git .
|
|||||||
RUN make prepare
|
RUN make prepare
|
||||||
|
|
||||||
## Build the binary
|
## Build the binary
|
||||||
## If it's CUDA or hipblas, we want to skip some of the llama-compat backends to save space
|
## If it's CUDA, we want to skip some of the llama-compat backends to save space
|
||||||
## We only leave the most CPU-optimized variant and the fallback for the cublas/hipblas build
|
## We only leave the most CPU-optimized variant and the fallback for the cublas build
|
||||||
## (both will use CUDA or hipblas for the actual computation)
|
## (both will use CUDA for the actual computation)
|
||||||
RUN if [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
|
RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
|
||||||
SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx512 backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build; \
|
SKIP_GRPC_BACKEND="backend-assets/grpc/llama-cpp-avx backend-assets/grpc/llama-cpp-avx2" make build; \
|
||||||
else \
|
else \
|
||||||
make build; \
|
make build; \
|
||||||
fi
|
fi
|
||||||
@@ -326,6 +323,8 @@ ARG FFMPEG
|
|||||||
|
|
||||||
COPY --from=grpc /opt/grpc /usr/local
|
COPY --from=grpc /opt/grpc /usr/local
|
||||||
|
|
||||||
|
COPY --from=builder-sd /build/backend-assets/grpc/stablediffusion /build/backend-assets/grpc/stablediffusion
|
||||||
|
|
||||||
COPY .devcontainer-scripts /.devcontainer-scripts
|
COPY .devcontainer-scripts /.devcontainer-scripts
|
||||||
|
|
||||||
# Add FFmpeg
|
# Add FFmpeg
|
||||||
@@ -339,8 +338,9 @@ RUN if [ "${FFMPEG}" = "true" ]; then \
|
|||||||
|
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y --no-install-recommends \
|
apt-get install -y --no-install-recommends \
|
||||||
ssh less wget
|
ssh less && \
|
||||||
# For the devcontainer, leave apt functional in case additional devtools are needed at runtime.
|
apt-get clean && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
RUN go install github.com/go-delve/delve/cmd/dlv@latest
|
||||||
|
|
||||||
@@ -398,28 +398,36 @@ COPY --from=builder /build/local-ai ./
|
|||||||
# Copy shared libraries for piper
|
# Copy shared libraries for piper
|
||||||
COPY --from=builder /build/sources/go-piper/piper-phonemize/pi/lib/* /usr/lib/
|
COPY --from=builder /build/sources/go-piper/piper-phonemize/pi/lib/* /usr/lib/
|
||||||
|
|
||||||
|
# do not let stablediffusion rebuild (requires an older version of absl)
|
||||||
|
COPY --from=builder-sd /build/backend-assets/grpc/stablediffusion ./backend-assets/grpc/stablediffusion
|
||||||
|
|
||||||
# Change the shell to bash so we can use [[ tests below
|
# Change the shell to bash so we can use [[ tests below
|
||||||
SHELL ["/bin/bash", "-c"]
|
SHELL ["/bin/bash", "-c"]
|
||||||
# We try to strike a balance between individual layer size (as that affects total push time) and total image size
|
# We try to strike a balance between individual layer size (as that affects total push time) and total image size
|
||||||
# Splitting the backends into more groups with fewer items results in a larger image, but a smaller size for the largest layer
|
# Splitting the backends into more groups with fewer items results in a larger image, but a smaller size for the largest layer
|
||||||
# Splitting the backends into fewer groups with more items results in a smaller image, but a larger size for the largest layer
|
# Splitting the backends into fewer groups with more items results in a smaller image, but a larger size for the largest layer
|
||||||
|
|
||||||
RUN if [[ ( "${IMAGE_TYPE}" == "extras ")]]; then \
|
|
||||||
apt-get -qq -y install espeak-ng \
|
|
||||||
; fi
|
|
||||||
|
|
||||||
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/coqui \
|
make -C backend/python/coqui \
|
||||||
; fi && \
|
; fi && \
|
||||||
if [[ ( "${EXTRA_BACKENDS}" =~ "faster-whisper" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
if [[ ( "${EXTRA_BACKENDS}" =~ "parler-tts" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/faster-whisper \
|
make -C backend/python/parler-tts \
|
||||||
; fi && \
|
; fi && \
|
||||||
if [[ ( "${EXTRA_BACKENDS}" =~ "diffusers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
if [[ ( "${EXTRA_BACKENDS}" =~ "diffusers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/diffusers \
|
make -C backend/python/diffusers \
|
||||||
|
; fi && \
|
||||||
|
if [[ ( "${EXTRA_BACKENDS}" =~ "transformers-musicgen" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
|
make -C backend/python/transformers-musicgen \
|
||||||
; fi
|
; fi
|
||||||
|
|
||||||
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vall-e-x" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/kokoro \
|
make -C backend/python/vall-e-x \
|
||||||
|
; fi && \
|
||||||
|
if [[ ( "${EXTRA_BACKENDS}" =~ "openvoice" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
|
make -C backend/python/openvoice \
|
||||||
|
; fi && \
|
||||||
|
if [[ ( "${EXTRA_BACKENDS}" =~ "sentencetransformers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
|
make -C backend/python/sentencetransformers \
|
||||||
; fi && \
|
; fi && \
|
||||||
if [[ ( "${EXTRA_BACKENDS}" =~ "exllama2" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
if [[ ( "${EXTRA_BACKENDS}" =~ "exllama2" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/exllama2 \
|
make -C backend/python/exllama2 \
|
||||||
@@ -431,11 +439,17 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "kokoro" || -z "${EXTRA_BACKENDS}" ) && "$IMA
|
|||||||
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vllm" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vllm" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/vllm \
|
make -C backend/python/vllm \
|
||||||
; fi && \
|
; fi && \
|
||||||
|
if [[ ( "${EXTRA_BACKENDS}" =~ "autogptq" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
|
make -C backend/python/autogptq \
|
||||||
|
; fi && \
|
||||||
if [[ ( "${EXTRA_BACKENDS}" =~ "bark" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
if [[ ( "${EXTRA_BACKENDS}" =~ "bark" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/bark \
|
make -C backend/python/bark \
|
||||||
; fi && \
|
; fi && \
|
||||||
if [[ ( "${EXTRA_BACKENDS}" =~ "rerankers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
if [[ ( "${EXTRA_BACKENDS}" =~ "rerankers" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
make -C backend/python/rerankers \
|
make -C backend/python/rerankers \
|
||||||
|
; fi && \
|
||||||
|
if [[ ( "${EXTRA_BACKENDS}" =~ "mamba" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
|
||||||
|
make -C backend/python/mamba \
|
||||||
; fi
|
; fi
|
||||||
|
|
||||||
# Make sure the models directory exists
|
# Make sure the models directory exists
|
||||||
|
|||||||
2  LICENSE
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2023-2025 Ettore Di Giacinto (mudler@localai.io)
+Copyright (c) 2023-2024 Ettore Di Giacinto (mudler@localai.io)

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
359
Makefile
359
Makefile
@@ -6,27 +6,33 @@ BINARY_NAME=local-ai
|
|||||||
DETECT_LIBS?=true
|
DETECT_LIBS?=true
|
||||||
|
|
||||||
# llama.cpp versions
|
# llama.cpp versions
|
||||||
CPPLLAMA_VERSION?=6408210082cc0a61b992b487be7e2ff2efbb9e36
|
GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
|
||||||
|
GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
|
||||||
|
CPPLLAMA_VERSION?=e6b7801bd189d102d901d3e72035611a25456ef1
|
||||||
|
|
||||||
|
# go-rwkv version
|
||||||
|
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
|
||||||
|
RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6
|
||||||
|
|
||||||
# whisper.cpp version
|
# whisper.cpp version
|
||||||
WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
|
WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
|
||||||
WHISPER_CPP_VERSION?=6266a9f9e56a5b925e9892acf650f3eb1245814d
|
WHISPER_CPP_VERSION?=a551933542d956ae84634937acd2942eb40efaaf
|
||||||
|
|
||||||
|
# bert.cpp version
|
||||||
|
BERT_REPO?=https://github.com/go-skynet/go-bert.cpp
|
||||||
|
BERT_VERSION?=710044b124545415f555e4260d16b146c725a6e4
|
||||||
|
|
||||||
# go-piper version
|
# go-piper version
|
||||||
PIPER_REPO?=https://github.com/mudler/go-piper
|
PIPER_REPO?=https://github.com/mudler/go-piper
|
||||||
PIPER_VERSION?=e10ca041a885d4a8f3871d52924b47792d5e5aa0
|
PIPER_VERSION?=9d0100873a7dbb0824dfea40e8cec70a1b110759
|
||||||
|
|
||||||
# bark.cpp
|
# stablediffusion version
|
||||||
BARKCPP_REPO?=https://github.com/PABannier/bark.cpp.git
|
STABLEDIFFUSION_REPO?=https://github.com/mudler/go-stable-diffusion
|
||||||
BARKCPP_VERSION?=v1.0.0
|
STABLEDIFFUSION_VERSION?=4a3cd6aeae6f66ee57eae9a0075f8c58c3a6a38f
|
||||||
|
|
||||||
# stablediffusion.cpp (ggml)
|
# tinydream version
|
||||||
STABLEDIFFUSION_GGML_REPO?=https://github.com/richiejp/stable-diffusion.cpp
|
TINYDREAM_REPO?=https://github.com/M0Rf30/go-tiny-dream
|
||||||
STABLEDIFFUSION_GGML_VERSION?=53e3b17eb3d0b5760ced06a1f98320b68b34aaae
|
TINYDREAM_VERSION?=c04fa463ace9d9a6464313aa5f9cd0f953b6c057
|
||||||
|
|
||||||
ONNX_VERSION?=1.20.0
|
|
||||||
ONNX_ARCH?=x64
|
|
||||||
ONNX_OS?=linux
|
|
||||||
|
|
||||||
export BUILD_TYPE?=
|
export BUILD_TYPE?=
|
||||||
export STABLE_BUILD_TYPE?=$(BUILD_TYPE)
|
export STABLE_BUILD_TYPE?=$(BUILD_TYPE)
|
||||||
@@ -39,7 +45,6 @@ CGO_LDFLAGS_WHISPER+=-lggml
|
|||||||
CUDA_LIBPATH?=/usr/local/cuda/lib64/
|
CUDA_LIBPATH?=/usr/local/cuda/lib64/
|
||||||
GO_TAGS?=
|
GO_TAGS?=
|
||||||
BUILD_ID?=
|
BUILD_ID?=
|
||||||
NATIVE?=false
|
|
||||||
|
|
||||||
TEST_DIR=/tmp/test
|
TEST_DIR=/tmp/test
|
||||||
|
|
||||||
@@ -78,25 +83,7 @@ ifndef UNAME_S
|
|||||||
UNAME_S := $(shell uname -s)
|
UNAME_S := $(shell uname -s)
|
||||||
endif
|
endif
|
||||||
|
|
||||||
# IF native is false, we add -DGGML_NATIVE=OFF to CMAKE_ARGS
|
|
||||||
ifeq ($(NATIVE),false)
|
|
||||||
CMAKE_ARGS+=-DGGML_NATIVE=OFF
|
|
||||||
endif
|
|
||||||
|
|
||||||
# Detect if we are running on arm64
|
|
||||||
ifneq (,$(findstring aarch64,$(shell uname -m)))
|
|
||||||
ONNX_ARCH=aarch64
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifeq ($(OS),Darwin)
|
ifeq ($(OS),Darwin)
|
||||||
ONNX_OS=osx
|
|
||||||
ifneq (,$(findstring aarch64,$(shell uname -m)))
|
|
||||||
ONNX_ARCH=arm64
|
|
||||||
else ifneq (,$(findstring arm64,$(shell uname -m)))
|
|
||||||
ONNX_ARCH=arm64
|
|
||||||
else
|
|
||||||
ONNX_ARCH=x86_64
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifeq ($(OSX_SIGNING_IDENTITY),)
|
ifeq ($(OSX_SIGNING_IDENTITY),)
|
||||||
OSX_SIGNING_IDENTITY := $(shell security find-identity -v -p codesigning | grep '"' | head -n 1 | sed -E 's/.*"(.*)"/\1/')
|
OSX_SIGNING_IDENTITY := $(shell security find-identity -v -p codesigning | grep '"' | head -n 1 | sed -E 's/.*"(.*)"/\1/')
|
||||||
@@ -149,11 +136,12 @@ ifeq ($(BUILD_TYPE),hipblas)
|
|||||||
LD_LIBRARY_PATH ?= /opt/rocm/lib:/opt/rocm/llvm/lib
|
LD_LIBRARY_PATH ?= /opt/rocm/lib:/opt/rocm/llvm/lib
|
||||||
export CXX=$(ROCM_HOME)/llvm/bin/clang++
|
export CXX=$(ROCM_HOME)/llvm/bin/clang++
|
||||||
export CC=$(ROCM_HOME)/llvm/bin/clang
|
export CC=$(ROCM_HOME)/llvm/bin/clang
|
||||||
|
# llama-ggml has no hipblas support, so override it here.
|
||||||
export STABLE_BUILD_TYPE=
|
export STABLE_BUILD_TYPE=
|
||||||
export GGML_HIP=1
|
export GGML_HIPBLAS=1
|
||||||
GPU_TARGETS ?= gfx900,gfx906,gfx908,gfx940,gfx941,gfx942,gfx90a,gfx1030,gfx1031,gfx1100,gfx1101
|
GPU_TARGETS ?= gfx900,gfx906,gfx908,gfx940,gfx941,gfx942,gfx90a,gfx1030,gfx1031,gfx1100,gfx1101
|
||||||
AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
|
AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
|
||||||
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
|
CMAKE_ARGS+=-DGGML_HIPBLAS=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
|
||||||
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib
|
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib
|
||||||
endif
|
endif
|
||||||
|
|
||||||
@@ -172,6 +160,16 @@ ifeq ($(STATIC),true)
|
|||||||
LD_FLAGS+=-linkmode external -extldflags -static
|
LD_FLAGS+=-linkmode external -extldflags -static
|
||||||
endif
|
endif
|
||||||
|
|
||||||
|
ifeq ($(findstring stablediffusion,$(GO_TAGS)),stablediffusion)
|
||||||
|
# OPTIONAL_TARGETS+=go-stable-diffusion/libstablediffusion.a
|
||||||
|
OPTIONAL_GRPC+=backend-assets/grpc/stablediffusion
|
||||||
|
endif
|
||||||
|
|
||||||
|
ifeq ($(findstring tinydream,$(GO_TAGS)),tinydream)
|
||||||
|
# OPTIONAL_TARGETS+=go-tiny-dream/libtinydream.a
|
||||||
|
OPTIONAL_GRPC+=backend-assets/grpc/tinydream
|
||||||
|
endif
|
||||||
|
|
||||||
ifeq ($(findstring tts,$(GO_TAGS)),tts)
|
ifeq ($(findstring tts,$(GO_TAGS)),tts)
|
||||||
# OPTIONAL_TARGETS+=go-piper/libpiper_binding.a
|
# OPTIONAL_TARGETS+=go-piper/libpiper_binding.a
|
||||||
# OPTIONAL_TARGETS+=backend-assets/espeak-ng-data
|
# OPTIONAL_TARGETS+=backend-assets/espeak-ng-data
|
||||||
@@ -181,23 +179,16 @@ ifeq ($(findstring tts,$(GO_TAGS)),tts)
|
|||||||
endif
|
endif
|
||||||
|
|
||||||
ALL_GRPC_BACKENDS=backend-assets/grpc/huggingface
|
ALL_GRPC_BACKENDS=backend-assets/grpc/huggingface
|
||||||
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/bert-embeddings
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx2
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx2
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx512
|
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-fallback
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-fallback
|
||||||
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-ggml
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-grpc
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-grpc
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/util/llama-cpp-rpc-server
|
ALL_GRPC_BACKENDS+=backend-assets/util/llama-cpp-rpc-server
|
||||||
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/rwkv
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/whisper
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/whisper
|
||||||
|
|
||||||
ifeq ($(ONNX_OS),linux)
|
|
||||||
ifeq ($(ONNX_ARCH),x64)
|
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/bark-cpp
|
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/stablediffusion-ggml
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/local-store
|
ALL_GRPC_BACKENDS+=backend-assets/grpc/local-store
|
||||||
ALL_GRPC_BACKENDS+=backend-assets/grpc/silero-vad
|
|
||||||
ALL_GRPC_BACKENDS+=$(OPTIONAL_GRPC)
|
ALL_GRPC_BACKENDS+=$(OPTIONAL_GRPC)
|
||||||
# Use filter-out to remove the specified backends
|
# Use filter-out to remove the specified backends
|
||||||
ALL_GRPC_BACKENDS := $(filter-out $(SKIP_GRPC_BACKEND),$(ALL_GRPC_BACKENDS))
|
ALL_GRPC_BACKENDS := $(filter-out $(SKIP_GRPC_BACKEND),$(ALL_GRPC_BACKENDS))
|
||||||
@@ -218,22 +209,31 @@ endif
|
|||||||
|
|
||||||
all: help
|
all: help
|
||||||
|
|
||||||
## bark.cpp
|
## BERT embeddings
|
||||||
sources/bark.cpp:
|
sources/go-bert.cpp:
|
||||||
git clone --recursive $(BARKCPP_REPO) sources/bark.cpp && \
|
mkdir -p sources/go-bert.cpp
|
||||||
cd sources/bark.cpp && \
|
cd sources/go-bert.cpp && \
|
||||||
git checkout $(BARKCPP_VERSION) && \
|
git init && \
|
||||||
|
git remote add origin $(BERT_REPO) && \
|
||||||
|
git fetch origin && \
|
||||||
|
git checkout $(BERT_VERSION) && \
|
||||||
git submodule update --init --recursive --depth 1 --single-branch
|
git submodule update --init --recursive --depth 1 --single-branch
|
||||||
|
|
||||||
sources/bark.cpp/build/libbark.a: sources/bark.cpp
|
sources/go-bert.cpp/libgobert.a: sources/go-bert.cpp
|
||||||
cd sources/bark.cpp && \
|
$(MAKE) -C sources/go-bert.cpp libgobert.a
|
||||||
mkdir -p build && \
|
|
||||||
cd build && \
|
|
||||||
cmake $(CMAKE_ARGS) .. && \
|
|
||||||
cmake --build . --config Release
|
|
||||||
|
|
||||||
backend/go/bark/libbark.a: sources/bark.cpp/build/libbark.a
|
## go-llama.cpp
|
||||||
$(MAKE) -C backend/go/bark libbark.a
|
sources/go-llama.cpp:
|
||||||
|
mkdir -p sources/go-llama.cpp
|
||||||
|
cd sources/go-llama.cpp && \
|
||||||
|
git init && \
|
||||||
|
git remote add origin $(GOLLAMA_REPO) && \
|
||||||
|
git fetch origin && \
|
||||||
|
git checkout $(GOLLAMA_VERSION) && \
|
||||||
|
git submodule update --init --recursive --depth 1 --single-branch
|
||||||
|
|
||||||
|
sources/go-llama.cpp/libbinding.a: sources/go-llama.cpp
|
||||||
|
$(MAKE) -C sources/go-llama.cpp BUILD_TYPE=$(STABLE_BUILD_TYPE) libbinding.a
|
||||||
|
|
||||||
## go-piper
|
## go-piper
|
||||||
sources/go-piper:
|
sources/go-piper:
|
||||||
@@ -248,33 +248,45 @@ sources/go-piper:
|
|||||||
sources/go-piper/libpiper_binding.a: sources/go-piper
|
sources/go-piper/libpiper_binding.a: sources/go-piper
|
||||||
$(MAKE) -C sources/go-piper libpiper_binding.a example/main piper.o
|
$(MAKE) -C sources/go-piper libpiper_binding.a example/main piper.o
|
||||||
|
|
||||||
## stablediffusion (ggml)
|
|
||||||
sources/stablediffusion-ggml.cpp:
|
## RWKV
|
||||||
git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
|
sources/go-rwkv.cpp:
|
||||||
cd sources/stablediffusion-ggml.cpp && \
|
mkdir -p sources/go-rwkv.cpp
|
||||||
git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
|
cd sources/go-rwkv.cpp && \
|
||||||
|
git init && \
|
||||||
|
git remote add origin $(RWKV_REPO) && \
|
||||||
|
git fetch origin && \
|
||||||
|
git checkout $(RWKV_VERSION) && \
|
||||||
git submodule update --init --recursive --depth 1 --single-branch
|
git submodule update --init --recursive --depth 1 --single-branch
|
||||||
|
|
||||||
backend/go/image/stablediffusion-ggml/libsd.a: sources/stablediffusion-ggml.cpp
|
sources/go-rwkv.cpp/librwkv.a: sources/go-rwkv.cpp
|
||||||
$(MAKE) -C backend/go/image/stablediffusion-ggml build/libstable-diffusion.a
|
cd sources/go-rwkv.cpp && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a ..
|
||||||
$(MAKE) -C backend/go/image/stablediffusion-ggml libsd.a
|
|
||||||
|
|
||||||
backend-assets/grpc/stablediffusion-ggml: backend/go/image/stablediffusion-ggml/libsd.a backend-assets/grpc
|
## stable diffusion
|
||||||
$(MAKE) -C backend/go/image/stablediffusion-ggml CGO_LDFLAGS="$(CGO_LDFLAGS)" stablediffusion-ggml
|
sources/go-stable-diffusion:
|
||||||
|
mkdir -p sources/go-stable-diffusion
|
||||||
|
cd sources/go-stable-diffusion && \
|
||||||
|
git init && \
|
||||||
|
git remote add origin $(STABLEDIFFUSION_REPO) && \
|
||||||
|
git fetch origin && \
|
||||||
|
git checkout $(STABLEDIFFUSION_VERSION) && \
|
||||||
|
git submodule update --init --recursive --depth 1 --single-branch
|
||||||
|
|
||||||
sources/onnxruntime:
|
sources/go-stable-diffusion/libstablediffusion.a: sources/go-stable-diffusion
|
||||||
mkdir -p sources/onnxruntime
|
CPATH="$(CPATH):/usr/include/opencv4" $(MAKE) -C sources/go-stable-diffusion libstablediffusion.a
|
||||||
curl -L https://github.com/microsoft/onnxruntime/releases/download/v$(ONNX_VERSION)/onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz -o sources/onnxruntime/onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz
|
|
||||||
cd sources/onnxruntime && tar -xvf onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz && rm onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION).tgz
|
|
||||||
cd sources/onnxruntime && mv onnxruntime-$(ONNX_OS)-$(ONNX_ARCH)-$(ONNX_VERSION)/* ./
|
|
||||||
|
|
||||||
backend-assets/lib/libonnxruntime.so.1: backend-assets/lib sources/onnxruntime
|
## tiny-dream
|
||||||
cp -rfv sources/onnxruntime/lib/* backend-assets/lib/
|
sources/go-tiny-dream:
|
||||||
ifeq ($(OS),Darwin)
|
mkdir -p sources/go-tiny-dream
|
||||||
mv backend-assets/lib/libonnxruntime.$(ONNX_VERSION).dylib backend-assets/lib/libonnxruntime.dylib
|
cd sources/go-tiny-dream && \
|
||||||
else
|
git init && \
|
||||||
mv backend-assets/lib/libonnxruntime.so.$(ONNX_VERSION) backend-assets/lib/libonnxruntime.so.1
|
git remote add origin $(TINYDREAM_REPO) && \
|
||||||
endif
|
git fetch origin && \
|
||||||
|
git checkout $(TINYDREAM_VERSION) && \
|
||||||
|
git submodule update --init --recursive --depth 1 --single-branch
|
||||||
|
|
||||||
|
sources/go-tiny-dream/libtinydream.a: sources/go-tiny-dream
|
||||||
|
$(MAKE) -C sources/go-tiny-dream libtinydream.a
|
||||||
|
|
||||||
## whisper
|
## whisper
|
||||||
sources/whisper.cpp:
|
sources/whisper.cpp:
|
||||||
@@ -289,17 +301,27 @@ sources/whisper.cpp:
|
|||||||
sources/whisper.cpp/libwhisper.a: sources/whisper.cpp
|
sources/whisper.cpp/libwhisper.a: sources/whisper.cpp
|
||||||
cd sources/whisper.cpp && $(MAKE) libwhisper.a libggml.a
|
cd sources/whisper.cpp && $(MAKE) libwhisper.a libggml.a
|
||||||
|
|
||||||
get-sources: sources/go-piper sources/stablediffusion-ggml.cpp sources/bark.cpp sources/whisper.cpp backend/cpp/llama/llama.cpp
|
get-sources: sources/go-llama.cpp sources/go-piper sources/go-rwkv.cpp sources/whisper.cpp sources/go-bert.cpp sources/go-stable-diffusion sources/go-tiny-dream backend/cpp/llama/llama.cpp
|
||||||
|
|
||||||
replace:
|
replace:
|
||||||
|
$(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(CURDIR)/sources/go-rwkv.cpp
|
||||||
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
|
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
|
||||||
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
|
$(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
|
||||||
|
$(GOCMD) mod edit -replace github.com/go-skynet/go-bert.cpp=$(CURDIR)/sources/go-bert.cpp
|
||||||
|
$(GOCMD) mod edit -replace github.com/M0Rf30/go-tiny-dream=$(CURDIR)/sources/go-tiny-dream
|
||||||
$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(CURDIR)/sources/go-piper
|
$(GOCMD) mod edit -replace github.com/mudler/go-piper=$(CURDIR)/sources/go-piper
|
||||||
|
$(GOCMD) mod edit -replace github.com/mudler/go-stable-diffusion=$(CURDIR)/sources/go-stable-diffusion
|
||||||
|
$(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(CURDIR)/sources/go-llama.cpp
|
||||||
|
|
||||||
dropreplace:
|
dropreplace:
|
||||||
|
$(GOCMD) mod edit -dropreplace github.com/donomii/go-rwkv.cpp
|
||||||
$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp
|
$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp
|
||||||
$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp/bindings/go
|
$(GOCMD) mod edit -dropreplace github.com/ggerganov/whisper.cpp/bindings/go
|
||||||
|
$(GOCMD) mod edit -dropreplace github.com/go-skynet/go-bert.cpp
|
||||||
|
$(GOCMD) mod edit -dropreplace github.com/M0Rf30/go-tiny-dream
|
||||||
$(GOCMD) mod edit -dropreplace github.com/mudler/go-piper
|
$(GOCMD) mod edit -dropreplace github.com/mudler/go-piper
|
||||||
|
$(GOCMD) mod edit -dropreplace github.com/mudler/go-stable-diffusion
|
||||||
|
$(GOCMD) mod edit -dropreplace github.com/go-skynet/go-llama.cpp
|
||||||
|
|
||||||
prepare-sources: get-sources replace
|
prepare-sources: get-sources replace
|
||||||
$(GOCMD) mod download
|
$(GOCMD) mod download
|
||||||
@@ -307,8 +329,13 @@ prepare-sources: get-sources replace
|
|||||||
## GENERIC
|
## GENERIC
|
||||||
rebuild: ## Rebuilds the project
|
rebuild: ## Rebuilds the project
|
||||||
$(GOCMD) clean -cache
|
$(GOCMD) clean -cache
|
||||||
|
$(MAKE) -C sources/go-llama.cpp clean
|
||||||
|
$(MAKE) -C sources/go-rwkv.cpp clean
|
||||||
$(MAKE) -C sources/whisper.cpp clean
|
$(MAKE) -C sources/whisper.cpp clean
|
||||||
|
$(MAKE) -C sources/go-stable-diffusion clean
|
||||||
|
$(MAKE) -C sources/go-bert.cpp clean
|
||||||
$(MAKE) -C sources/go-piper clean
|
$(MAKE) -C sources/go-piper clean
|
||||||
|
$(MAKE) -C sources/go-tiny-dream clean
|
||||||
$(MAKE) build
|
$(MAKE) build
|
||||||
|
|
||||||
prepare: prepare-sources $(OPTIONAL_TARGETS)
|
prepare: prepare-sources $(OPTIONAL_TARGETS)
|
||||||
@@ -321,9 +348,7 @@ clean: ## Remove build related file
|
|||||||
rm -rf release/
|
rm -rf release/
|
||||||
rm -rf backend-assets/*
|
rm -rf backend-assets/*
|
||||||
$(MAKE) -C backend/cpp/grpc clean
|
$(MAKE) -C backend/cpp/grpc clean
|
||||||
$(MAKE) -C backend/go/bark clean
|
|
||||||
$(MAKE) -C backend/cpp/llama clean
|
$(MAKE) -C backend/cpp/llama clean
|
||||||
$(MAKE) -C backend/go/image/stablediffusion-ggml clean
|
|
||||||
rm -rf backend/cpp/llama-* || true
|
rm -rf backend/cpp/llama-* || true
|
||||||
$(MAKE) dropreplace
|
$(MAKE) dropreplace
|
||||||
$(MAKE) protogen-clean
|
$(MAKE) protogen-clean
|
||||||
@@ -334,9 +359,6 @@ clean-tests:
|
|||||||
rm -rf test-dir
|
rm -rf test-dir
|
||||||
rm -rf core/http/backend-assets
|
rm -rf core/http/backend-assets
|
||||||
|
|
||||||
clean-dc: clean
|
|
||||||
cp -r /build/backend-assets /workspace/backend-assets
|
|
||||||
|
|
||||||
## Build:
|
## Build:
|
||||||
build: prepare backend-assets grpcs ## Build the project
|
build: prepare backend-assets grpcs ## Build the project
|
||||||
$(info ${GREEN}I local-ai build info:${RESET})
|
$(info ${GREEN}I local-ai build info:${RESET})
|
||||||
@@ -410,10 +432,12 @@ run: prepare ## run local-ai
|
|||||||
test-models/testmodel.ggml:
|
test-models/testmodel.ggml:
|
||||||
mkdir test-models
|
mkdir test-models
|
||||||
mkdir test-dir
|
mkdir test-dir
|
||||||
wget -q https://huggingface.co/RichardErkhov/Qwen_-_Qwen2-1.5B-Instruct-gguf/resolve/main/Qwen2-1.5B-Instruct.Q2_K.gguf -O test-models/testmodel.ggml
|
wget -q https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_0.bin -O test-models/testmodel.ggml
|
||||||
wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
|
wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
|
||||||
wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
|
wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
|
||||||
wget -q https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
|
wget -q https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
|
||||||
|
wget -q https://huggingface.co/mudler/rwkv-4-raven-1.5B-ggml/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%2525-Other1%2525-20230425-ctx4096_Q4_0.bin -O test-models/rwkv
|
||||||
|
wget -q https://raw.githubusercontent.com/saharNooby/rwkv.cpp/5eb8f09c146ea8124633ab041d9ea0b1f1db4459/rwkv/20B_tokenizer.json -O test-models/rwkv.tokenizer.json
|
||||||
cp tests/models_fixtures/* test-models
|
cp tests/models_fixtures/* test-models
|
||||||
|
|
||||||
prepare-test: grpcs
|
prepare-test: grpcs
|
||||||
@@ -422,10 +446,11 @@ prepare-test: grpcs
|
|||||||
|
|
||||||
test: prepare test-models/testmodel.ggml grpcs
|
test: prepare test-models/testmodel.ggml grpcs
|
||||||
@echo 'Running tests'
|
@echo 'Running tests'
|
||||||
export GO_TAGS="tts debug"
|
export GO_TAGS="tts stablediffusion debug"
|
||||||
$(MAKE) prepare-test
|
$(MAKE) prepare-test
|
||||||
HUGGINGFACE_GRPC=$(abspath ./)/backend/python/transformers/run.sh TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
HUGGINGFACE_GRPC=$(abspath ./)/backend/python/sentencetransformers/run.sh TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
||||||
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!llama-gguf" --flake-attempts $(TEST_FLAKES) --fail-fast -v -r $(TEST_PATHS)
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!llama && !llama-gguf" --flake-attempts $(TEST_FLAKES) --fail-fast -v -r $(TEST_PATHS)
|
||||||
|
$(MAKE) test-llama
|
||||||
$(MAKE) test-llama-gguf
|
$(MAKE) test-llama-gguf
|
||||||
$(MAKE) test-tts
|
$(MAKE) test-tts
|
||||||
$(MAKE) test-stablediffusion
|
$(MAKE) test-stablediffusion
|
||||||
@@ -440,36 +465,40 @@ run-e2e-image:
|
|||||||
ls -liah $(abspath ./tests/e2e-fixtures)
|
ls -liah $(abspath ./tests/e2e-fixtures)
|
||||||
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests
|
docker run -p 5390:8080 -e MODELS_PATH=/models -e THREADS=1 -e DEBUG=true -d --rm -v $(TEST_DIR):/models --gpus all --name e2e-tests-$(RANDOM) localai-tests
|
||||||
|
|
||||||
run-e2e-aio: protogen-go
|
run-e2e-aio:
|
||||||
@echo 'Running e2e AIO tests'
|
@echo 'Running e2e AIO tests'
|
||||||
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e-aio
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e-aio
|
||||||
|
|
||||||
test-e2e:
|
test-e2e:
|
||||||
@echo 'Running e2e tests'
|
@echo 'Running e2e tests'
|
||||||
BUILD_TYPE=$(BUILD_TYPE) \
|
BUILD_TYPE=$(BUILD_TYPE) \
|
||||||
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \
|
LOCALAI_API=http://$(E2E_BRIDGE_IP):5390/v1 \
|
||||||
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts $(TEST_FLAKES) -v -r ./tests/e2e
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flake-attempts 5 -v -r ./tests/e2e
|
||||||
|
|
||||||
teardown-e2e:
|
teardown-e2e:
|
||||||
rm -rf $(TEST_DIR) || true
|
rm -rf $(TEST_DIR) || true
|
||||||
docker stop $$(docker ps -q --filter ancestor=localai-tests)
|
docker stop $$(docker ps -q --filter ancestor=localai-tests)
|
||||||
|
|
||||||
|
test-llama: prepare-test
|
||||||
|
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
||||||
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama" --flake-attempts 5 -v -r $(TEST_PATHS)
|
||||||
|
|
||||||
test-llama-gguf: prepare-test
|
test-llama-gguf: prepare-test
|
||||||
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
||||||
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama-gguf" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama-gguf" --flake-attempts 5 -v -r $(TEST_PATHS)
|
||||||
|
|
||||||
test-tts: prepare-test
|
test-tts: prepare-test
|
||||||
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
||||||
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="tts" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="tts" --flake-attempts 1 -v -r $(TEST_PATHS)
|
||||||
|
|
||||||
test-stablediffusion: prepare-test
|
test-stablediffusion: prepare-test
|
||||||
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models \
|
||||||
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts $(TEST_FLAKES) -v -r $(TEST_PATHS)
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stablediffusion" --flake-attempts 1 -v -r $(TEST_PATHS)
|
||||||
|
|
||||||
test-stores: backend-assets/grpc/local-store
|
test-stores: backend-assets/grpc/local-store
|
||||||
mkdir -p tests/integration/backend-assets/grpc
|
mkdir -p tests/integration/backend-assets/grpc
|
||||||
cp -f backend-assets/grpc/local-store tests/integration/backend-assets/grpc/
|
cp -f backend-assets/grpc/local-store tests/integration/backend-assets/grpc/
|
||||||
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stores" --flake-attempts $(TEST_FLAKES) -v -r tests/integration
|
$(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="stores" --flake-attempts 1 -v -r tests/integration
|
||||||
|
|
||||||
test-container:
|
test-container:
|
||||||
docker build --target requirements -t local-ai-test-container .
|
docker build --target requirements -t local-ai-test-container .
|
||||||
@@ -505,10 +534,18 @@ protogen-go-clean:
|
|||||||
$(RM) bin/*
|
$(RM) bin/*
|
||||||
|
|
||||||
.PHONY: protogen-python
|
.PHONY: protogen-python
|
||||||
protogen-python: bark-protogen coqui-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen
|
protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama2-protogen mamba-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen openvoice-protogen
|
||||||
|
|
||||||
.PHONY: protogen-python-clean
|
.PHONY: protogen-python-clean
|
||||||
protogen-python-clean: bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean
|
protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama2-protogen-clean mamba-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean openvoice-protogen-clean
|
||||||
|
|
||||||
|
.PHONY: autogptq-protogen
|
||||||
|
autogptq-protogen:
|
||||||
|
$(MAKE) -C backend/python/autogptq protogen
|
||||||
|
|
||||||
|
.PHONY: autogptq-protogen-clean
|
||||||
|
autogptq-protogen-clean:
|
||||||
|
$(MAKE) -C backend/python/autogptq protogen-clean
|
||||||
|
|
||||||
.PHONY: bark-protogen
|
.PHONY: bark-protogen
|
||||||
bark-protogen:
|
bark-protogen:
|
||||||
@@ -534,14 +571,6 @@ diffusers-protogen:
|
|||||||
diffusers-protogen-clean:
|
diffusers-protogen-clean:
|
||||||
$(MAKE) -C backend/python/diffusers protogen-clean
|
$(MAKE) -C backend/python/diffusers protogen-clean
|
||||||
|
|
||||||
.PHONY: faster-whisper-protogen
|
|
||||||
faster-whisper-protogen:
|
|
||||||
$(MAKE) -C backend/python/faster-whisper protogen
|
|
||||||
|
|
||||||
.PHONY: faster-whisper-protogen-clean
|
|
||||||
faster-whisper-protogen-clean:
|
|
||||||
$(MAKE) -C backend/python/faster-whisper protogen-clean
|
|
||||||
|
|
||||||
.PHONY: exllama2-protogen
|
.PHONY: exllama2-protogen
|
||||||
exllama2-protogen:
|
exllama2-protogen:
|
||||||
$(MAKE) -C backend/python/exllama2 protogen
|
$(MAKE) -C backend/python/exllama2 protogen
|
||||||
@@ -550,6 +579,14 @@ exllama2-protogen:
|
|||||||
exllama2-protogen-clean:
|
exllama2-protogen-clean:
|
||||||
$(MAKE) -C backend/python/exllama2 protogen-clean
|
$(MAKE) -C backend/python/exllama2 protogen-clean
|
||||||
|
|
||||||
|
.PHONY: mamba-protogen
|
||||||
|
mamba-protogen:
|
||||||
|
$(MAKE) -C backend/python/mamba protogen
|
||||||
|
|
||||||
|
.PHONY: mamba-protogen-clean
|
||||||
|
mamba-protogen-clean:
|
||||||
|
$(MAKE) -C backend/python/mamba protogen-clean
|
||||||
|
|
||||||
.PHONY: rerankers-protogen
|
.PHONY: rerankers-protogen
|
||||||
rerankers-protogen:
|
rerankers-protogen:
|
||||||
$(MAKE) -C backend/python/rerankers protogen
|
$(MAKE) -C backend/python/rerankers protogen
|
||||||
@@ -558,6 +595,14 @@ rerankers-protogen:
|
|||||||
rerankers-protogen-clean:
|
rerankers-protogen-clean:
|
||||||
$(MAKE) -C backend/python/rerankers protogen-clean
|
$(MAKE) -C backend/python/rerankers protogen-clean
|
||||||
|
|
||||||
|
.PHONY: sentencetransformers-protogen
|
||||||
|
sentencetransformers-protogen:
|
||||||
|
$(MAKE) -C backend/python/sentencetransformers protogen
|
||||||
|
|
||||||
|
.PHONY: sentencetransformers-protogen-clean
|
||||||
|
sentencetransformers-protogen-clean:
|
||||||
|
$(MAKE) -C backend/python/sentencetransformers protogen-clean
|
||||||
|
|
||||||
.PHONY: transformers-protogen
|
.PHONY: transformers-protogen
|
||||||
transformers-protogen:
|
transformers-protogen:
|
||||||
$(MAKE) -C backend/python/transformers protogen
|
$(MAKE) -C backend/python/transformers protogen
|
||||||
@@ -566,13 +611,37 @@ transformers-protogen:
|
|||||||
transformers-protogen-clean:
|
transformers-protogen-clean:
|
||||||
$(MAKE) -C backend/python/transformers protogen-clean
|
$(MAKE) -C backend/python/transformers protogen-clean
|
||||||
|
|
||||||
.PHONY: kokoro-protogen
|
.PHONY: parler-tts-protogen
|
||||||
kokoro-protogen:
|
parler-tts-protogen:
|
||||||
$(MAKE) -C backend/python/kokoro protogen
|
$(MAKE) -C backend/python/parler-tts protogen
|
||||||
|
|
||||||
.PHONY: kokoro-protogen-clean
|
.PHONY: parler-tts-protogen-clean
|
||||||
kokoro-protogen-clean:
|
parler-tts-protogen-clean:
|
||||||
$(MAKE) -C backend/python/kokoro protogen-clean
|
$(MAKE) -C backend/python/parler-tts protogen-clean
|
||||||
|
|
||||||
|
.PHONY: transformers-musicgen-protogen
|
||||||
|
transformers-musicgen-protogen:
|
||||||
|
$(MAKE) -C backend/python/transformers-musicgen protogen
|
||||||
|
|
||||||
|
.PHONY: transformers-musicgen-protogen-clean
|
||||||
|
transformers-musicgen-protogen-clean:
|
||||||
|
$(MAKE) -C backend/python/transformers-musicgen protogen-clean
|
||||||
|
|
||||||
|
.PHONY: vall-e-x-protogen
|
||||||
|
vall-e-x-protogen:
|
||||||
|
$(MAKE) -C backend/python/vall-e-x protogen
|
||||||
|
|
||||||
|
.PHONY: vall-e-x-protogen-clean
|
||||||
|
vall-e-x-protogen-clean:
|
||||||
|
$(MAKE) -C backend/python/vall-e-x protogen-clean
|
||||||
|
|
||||||
|
.PHONY: openvoice-protogen
|
||||||
|
openvoice-protogen:
|
||||||
|
$(MAKE) -C backend/python/openvoice protogen
|
||||||
|
|
||||||
|
.PHONY: openvoice-protogen-clean
|
||||||
|
openvoice-protogen-clean:
|
||||||
|
$(MAKE) -C backend/python/openvoice protogen-clean
|
||||||
|
|
||||||
.PHONY: vllm-protogen
|
.PHONY: vllm-protogen
|
||||||
vllm-protogen:
|
vllm-protogen:
|
||||||
@@ -585,14 +654,19 @@ vllm-protogen-clean:
|
|||||||
## GRPC
|
## GRPC
|
||||||
# Note: it is duplicated in the Dockerfile
|
# Note: it is duplicated in the Dockerfile
|
||||||
prepare-extra-conda-environments: protogen-python
|
prepare-extra-conda-environments: protogen-python
|
||||||
|
$(MAKE) -C backend/python/autogptq
|
||||||
$(MAKE) -C backend/python/bark
|
$(MAKE) -C backend/python/bark
|
||||||
$(MAKE) -C backend/python/coqui
|
$(MAKE) -C backend/python/coqui
|
||||||
$(MAKE) -C backend/python/diffusers
|
$(MAKE) -C backend/python/diffusers
|
||||||
$(MAKE) -C backend/python/faster-whisper
|
|
||||||
$(MAKE) -C backend/python/vllm
|
$(MAKE) -C backend/python/vllm
|
||||||
|
$(MAKE) -C backend/python/mamba
|
||||||
|
$(MAKE) -C backend/python/sentencetransformers
|
||||||
$(MAKE) -C backend/python/rerankers
|
$(MAKE) -C backend/python/rerankers
|
||||||
$(MAKE) -C backend/python/transformers
|
$(MAKE) -C backend/python/transformers
|
||||||
$(MAKE) -C backend/python/kokoro
|
$(MAKE) -C backend/python/transformers-musicgen
|
||||||
|
$(MAKE) -C backend/python/parler-tts
|
||||||
|
$(MAKE) -C backend/python/vall-e-x
|
||||||
|
$(MAKE) -C backend/python/openvoice
|
||||||
$(MAKE) -C backend/python/exllama2
|
$(MAKE) -C backend/python/exllama2
|
||||||
|
|
||||||
prepare-test-extra: protogen-python
|
prepare-test-extra: protogen-python
|
||||||
@@ -616,6 +690,13 @@ backend-assets/espeak-ng-data: sources/go-piper sources/go-piper/libpiper_bindin
|
|||||||
backend-assets/grpc: protogen-go replace
|
backend-assets/grpc: protogen-go replace
|
||||||
mkdir -p backend-assets/grpc
|
mkdir -p backend-assets/grpc
|
||||||
|
|
||||||
|
backend-assets/grpc/bert-embeddings: sources/go-bert.cpp sources/go-bert.cpp/libgobert.a backend-assets/grpc
|
||||||
|
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-bert.cpp LIBRARY_PATH=$(CURDIR)/sources/go-bert.cpp \
|
||||||
|
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/bert-embeddings ./backend/go/llm/bert/
|
||||||
|
ifneq ($(UPX),)
|
||||||
|
$(UPX) backend-assets/grpc/bert-embeddings
|
||||||
|
endif
|
||||||
|
|
||||||
backend-assets/grpc/huggingface: backend-assets/grpc
|
backend-assets/grpc/huggingface: backend-assets/grpc
|
||||||
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/huggingface ./backend/go/llm/langchain/
|
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/huggingface ./backend/go/llm/langchain/
|
||||||
ifneq ($(UPX),)
|
ifneq ($(UPX),)
|
||||||
@@ -662,13 +743,6 @@ backend-assets/grpc/llama-cpp-avx2: backend-assets/grpc backend/cpp/llama/llama.
|
|||||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-avx2" build-llama-cpp-grpc-server
|
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-avx2" build-llama-cpp-grpc-server
|
||||||
cp -rfv backend/cpp/llama-avx2/grpc-server backend-assets/grpc/llama-cpp-avx2
|
cp -rfv backend/cpp/llama-avx2/grpc-server backend-assets/grpc/llama-cpp-avx2
|
||||||
|
|
||||||
backend-assets/grpc/llama-cpp-avx512: backend-assets/grpc backend/cpp/llama/llama.cpp
|
|
||||||
cp -rf backend/cpp/llama backend/cpp/llama-avx512
|
|
||||||
$(MAKE) -C backend/cpp/llama-avx512 purge
|
|
||||||
$(info ${GREEN}I llama-cpp build info:avx512${RESET})
|
|
||||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-avx512" build-llama-cpp-grpc-server
|
|
||||||
cp -rfv backend/cpp/llama-avx512/grpc-server backend-assets/grpc/llama-cpp-avx512
|
|
||||||
|
|
||||||
backend-assets/grpc/llama-cpp-avx: backend-assets/grpc backend/cpp/llama/llama.cpp
|
backend-assets/grpc/llama-cpp-avx: backend-assets/grpc backend/cpp/llama/llama.cpp
|
||||||
cp -rf backend/cpp/llama backend/cpp/llama-avx
|
cp -rf backend/cpp/llama backend/cpp/llama-avx
|
||||||
$(MAKE) -C backend/cpp/llama-avx purge
|
$(MAKE) -C backend/cpp/llama-avx purge
|
||||||
@@ -682,6 +756,10 @@ backend-assets/grpc/llama-cpp-fallback: backend-assets/grpc backend/cpp/llama/ll
|
|||||||
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
|
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
|
||||||
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-fallback" build-llama-cpp-grpc-server
|
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-fallback" build-llama-cpp-grpc-server
|
||||||
cp -rfv backend/cpp/llama-fallback/grpc-server backend-assets/grpc/llama-cpp-fallback
|
cp -rfv backend/cpp/llama-fallback/grpc-server backend-assets/grpc/llama-cpp-fallback
|
||||||
|
# TODO: every binary should have its own folder instead, so can have different metal implementations
|
||||||
|
ifeq ($(BUILD_TYPE),metal)
|
||||||
|
 cp backend/cpp/llama-fallback/llama.cpp/build/bin/default.metallib backend-assets/grpc/
 endif

 backend-assets/grpc/llama-cpp-cuda: backend-assets/grpc backend/cpp/llama/llama.cpp
 cp -rf backend/cpp/llama backend/cpp/llama-cuda
@@ -694,7 +772,7 @@ backend-assets/grpc/llama-cpp-hipblas: backend-assets/grpc backend/cpp/llama/lla
 cp -rf backend/cpp/llama backend/cpp/llama-hipblas
 $(MAKE) -C backend/cpp/llama-hipblas purge
 $(info ${GREEN}I llama-cpp build info:hipblas${RESET})
-CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" BUILD_TYPE="hipblas" $(MAKE) VARIANT="llama-hipblas" build-llama-cpp-grpc-server
+BUILD_TYPE="hipblas" $(MAKE) VARIANT="llama-hipblas" build-llama-cpp-grpc-server
 cp -rfv backend/cpp/llama-hipblas/grpc-server backend-assets/grpc/llama-cpp-hipblas

 backend-assets/grpc/llama-cpp-sycl_f16: backend-assets/grpc backend/cpp/llama/llama.cpp
@@ -722,11 +800,11 @@ backend-assets/util/llama-cpp-rpc-server: backend-assets/grpc/llama-cpp-grpc
 mkdir -p backend-assets/util/
 cp -rf backend/cpp/llama-grpc/llama.cpp/build/bin/rpc-server backend-assets/util/llama-cpp-rpc-server

-backend-assets/grpc/bark-cpp: backend/go/bark/libbark.a backend-assets/grpc
+backend-assets/grpc/llama-ggml: sources/go-llama.cpp sources/go-llama.cpp/libbinding.a backend-assets/grpc
-CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/backend/go/bark/ LIBRARY_PATH=$(CURDIR)/backend/go/bark/ \
+CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama.cpp LIBRARY_PATH=$(CURDIR)/sources/go-llama.cpp \
-$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/bark-cpp ./backend/go/bark/
+$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama-ggml ./backend/go/llm/llama-ggml/
 ifneq ($(UPX),)
-$(UPX) backend-assets/grpc/bark-cpp
+$(UPX) backend-assets/grpc/llama-ggml
 endif

 backend-assets/grpc/piper: sources/go-piper sources/go-piper/libpiper_binding.a backend-assets/grpc backend-assets/espeak-ng-data
@@ -736,11 +814,25 @@ ifneq ($(UPX),)
 $(UPX) backend-assets/grpc/piper
 endif

-backend-assets/grpc/silero-vad: backend-assets/grpc backend-assets/lib/libonnxruntime.so.1
+backend-assets/grpc/rwkv: sources/go-rwkv.cpp sources/go-rwkv.cpp/librwkv.a backend-assets/grpc
-CGO_LDFLAGS="$(CGO_LDFLAGS)" CPATH="$(CPATH):$(CURDIR)/sources/onnxruntime/include/" LIBRARY_PATH=$(CURDIR)/backend-assets/lib \
+CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-rwkv.cpp LIBRARY_PATH=$(CURDIR)/sources/go-rwkv.cpp \
-$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/silero-vad ./backend/go/vad/silero
+$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/rwkv ./backend/go/llm/rwkv
 ifneq ($(UPX),)
-$(UPX) backend-assets/grpc/silero-vad
+$(UPX) backend-assets/grpc/rwkv
+endif
+
+backend-assets/grpc/stablediffusion: sources/go-stable-diffusion sources/go-stable-diffusion/libstablediffusion.a backend-assets/grpc
+CGO_LDFLAGS="$(CGO_LDFLAGS)" CPATH="$(CPATH):$(CURDIR)/sources/go-stable-diffusion/:/usr/include/opencv4" LIBRARY_PATH=$(CURDIR)/sources/go-stable-diffusion/ \
+$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/stablediffusion ./backend/go/image/stablediffusion
+ifneq ($(UPX),)
+$(UPX) backend-assets/grpc/stablediffusion
+endif
+
+backend-assets/grpc/tinydream: sources/go-tiny-dream sources/go-tiny-dream/libtinydream.a backend-assets/grpc
+CGO_LDFLAGS="$(CGO_LDFLAGS)" LIBRARY_PATH=$(CURDIR)/go-tiny-dream \
+$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/tinydream ./backend/go/image/tinydream
+ifneq ($(UPX),)
+$(UPX) backend-assets/grpc/tinydream
 endif

 backend-assets/grpc/whisper: sources/whisper.cpp sources/whisper.cpp/libwhisper.a backend-assets/grpc
@@ -796,8 +888,7 @@ docker-aio-all:

 docker-image-intel:
 docker build \
---progress plain \
---build-arg BASE_IMAGE=intel/oneapi-basekit:2025.1.0-0-devel-ubuntu24.04 \
+--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.2.0-devel-ubuntu22.04 \
 --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
 --build-arg GO_TAGS="none" \
 --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
@@ -805,7 +896,7 @@ docker-image-intel:

 docker-image-intel-xpu:
 docker build \
---build-arg BASE_IMAGE=intel/oneapi-basekit:2025.1.0-0-devel-ubuntu22.04 \
+--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.2.0-devel-ubuntu22.04 \
 --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
 --build-arg GO_TAGS="none" \
 --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
@@ -817,7 +908,7 @@ swagger:

 .PHONY: gen-assets
 gen-assets:
-$(GOCMD) run core/dependencies_manager/manager.go webui_static.yaml core/http/static/assets
+$(GOCMD) run core/dependencies_manager/manager.go embedded/webui_static.yaml core/http/static/assets

 ## Documentation
 docs/layouts/_default:

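For orientation, the variant targets in the Makefile hunks above can be invoked directly from a source checkout; a minimal sketch, assuming the matching toolchain (e.g. ROCm for the hipblas variant) is installed:

```bash
# Build only the hipblas llama.cpp gRPC backend from a LocalAI checkout
# (target name taken from the Makefile rules shown above)
BUILD_TYPE=hipblas make backend-assets/grpc/llama-cpp-hipblas
```
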
README.md (146 changed lines)
@@ -1,6 +1,7 @@
 <h1 align="center">
 <br>
-<img height="300" src="./core/http/static/logo.png"> <br>
+<img height="300" src="https://github.com/go-skynet/LocalAI/assets/2420543/0966aa2a-166e-4f99-a3e5-6c915fc997dd"> <br>
+LocalAI
 <br>
 </h1>

@@ -37,68 +38,15 @@
 </a>
 </p>

-<p align="center">
-<a href="https://trendshift.io/repositories/5539" target="_blank"><img src="https://trendshift.io/api/badge/repositories/5539" alt="mudler%2FLocalAI | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
-</p>

 > :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
 >
-> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples)
+> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/go-skynet/LocalAI/tree/master/examples/)

 [](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[](https://artifacthub.io/packages/search?repo=localai)

-**LocalAI** is the free, Open Source OpenAI alternative. LocalAI act as a drop-in replacement REST API that's compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).
+**LocalAI** is the free, Open Source OpenAI alternative. LocalAI act as a drop-in replacement REST API that’s compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).

-## 📚🆕 Local Stack Family

-🆕 LocalAI is now part of a comprehensive suite of AI tools designed to work together:

-<table>
-<tr>
-<td width="50%" valign="top">
-<a href="https://github.com/mudler/LocalAGI">
-<img src="https://raw.githubusercontent.com/mudler/LocalAGI/refs/heads/main/webui/react-ui/public/logo_2.png" width="300" alt="LocalAGI Logo">
-</a>
-</td>
-<td width="50%" valign="top">
-<h3><a href="https://github.com/mudler/LocalAGI">LocalAGI</a></h3>
-<p>A powerful Local AI agent management platform that serves as a drop-in replacement for OpenAI's Responses API, enhanced with advanced agentic capabilities.</p>
-</td>
-</tr>
-<tr>
-<td width="50%" valign="top">
-<a href="https://github.com/mudler/LocalRecall">
-<img src="https://raw.githubusercontent.com/mudler/LocalRecall/refs/heads/main/static/localrecall_horizontal.png" width="300" alt="LocalRecall Logo">
-</a>
-</td>
-<td width="50%" valign="top">
-<h3><a href="https://github.com/mudler/LocalRecall">LocalRecall</a></h3>
-<p>A REST-ful API and knowledge base management system that provides persistent memory and storage capabilities for AI agents.</p>
-</td>
-</tr>
-</table>

-## Screenshots

-| Talk Interface | Generate Audio |
-| --- | --- |
-|  |  |

-| Models Overview | Generate Images |
-| --- | --- |
-|  |  |

-| Chat Interface | Home |
-| --- | --- |
-|  |  |

-| Login | Swarm |
-| --- | --- |
-| |  |

-## 💻 Quickstart

 Run the installer script:

@@ -107,65 +55,53 @@ curl https://localai.io/install.sh | sh
 ```

 Or run with docker:

-### CPU only image:
-```bash
-docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-cpu
-```
-### Nvidia GPU:
-```bash
-docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
-```
-### CPU and GPU image (bigger size):
-```bash
-docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
-```
-### AIO images (it will pre-download a set of models ready for use, see https://localai.io/basics/container/)
 ```bash
 docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
+# Alternative images:
+# - if you have an Nvidia GPU:
+# docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
+# - without preconfigured models
+# docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
+# - without preconfigured models for Nvidia GPUs
+# docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
 ```

-To load models:
+[💻 Getting started](https://localai.io/basics/getting_started/index.html)

-```bash
+## 🔥🔥 Hot topics / Roadmap
-# From the model gallery (see available models with `local-ai models list`, in the WebUI from the model tab, or visiting https://models.localai.io)
-local-ai run llama-3.2-1b-instruct:q4_k_m
-# Start LocalAI with the phi-2 model directly from huggingface
-local-ai run huggingface://TheBloke/phi-2-GGUF/phi-2.Q8_0.gguf
-# Install and run a model from the Ollama OCI registry
-local-ai run ollama://gemma:2b
-# Run a model from a configuration file
-local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
-# Install and run a model from a standard OCI registry (e.g., Docker Hub)
-local-ai run oci://localai/phi-2:latest
-```

-For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
+[Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
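Once a model is loaded with one of the `local-ai run` commands above, it can be queried through the OpenAI-compatible API; a minimal sketch (the model name mirrors the gallery id used above — check `curl http://localhost:8080/v1/models` for the exact name registered on your instance):

```bash
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "llama-3.2-1b-instruct:q4_k_m",
    "messages": [{"role": "user", "content": "How are you doing?"}],
    "temperature": 0.7
  }'
```
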

-## 📰 Latest project news

-- Apr 2025: [LocalAGI](https://github.com/mudler/LocalAGI) and [LocalRecall](https://github.com/mudler/LocalRecall) join the LocalAI family stack.
-- Apr 2025: WebUI overhaul, AIO images updates
-- Feb 2025: Backend cleanup, Breaking changes, new backends (kokoro, OutelTTS, faster-whisper), Nvidia L4T images
-- Jan 2025: LocalAI model release: https://huggingface.co/mudler/LocalAI-functioncall-phi-4-v0.3, SANA support in diffusers: https://github.com/mudler/LocalAI/pull/4603
-- Dec 2024: stablediffusion.cpp backend (ggml) added ( https://github.com/mudler/LocalAI/pull/4289 )
-- Nov 2024: Bark.cpp backend added ( https://github.com/mudler/LocalAI/pull/4287 )
-- Nov 2024: Voice activity detection models (**VAD**) added to the API: https://github.com/mudler/LocalAI/pull/4204
-- Oct 2024: examples moved to [LocalAI-examples](https://github.com/mudler/LocalAI-examples)
 - Aug 2024: 🆕 FLUX-1, [P2P Explorer](https://explorer.localai.io)
-- July 2024: 🔥🔥 🆕 P2P Dashboard, LocalAI Federated mode and AI Swarms: https://github.com/mudler/LocalAI/pull/2723. P2P Global community pools: https://github.com/mudler/LocalAI/issues/3113
+- July 2024: 🔥🔥 🆕 P2P Dashboard, LocalAI Federated mode and AI Swarms: https://github.com/mudler/LocalAI/pull/2723
+- June 2024: 🆕 You can browse now the model gallery without LocalAI! Check out https://models.localai.io
+- June 2024: Support for models from OCI registries: https://github.com/mudler/LocalAI/pull/2628
 - May 2024: 🔥🔥 Decentralized P2P llama.cpp: https://github.com/mudler/LocalAI/pull/2343 (peer2peer llama.cpp!) 👉 Docs https://localai.io/features/distribute/
+- May 2024: 🔥🔥 Openvoice: https://github.com/mudler/LocalAI/pull/2334
+- May 2024: 🆕 Function calls without grammars and mixed mode: https://github.com/mudler/LocalAI/pull/2328
 - May 2024: 🔥🔥 Distributed inferencing: https://github.com/mudler/LocalAI/pull/2324
+- May 2024: Chat, TTS, and Image generation in the WebUI: https://github.com/mudler/LocalAI/pull/2222
 - April 2024: Reranker API: https://github.com/mudler/LocalAI/pull/2121

-Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
+Hot topics (looking for contributors):

+- 🔥🔥 Distributed, P2P Global community pools: https://github.com/mudler/LocalAI/issues/3113
+- WebUI improvements: https://github.com/mudler/LocalAI/issues/2156
+- Backends v2: https://github.com/mudler/LocalAI/issues/1126
+- Improving UX v2: https://github.com/mudler/LocalAI/issues/1373
+- Assistant API: https://github.com/mudler/LocalAI/issues/1273
+- Moderation endpoint: https://github.com/mudler/LocalAI/issues/999
+- Vulkan: https://github.com/mudler/LocalAI/issues/1647
+- Anthropic API: https://github.com/mudler/LocalAI/issues/1808

+If you want to help and contribute, issues up for grabs: https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3A%22up+for+grabs%22

 ## 🚀 [Features](https://localai.io/features/)

-- 📖 [Text generation with GPTs](https://localai.io/features/text-generation/) (`llama.cpp`, `transformers`, `vllm` ... [:book: and more](https://localai.io/model-compatibility/index.html#model-compatibility-table))
+- 📖 [Text generation with GPTs](https://localai.io/features/text-generation/) (`llama.cpp`, `gpt4all.cpp`, ... [:book: and more](https://localai.io/model-compatibility/index.html#model-compatibility-table))
 - 🗣 [Text to Audio](https://localai.io/features/text-to-audio/)
 - 🔈 [Audio to Text](https://localai.io/features/audio-to-text/) (Audio transcription with `whisper.cpp`)
-- 🎨 [Image generation](https://localai.io/features/image-generation)
+- 🎨 [Image generation with stable diffusion](https://localai.io/features/image-generation)
 - 🔥 [OpenAI-alike tools API](https://localai.io/features/openai-functions/)
 - 🧠 [Embeddings generation for vector databases](https://localai.io/features/embeddings/)
 - ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
@@ -173,10 +109,11 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
 - 🥽 [Vision API](https://localai.io/features/gpt-vision/)
 - 📈 [Reranker API](https://localai.io/features/reranker/)
 - 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
-- [Agentic capabilities](https://github.com/mudler/LocalAGI)
-- 🔊 Voice activity detection (Silero-VAD support)
 - 🌍 Integrated WebUI!

+## 💻 Usage

+Check out the [Getting started](https://localai.io/basics/getting_started/index.html) section in our documentation.

 ### 🔗 Community and integrations

@@ -194,7 +131,6 @@ Model galleries
 Other:
 - Helm chart https://github.com/go-skynet/helm-charts
 - VSCode extension https://github.com/badgooooor/localai-vscode-plugin
-- Langchain: https://python.langchain.com/docs/integrations/providers/localai/
 - Terminal utility https://github.com/djcopley/ShellOracle
 - Local Smart assistant https://github.com/mudler/LocalAGI
 - Home Assistant https://github.com/sammcj/homeassistant-localai / https://github.com/drndos/hass-openai-custom-conversation / https://github.com/valentinfrlch/ha-gpt4vision
@@ -202,9 +138,6 @@ Other:
 - Slack bot https://github.com/mudler/LocalAGI/tree/main/examples/slack
 - Shell-Pilot(Interact with LLM using LocalAI models via pure shell scripts on your Linux or MacOS system) https://github.com/reid41/shell-pilot
 - Telegram bot https://github.com/mudler/LocalAI/tree/master/examples/telegram-bot
-- Another Telegram Bot https://github.com/JackBekket/Hellper
-- Auto-documentation https://github.com/JackBekket/Reflexia
-- Github bot which answer on issues, with code and documentation as context https://github.com/JackBekket/GitHelper
 - Github Actions: https://github.com/marketplace/actions/start-localai
 - Examples: https://github.com/mudler/LocalAI/tree/master/examples/

@@ -252,7 +185,7 @@ A huge thank you to our generous sponsors who support this project covering CI e

 <p align="center">
 <a href="https://www.spectrocloud.com/" target="blank">
-<img height="200" src="https://github.com/user-attachments/assets/72eab1dd-8b93-4fc0-9ade-84db49f24962">
+<img height="200" src="https://github.com/go-skynet/LocalAI/assets/2420543/68a6f3cb-8a65-4a4d-99b5-6417a8905512">
 </a>
 <a href="https://www.premai.io/" target="blank">
 <img height="200" src="https://github.com/mudler/LocalAI/assets/2420543/42e4ca83-661e-4f79-8e46-ae43689683d6"> <br>
@@ -279,6 +212,7 @@ LocalAI couldn't have been built without the help of great software already avai
 - https://github.com/antimatter15/alpaca.cpp
 - https://github.com/EdVince/Stable-Diffusion-NCNN
 - https://github.com/ggerganov/whisper.cpp
+- https://github.com/saharNooby/rwkv.cpp
 - https://github.com/rhasspy/piper

 ## 🤗 Contributors

@@ -1,7 +1,7 @@
-embeddings: true
 name: text-embedding-ada-002
+backend: bert-embeddings
 parameters:
-model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf
+model: huggingface://mudler/all-MiniLM-L6-v2/ggml-model-q4_0.bin

 usage: |
 You can test this model with curl like this:

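The embeddings model configured above is exposed through the OpenAI-compatible embeddings endpoint; a minimal sketch (the input text is illustrative):

```bash
curl http://localhost:8080/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{"model": "text-embedding-ada-002", "input": "A long time ago in a galaxy far, far away"}'
```
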
@@ -1,17 +1,56 @@
 name: stablediffusion
-backend: stablediffusion-ggml
+backend: stablediffusion
-cfg_scale: 4.5
-options:
-- sampler:euler
 parameters:
-model: stable-diffusion-v1-5-pruned-emaonly-Q4_0.gguf
+model: stablediffusion_assets
-step: 25
+license: "BSD-3"
+urls:
+- https://github.com/EdVince/Stable-Diffusion-NCNN
+- https://github.com/EdVince/Stable-Diffusion-NCNN/blob/main/LICENSE

+description: |
+Stable Diffusion in NCNN with c++, supported txt2img and img2img

 download_files:
-- filename: "stable-diffusion-v1-5-pruned-emaonly-Q4_0.gguf"
+- filename: "stablediffusion_assets/AutoencoderKL-256-256-fp16-opt.param"
-sha256: "b8944e9fe0b69b36ae1b5bb0185b3a7b8ef14347fe0fa9af6c64c4829022261f"
+sha256: "18ca4b66685e21406bcf64c484b3b680b4949900415536d599cc876579c85c82"
-uri: "huggingface://second-state/stable-diffusion-v1-5-GGUF/stable-diffusion-v1-5-pruned-emaonly-Q4_0.gguf"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/AutoencoderKL-256-256-fp16-opt.param"
+- filename: "stablediffusion_assets/AutoencoderKL-512-512-fp16-opt.param"
+sha256: "cf45f63aacf3dbbab0f59ed92a6f2c14d9a1801314631cd3abe91e3c85639a20"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/AutoencoderKL-512-512-fp16-opt.param"
+- filename: "stablediffusion_assets/AutoencoderKL-base-fp16.param"
+sha256: "0254a056dce61b0c27dc9ec1b78b53bcf55315c540f55f051eb841aa992701ba"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/AutoencoderKL-base-fp16.param"
+- filename: "stablediffusion_assets/AutoencoderKL-encoder-512-512-fp16.bin"
+sha256: "ddcb79a9951b9f91e05e087739ed69da2c1c4ae30ba4168cce350b49d617c9fa"
+uri: "https://github.com/EdVince/Stable-Diffusion-NCNN/releases/download/naifu/AutoencoderKL-encoder-512-512-fp16.bin"
+- filename: "stablediffusion_assets/AutoencoderKL-fp16.bin"
+sha256: "f02e71f80e70252734724bbfaed5c4ddd3a8ed7e61bb2175ff5f53099f0e35dd"
+uri: "https://github.com/EdVince/Stable-Diffusion-NCNN/releases/download/naifu/AutoencoderKL-fp16.bin"
+- filename: "stablediffusion_assets/FrozenCLIPEmbedder-fp16.bin"
+sha256: "1c9a12f4e1dd1b295a388045f7f28a2352a4d70c3dc96a542189a3dd7051fdd6"
+uri: "https://github.com/EdVince/Stable-Diffusion-NCNN/releases/download/naifu/FrozenCLIPEmbedder-fp16.bin"
+- filename: "stablediffusion_assets/FrozenCLIPEmbedder-fp16.param"
+sha256: "471afbe678dd1fd3fe764ef9c6eccaccb0a7d7e601f27b462aa926b20eb368c9"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/FrozenCLIPEmbedder-fp16.param"
+- filename: "stablediffusion_assets/log_sigmas.bin"
+sha256: "a2089f8aa4c61f9c200feaec541ab3f5c94233b28deb6d5e8bcd974fa79b68ac"
+uri: "https://github.com/EdVince/Stable-Diffusion-NCNN/raw/main/x86/linux/assets/log_sigmas.bin"
+- filename: "stablediffusion_assets/UNetModel-256-256-MHA-fp16-opt.param"
+sha256: "a58c380229f09491776df837b7aa7adffc0a87821dc4708b34535da2e36e3da1"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/UNetModel-256-256-MHA-fp16-opt.param"
+- filename: "stablediffusion_assets/UNetModel-512-512-MHA-fp16-opt.param"
+sha256: "f12034067062827bd7f43d1d21888d1f03905401acf6c6eea22be23c259636fa"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/UNetModel-512-512-MHA-fp16-opt.param"
+- filename: "stablediffusion_assets/UNetModel-base-MHA-fp16.param"
+sha256: "696f6975de49f4325b53ce32aff81861a6d6c07cd9ce3f0aae2cc405350af38d"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/UNetModel-base-MHA-fp16.param"
+- filename: "stablediffusion_assets/UNetModel-MHA-fp16.bin"
+sha256: "d618918d011bfc1f644c0f2a33bf84931bd53b28a98492b0a8ed6f3a818852c3"
+uri: "https://github.com/EdVince/Stable-Diffusion-NCNN/releases/download/naifu/UNetModel-MHA-fp16.bin"
+- filename: "stablediffusion_assets/vocab.txt"
+sha256: "e30e57b6f1e47616982ef898d8922be24e535b4fa3d0110477b3a6f02ebbae7d"
+uri: "https://raw.githubusercontent.com/EdVince/Stable-Diffusion-NCNN/main/x86/linux/assets/vocab.txt"

 usage: |
 curl http://localhost:8080/v1/images/generations \

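Completing the usage snippet above for reference, an image-generation request against this configuration would look roughly like this (prompt and size are illustrative, not part of the diff):

```bash
curl http://localhost:8080/v1/images/generations \
  -H "Content-Type: application/json" \
  -d '{"model": "stablediffusion", "prompt": "a cute baby sea otter", "size": "256x256"}'
```
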
@@ -1,57 +1,101 @@
-context_size: 8192
-f16: true
-function:
-grammar:
-no_mixed_free_string: true
-schema_type: llama3.1 # or JSON is supported too (json)
-response_regex:
-- <function=(?P<name>\w+)>(?P<arguments>.*)</function>
-mmap: true
 name: gpt-4
+mmap: true
 parameters:
-model: Hermes-3-Llama-3.2-3B-Q4_K_M.gguf
+model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf
+context_size: 8192

 stopwords:
-- <|im_end|>
+- "<|im_end|>"
-- <dummy32000>
+- "<dummy32000>"
-- <|eot_id|>
+- "</tool_call>"
-- <|end_of_text|>
+- "<|eot_id|>"
+- "<|end_of_text|>"

+function:
+# disable injecting the "answer" tool
+disable_no_action: true

+grammar:
+# This allows the grammar to also return messages
+mixed_mode: true
+# Suffix to add to the grammar
+#prefix: '<tool_call>\n'
+# Force parallel calls in the grammar
+# parallel_calls: true

+return_name_in_function_response: true
+# Without grammar uncomment the lines below
+# Warning: this is relying only on the capability of the
+# LLM model to generate the correct function call.
+json_regex_match:
+- "(?s)<tool_call>(.*?)</tool_call>"
+- "(?s)<tool_call>(.*?)"
+replace_llm_results:
+# Drop the scratchpad content from responses
+- key: "(?s)<scratchpad>.*</scratchpad>"
+value: ""
+replace_function_results:
+# Replace everything that is not JSON array or object
+#
+- key: '(?s)^[^{\[]*'
+value: ""
+- key: '(?s)[^}\]]*$'
+value: ""
+- key: "'([^']*?)'"
+value: "_DQUOTE_${1}_DQUOTE_"
+- key: '\\"'
+value: "__TEMP_QUOTE__"
+- key: "\'"
+value: "'"
+- key: "_DQUOTE_"
+value: '"'
+- key: "__TEMP_QUOTE__"
+value: '"'
+# Drop the scratchpad content from responses
+- key: "(?s)<scratchpad>.*</scratchpad>"
+value: ""

 template:
 chat: |
-<|begin_of_text|><|start_header_id|>system<|end_header_id|>
+{{.Input -}}
-You are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>
+<|im_start|>assistant
-{{.Input }}
-<|start_header_id|>assistant<|end_header_id|>
 chat_message: |
-<|start_header_id|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}<|end_header_id|>
+<|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
-{{ if .FunctionCall -}}
+{{- if .FunctionCall }}
-{{ else if eq .RoleName "tool" -}}
+<tool_call>
-The Function was executed and the response was:
+{{- else if eq .RoleName "tool" }}
-{{ end -}}
+<tool_response>
-{{ if .Content -}}
+{{- end }}
-{{.Content -}}
+{{- if .Content}}
-{{ else if .FunctionCall -}}
+{{.Content }}
-{{ range .FunctionCall }}
+{{- end }}
-[{{.FunctionCall.Name}}({{.FunctionCall.Arguments}})]
+{{- if .FunctionCall}}
-{{ end }}
+{{toJson .FunctionCall}}
-{{ end -}}
+{{- end }}
-<|eot_id|>
+{{- if .FunctionCall }}
+</tool_call>
+{{- else if eq .RoleName "tool" }}
+</tool_response>
+{{- end }}<|im_end|>
 completion: |
 {{.Input}}
-function: |
+function: |-
-<|start_header_id|>system<|end_header_id|>
+<|im_start|>system
-You are an expert in composing functions. You are given a question and a set of possible functions.
+You are a function calling AI model.
-Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
+Here are the available tools:
-If none of the functions can be used, point it out. If the given question lacks the parameters required by the function, also point it out. You should only return the function call in tools call sections.
+<tools>
-If you decide to invoke any of the function(s), you MUST put it in the format as follows:
+{{range .Functions}}
-[func_name1(params_name1=params_value1,params_name2=params_value2,...),func_name2(params_name1=params_value1,params_name2=params_value2,...)]
+{'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
-You SHOULD NOT include any other text in the response.
+{{end}}
-Here is a list of functions in JSON format that you can invoke.
+</tools>
-{{toJson .Functions}}
+You should call the tools provided to you sequentially
-<|eot_id|><|start_header_id|>user<|end_header_id|>
+Please use <scratchpad> XML tags to record your reasoning and planning before you call the functions as follows:
-{{.Input}}
+<scratchpad>
-<|eot_id|><|start_header_id|>assistant<|end_header_id|>
+{step-by-step reasoning and plan in bullet points}
+</scratchpad>
-download_files:
+For each function call return a json object with function name and arguments within <tool_call> XML tags as follows:
-- filename: Hermes-3-Llama-3.2-3B-Q4_K_M.gguf
+<tool_call>
-sha256: 2e220a14ba4328fee38cf36c2c068261560f999fadb5725ce5c6d977cb5126b5
+{"arguments": <args-dict>, "name": <function-name>}
-uri: huggingface://bartowski/Hermes-3-Llama-3.2-3B-GGUF/Hermes-3-Llama-3.2-3B-Q4_K_M.gguf
+</tool_call><|im_end|>
+{{.Input -}}
+<|im_start|>assistant

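For context, this function-calling configuration is driven through the standard chat-completions endpoint with an OpenAI-style tools list; a minimal sketch (the weather tool is illustrative and not part of the diff):

```bash
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4",
    "messages": [{"role": "user", "content": "What is the weather like in Boston?"}],
    "tools": [{
      "type": "function",
      "function": {
        "name": "get_current_weather",
        "description": "Get the current weather in a given location",
        "parameters": {
          "type": "object",
          "properties": {"location": {"type": "string", "description": "City name"}},
          "required": ["location"]
        }
      }
    }],
    "tool_choice": "auto"
  }'
```
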
@@ -1,8 +0,0 @@
-backend: silero-vad
-name: silero-vad
-parameters:
-model: silero-vad.onnx
-download_files:
-- filename: silero-vad.onnx
-uri: https://huggingface.co/onnx-community/silero-vad/resolve/main/onnx/model.onnx
-sha256: a4a068cd6cf1ea8355b84327595838ca748ec29a25bc91fc82e6c299ccdc5808

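The silero-vad configuration removed on this branch is normally exercised through LocalAI's VAD endpoint; the following is only an assumed sketch (both the endpoint path and the payload shape are assumptions, inferred from the VADRequest proto later in this compare, which carries the raw audio as a float array):

```bash
# Assumed request shape: "audio" is an array of float PCM samples
curl http://localhost:8080/vad \
  -H "Content-Type: application/json" \
  -d '{"model": "silero-vad", "audio": [0.0, 0.0, 0.1, 0.2, 0.1, 0.0]}'
```
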
@@ -1,49 +1,31 @@
+backend: llama-cpp
 context_size: 4096
 f16: true
 mmap: true
-mmproj: minicpm-v-2_6-mmproj-f16.gguf
+name: gpt-4-vision-preview
-name: gpt-4o
+roles:
+user: "USER:"
+assistant: "ASSISTANT:"
+system: "SYSTEM:"

+mmproj: bakllava-mmproj.gguf
 parameters:
-model: minicpm-v-2_6-Q4_K_M.gguf
+model: bakllava.gguf
-stopwords:
-- <|im_end|>
-- <dummy32000>
-- </s>
-- <|endoftext|>
 template:
 chat: |
-{{.Input -}}
+A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
-<|im_start|>assistant
-chat_message: |
-<|im_start|>{{ .RoleName }}
-{{ if .FunctionCall -}}
-Function call:
-{{ else if eq .RoleName "tool" -}}
-Function response:
-{{ end -}}
-{{ if .Content -}}
-{{.Content }}
-{{ end -}}
-{{ if .FunctionCall -}}
-{{toJson .FunctionCall}}
-{{ end -}}<|im_end|>
-completion: |
 {{.Input}}
-function: |
+ASSISTANT:
-<|im_start|>system
-You are a function calling AI model. You are provided with functions to execute. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
-{{range .Functions}}
-{'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
-{{end}}
-For each function call return a json object with function name and arguments
-<|im_end|>
-{{.Input -}}
-<|im_start|>assistant

 download_files:
-- filename: minicpm-v-2_6-Q4_K_M.gguf
+- filename: bakllava.gguf
-sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
+uri: huggingface://mys/ggml_bakllava-1/ggml-model-q4_k.gguf
-uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
+- filename: bakllava-mmproj.gguf
-- filename: minicpm-v-2_6-mmproj-f16.gguf
+uri: huggingface://mys/ggml_bakllava-1/mmproj-model-f16.gguf
-uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
-sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
+usage: |
+curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
+"model": "gpt-4-vision-preview",
+"messages": [{"role": "user", "content": [{"type":"text", "text": "What is in the image?"}, {"type": "image_url", "image_url": {"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" }}], "temperature": 0.9}]}'

@@ -129,7 +129,7 @@ detect_gpu
 detect_gpu_size

 PROFILE="${PROFILE:-$GPU_SIZE}" # default to cpu
-export MODELS="${MODELS:-/aio/${PROFILE}/embeddings.yaml,/aio/${PROFILE}/rerank.yaml,/aio/${PROFILE}/text-to-speech.yaml,/aio/${PROFILE}/image-gen.yaml,/aio/${PROFILE}/text-to-text.yaml,/aio/${PROFILE}/speech-to-text.yaml,/aio/${PROFILE}/vad.yaml,/aio/${PROFILE}/vision.yaml}"
+export MODELS="${MODELS:-/aio/${PROFILE}/embeddings.yaml,/aio/${PROFILE}/rerank.yaml,/aio/${PROFILE}/text-to-speech.yaml,/aio/${PROFILE}/image-gen.yaml,/aio/${PROFILE}/text-to-text.yaml,/aio/${PROFILE}/speech-to-text.yaml,/aio/${PROFILE}/vision.yaml}"

 check_vars

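Since the entrypoint above only falls back to the detected GPU size when PROFILE is unset, the profile (and therefore the preloaded model list) can be forced from the container environment; a minimal sketch using the AIO image named earlier in this compare:

```bash
# Force the gpu-8g profile instead of the autodetected one
docker run -ti --name local-ai -p 8080:8080 --gpus all \
  -e PROFILE=gpu-8g \
  localai/localai:latest-aio-gpu-nvidia-cuda-12
```
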
@@ -1,7 +1,7 @@
-embeddings: true
 name: text-embedding-ada-002
+backend: sentencetransformers
 parameters:
-model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf
+model: all-MiniLM-L6-v2

 usage: |
 You can test this model with curl like this:

@@ -1,53 +1,101 @@
-context_size: 4096
-f16: true
-function:
-capture_llm_results:
-- (?s)<Thought>(.*?)</Thought>
-grammar:
-properties_order: name,arguments
-json_regex_match:
-- (?s)<Output>(.*?)</Output>
-replace_llm_results:
-- key: (?s)<Thought>(.*?)</Thought>
-value: ""
-mmap: true
 name: gpt-4
+mmap: true
 parameters:
-model: localai-functioncall-qwen2.5-7b-v0.5-q4_k_m.gguf
+model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf
+context_size: 8192

 stopwords:
-- <|im_end|>
+- "<|im_end|>"
-- <dummy32000>
+- "<dummy32000>"
-- </s>
+- "</tool_call>"
+- "<|eot_id|>"
+- "<|end_of_text|>"

+function:
+# disable injecting the "answer" tool
+disable_no_action: true

+grammar:
+# This allows the grammar to also return messages
+mixed_mode: true
+# Suffix to add to the grammar
+#prefix: '<tool_call>\n'
+# Force parallel calls in the grammar
+# parallel_calls: true

+return_name_in_function_response: true
+# Without grammar uncomment the lines below
+# Warning: this is relying only on the capability of the
+# LLM model to generate the correct function call.
+json_regex_match:
+- "(?s)<tool_call>(.*?)</tool_call>"
+- "(?s)<tool_call>(.*?)"
+replace_llm_results:
+# Drop the scratchpad content from responses
+- key: "(?s)<scratchpad>.*</scratchpad>"
+value: ""
+replace_function_results:
+# Replace everything that is not JSON array or object
+#
+- key: '(?s)^[^{\[]*'
+value: ""
+- key: '(?s)[^}\]]*$'
+value: ""
+- key: "'([^']*?)'"
+value: "_DQUOTE_${1}_DQUOTE_"
+- key: '\\"'
+value: "__TEMP_QUOTE__"
+- key: "\'"
+value: "'"
+- key: "_DQUOTE_"
+value: '"'
+- key: "__TEMP_QUOTE__"
+value: '"'
+# Drop the scratchpad content from responses
+- key: "(?s)<scratchpad>.*</scratchpad>"
+value: ""

 template:
 chat: |
 {{.Input -}}
 <|im_start|>assistant
 chat_message: |
-<|im_start|>{{ .RoleName }}
+<|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
-{{ if .FunctionCall -}}
+{{- if .FunctionCall }}
-Function call:
+<tool_call>
-{{ else if eq .RoleName "tool" -}}
+{{- else if eq .RoleName "tool" }}
-Function response:
+<tool_response>
-{{ end -}}
+{{- end }}
-{{ if .Content -}}
+{{- if .Content}}
 {{.Content }}
-{{ end -}}
+{{- end }}
-{{ if .FunctionCall -}}
+{{- if .FunctionCall}}
 {{toJson .FunctionCall}}
-{{ end -}}<|im_end|>
+{{- end }}
+{{- if .FunctionCall }}
+</tool_call>
+{{- else if eq .RoleName "tool" }}
+</tool_response>
+{{- end }}<|im_end|>
 completion: |
 {{.Input}}
-function: |
+function: |-
 <|im_start|>system
-You are an AI assistant that executes function calls, and these are the tools at your disposal:
+You are a function calling AI model.
+Here are the available tools:
+<tools>
 {{range .Functions}}
 {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
 {{end}}
-<|im_end|>
+</tools>
+You should call the tools provided to you sequentially
+Please use <scratchpad> XML tags to record your reasoning and planning before you call the functions as follows:
+<scratchpad>
+{step-by-step reasoning and plan in bullet points}
+</scratchpad>
+For each function call return a json object with function name and arguments within <tool_call> XML tags as follows:
+<tool_call>
+{"arguments": <args-dict>, "name": <function-name>}
+</tool_call><|im_end|>
 {{.Input -}}
 <|im_start|>assistant

-download_files:
-- filename: localai-functioncall-phi-4-v0.3-q4_k_m.gguf
-sha256: 23fee048ded2a6e2e1a7b6bbefa6cbf83068f194caa9552aecbaa00fec8a16d5
-uri: huggingface://mudler/LocalAI-functioncall-phi-4-v0.3-Q4_K_M-GGUF/localai-functioncall-phi-4-v0.3-q4_k_m.gguf

@@ -1,8 +0,0 @@
-backend: silero-vad
-name: silero-vad
-parameters:
-model: silero-vad.onnx
-download_files:
-- filename: silero-vad.onnx
-uri: https://huggingface.co/onnx-community/silero-vad/resolve/main/onnx/model.onnx
-sha256: a4a068cd6cf1ea8355b84327595838ca748ec29a25bc91fc82e6c299ccdc5808

@@ -1,49 +1,35 @@
+backend: llama-cpp
 context_size: 4096
 f16: true
 mmap: true
-mmproj: minicpm-v-2_6-mmproj-f16.gguf
+name: gpt-4-vision-preview
-name: gpt-4o
+roles:
+user: "USER:"
+assistant: "ASSISTANT:"
+system: "SYSTEM:"

+mmproj: llava-v1.6-7b-mmproj-f16.gguf
 parameters:
-model: minicpm-v-2_6-Q4_K_M.gguf
+model: llava-v1.6-mistral-7b.Q5_K_M.gguf
-stopwords:
+temperature: 0.2
-- <|im_end|>
+top_k: 40
-- <dummy32000>
+top_p: 0.95
-- </s>
+seed: -1
-- <|endoftext|>
 template:
 chat: |
-{{.Input -}}
+A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
-<|im_start|>assistant
-chat_message: |
-<|im_start|>{{ .RoleName }}
-{{ if .FunctionCall -}}
-Function call:
-{{ else if eq .RoleName "tool" -}}
-Function response:
-{{ end -}}
-{{ if .Content -}}
-{{.Content }}
-{{ end -}}
-{{ if .FunctionCall -}}
-{{toJson .FunctionCall}}
-{{ end -}}<|im_end|>
-completion: |
 {{.Input}}
-function: |
+ASSISTANT:
-<|im_start|>system
-You are a function calling AI model. You are provided with functions to execute. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
-{{range .Functions}}
-{'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
-{{end}}
-For each function call return a json object with function name and arguments
-<|im_end|>
-{{.Input -}}
-<|im_start|>assistant

 download_files:
-- filename: minicpm-v-2_6-Q4_K_M.gguf
+- filename: llava-v1.6-mistral-7b.Q5_K_M.gguf
-sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
+uri: huggingface://cjpais/llava-1.6-mistral-7b-gguf/llava-v1.6-mistral-7b.Q5_K_M.gguf
-uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
+- filename: llava-v1.6-7b-mmproj-f16.gguf
-- filename: minicpm-v-2_6-mmproj-f16.gguf
+uri: huggingface://cjpais/llava-1.6-mistral-7b-gguf/mmproj-model-f16.gguf
-uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
-sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
+usage: |
+curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
+"model": "gpt-4-vision-preview",
+"messages": [{"role": "user", "content": [{"type":"text", "text": "What is in the image?"}, {"type": "image_url", "image_url": {"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" }}], "temperature": 0.9}]}'

@@ -1,7 +1,7 @@
-embeddings: true
 name: text-embedding-ada-002
+backend: sentencetransformers
 parameters:
-model: huggingface://bartowski/granite-embedding-107m-multilingual-GGUF/granite-embedding-107m-multilingual-f16.gguf
+model: all-MiniLM-L6-v2

 usage: |
 You can test this model with curl like this:

@@ -1,53 +1,103 @@
-context_size: 4096
-f16: true
-function:
-capture_llm_results:
-- (?s)<Thought>(.*?)</Thought>
-grammar:
-properties_order: name,arguments
-json_regex_match:
-- (?s)<Output>(.*?)</Output>
-replace_llm_results:
-- key: (?s)<Thought>(.*?)</Thought>
-value: ""
-mmap: true
 name: gpt-4
+mmap: false
+context_size: 8192

+f16: false
 parameters:
-model: localai-functioncall-qwen2.5-7b-v0.5-q4_k_m.gguf
+model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf

 stopwords:
-- <|im_end|>
+- "<|im_end|>"
-- <dummy32000>
+- "<dummy32000>"
-- </s>
+- "</tool_call>"
+- "<|eot_id|>"
+- "<|end_of_text|>"

+function:
+# disable injecting the "answer" tool
+disable_no_action: true

+grammar:
+# This allows the grammar to also return messages
+mixed_mode: true
+# Suffix to add to the grammar
+#prefix: '<tool_call>\n'
+# Force parallel calls in the grammar
+# parallel_calls: true

+return_name_in_function_response: true
+# Without grammar uncomment the lines below
+# Warning: this is relying only on the capability of the
+# LLM model to generate the correct function call.
+json_regex_match:
+- "(?s)<tool_call>(.*?)</tool_call>"
+- "(?s)<tool_call>(.*?)"
+replace_llm_results:
+# Drop the scratchpad content from responses
+- key: "(?s)<scratchpad>.*</scratchpad>"
+value: ""
+replace_function_results:
+# Replace everything that is not JSON array or object
+#
+- key: '(?s)^[^{\[]*'
+value: ""
+- key: '(?s)[^}\]]*$'
+value: ""
+- key: "'([^']*?)'"
+value: "_DQUOTE_${1}_DQUOTE_"
+- key: '\\"'
+value: "__TEMP_QUOTE__"
+- key: "\'"
+value: "'"
+- key: "_DQUOTE_"
+value: '"'
+- key: "__TEMP_QUOTE__"
+value: '"'
+# Drop the scratchpad content from responses
+- key: "(?s)<scratchpad>.*</scratchpad>"
+value: ""

 template:
 chat: |
 {{.Input -}}
 <|im_start|>assistant
 chat_message: |
-<|im_start|>{{ .RoleName }}
+<|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
-{{ if .FunctionCall -}}
+{{- if .FunctionCall }}
-Function call:
+<tool_call>
-{{ else if eq .RoleName "tool" -}}
+{{- else if eq .RoleName "tool" }}
-Function response:
+<tool_response>
-{{ end -}}
+{{- end }}
-{{ if .Content -}}
+{{- if .Content}}
 {{.Content }}
-{{ end -}}
+{{- end }}
-{{ if .FunctionCall -}}
+{{- if .FunctionCall}}
 {{toJson .FunctionCall}}
-{{ end -}}<|im_end|>
+{{- end }}
+{{- if .FunctionCall }}
+</tool_call>
+{{- else if eq .RoleName "tool" }}
+</tool_response>
+{{- end }}<|im_end|>
 completion: |
 {{.Input}}
-function: |
+function: |-
 <|im_start|>system
-You are an AI assistant that executes function calls, and these are the tools at your disposal:
+You are a function calling AI model.
+Here are the available tools:
+<tools>
 {{range .Functions}}
 {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
 {{end}}
-<|im_end|>
+</tools>
+You should call the tools provided to you sequentially
+Please use <scratchpad> XML tags to record your reasoning and planning before you call the functions as follows:
+<scratchpad>
+{step-by-step reasoning and plan in bullet points}
+</scratchpad>
+For each function call return a json object with function name and arguments within <tool_call> XML tags as follows:
+<tool_call>
+{"arguments": <args-dict>, "name": <function-name>}
+</tool_call><|im_end|>
 {{.Input -}}
 <|im_start|>assistant

-download_files:
-- filename: localai-functioncall-phi-4-v0.3-q4_k_m.gguf
-sha256: 23fee048ded2a6e2e1a7b6bbefa6cbf83068f194caa9552aecbaa00fec8a16d5
-uri: huggingface://mudler/LocalAI-functioncall-phi-4-v0.3-Q4_K_M-GGUF/localai-functioncall-phi-4-v0.3-q4_k_m.gguf

@@ -1,8 +0,0 @@
-backend: silero-vad
-name: silero-vad
-parameters:
-model: silero-vad.onnx
-download_files:
-- filename: silero-vad.onnx
-uri: https://huggingface.co/onnx-community/silero-vad/resolve/main/onnx/model.onnx
-sha256: a4a068cd6cf1ea8355b84327595838ca748ec29a25bc91fc82e6c299ccdc5808

@@ -1,50 +1,35 @@
+backend: llama-cpp
 context_size: 4096
-f16: true
+mmap: false
-mmap: true
+f16: false
-mmproj: minicpm-v-2_6-mmproj-f16.gguf
+name: gpt-4-vision-preview
-name: gpt-4o
+roles:
+user: "USER:"
+assistant: "ASSISTANT:"
+system: "SYSTEM:"

+mmproj: llava-v1.6-7b-mmproj-f16.gguf
 parameters:
-model: minicpm-v-2_6-Q4_K_M.gguf
+model: llava-v1.6-mistral-7b.Q5_K_M.gguf
-stopwords:
+temperature: 0.2
-- <|im_end|>
+top_k: 40
-- <dummy32000>
+top_p: 0.95
-- </s>
+seed: -1
-- <|endoftext|>
 template:
 chat: |
-{{.Input -}}
+A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.
-<|im_start|>assistant
-chat_message: |
-<|im_start|>{{ .RoleName }}
-{{ if .FunctionCall -}}
-Function call:
-{{ else if eq .RoleName "tool" -}}
-Function response:
-{{ end -}}
-{{ if .Content -}}
-{{.Content }}
-{{ end -}}
-{{ if .FunctionCall -}}
-{{toJson .FunctionCall}}
-{{ end -}}<|im_end|>
-completion: |
 {{.Input}}
-function: |
+ASSISTANT:
-<|im_start|>system
-You are a function calling AI model. You are provided with functions to execute. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
-{{range .Functions}}
-{'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
-{{end}}
-For each function call return a json object with function name and arguments
-<|im_end|>
-{{.Input -}}
-<|im_start|>assistant

 download_files:
-- filename: minicpm-v-2_6-Q4_K_M.gguf
+- filename: llava-v1.6-mistral-7b.Q5_K_M.gguf
-sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
+uri: huggingface://cjpais/llava-1.6-mistral-7b-gguf/llava-v1.6-mistral-7b.Q5_K_M.gguf
-uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
+- filename: llava-v1.6-7b-mmproj-f16.gguf
-- filename: minicpm-v-2_6-mmproj-f16.gguf
+uri: huggingface://cjpais/llava-1.6-mistral-7b-gguf/mmproj-model-f16.gguf
-uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
-sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd
+usage: |
+curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
+"model": "gpt-4-vision-preview",
+"messages": [{"role": "user", "content": [{"type":"text", "text": "What is in the image?"}, {"type": "image_url", "image_url": {"url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" }}], "temperature": 0.9}]}'

@@ -26,21 +26,6 @@ service Backend {
 rpc StoresFind(StoresFindOptions) returns (StoresFindResult) {}

 rpc Rerank(RerankRequest) returns (RerankResult) {}

-rpc GetMetrics(MetricsRequest) returns (MetricsResponse);

-rpc VAD(VADRequest) returns (VADResponse) {}
-}

-// Define the empty request
-message MetricsRequest {}

-message MetricsResponse {
-int32 slot_id = 1;
-string prompt_json_for_slot = 2; // Stores the prompt as a JSON string.
-float tokens_per_second = 3;
-int32 tokens_generated = 4;
-int32 prompt_tokens_processed = 5;
 }

 message RerankRequest {
@@ -149,9 +134,6 @@ message PredictOptions {
 repeated string Images = 42;
 bool UseTokenizerTemplate = 43;
 repeated Message Messages = 44;
-repeated string Videos = 45;
-repeated string Audios = 46;
-string CorrelationId = 47;
 }

 // The response message containing the result
@@ -159,12 +141,6 @@ message Reply {
 bytes message = 1;
 int32 tokens = 2;
 int32 prompt_tokens = 3;
-double timing_prompt_processing = 4;
-double timing_token_generation = 5;
-}

-message GrammarTrigger {
-string word = 1;
 }

 message ModelOptions {
@@ -190,7 +166,11 @@ message ModelOptions {
 int32 NGQA = 20;
 string ModelFile = 21;

+// AutoGPTQ
+string Device = 22;
+bool UseTriton = 23;
+string ModelBaseName = 24;
+bool UseFastTokenizer = 25;

 // Diffusers
 string PipelineType = 26;
@@ -223,12 +203,6 @@ message ModelOptions {
 int32 SwapSpace = 53;
 int32 MaxModelLen = 54;
 int32 TensorParallelSize = 55;
-string LoadFormat = 58;
-bool DisableLogStatus = 66;
-string DType = 67;
-int32 LimitImagePerPrompt = 68;
-int32 LimitVideoPerPrompt = 69;
-int32 LimitAudioPerPrompt = 70;

 string MMProj = 41;

@@ -242,18 +216,6 @@ message ModelOptions {

 bool FlashAttention = 56;
 bool NoKVOffload = 57;

-string ModelPath = 59;

-repeated string LoraAdapters = 60;
-repeated float LoraScales = 61;

-repeated string Options = 62;

-string CacheTypeKey = 63;
-string CacheTypeValue = 64;

-repeated GrammarTrigger GrammarTriggers = 65;
 }

 message Result {
@@ -309,19 +271,6 @@ message TTSRequest {
 optional string language = 5;
 }

-message VADRequest {
-repeated float audio = 1;
-}

-message VADSegment {
-float start = 1;
|
|
||||||
float end = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message VADResponse {
|
|
||||||
repeated VADSegment segments = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message SoundGenerationRequest {
|
message SoundGenerationRequest {
|
||||||
string text = 1;
|
string text = 1;
|
||||||
string model = 2;
|
string model = 2;
|
||||||
@@ -357,4 +306,4 @@ message StatusResponse {
|
|||||||
message Message {
|
message Message {
|
||||||
string role = 1;
|
string role = 1;
|
||||||
string content = 2;
|
string content = 2;
|
||||||
}
|
}
|
||||||
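For reference, the VAD messages dropped in the hunk above are what a client sends and receives over gRPC. A minimal sketch of a caller using the generated Go stub follows; the NewBackendClient constructor name, the localhost:50051 address, and the zeroed placeholder audio buffer are assumptions for illustration, not taken from the repository:

```go
package main

import (
	"context"
	"fmt"
	"log"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Connect to a backend process that implements the VAD RPC (assumed address).
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)

	// One second of 16 kHz mono PCM samples in [-1, 1]; here just a silent placeholder.
	audio := make([]float32, 16000)

	res, err := client.VAD(context.Background(), &pb.VADRequest{Audio: audio})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range res.Segments {
		fmt.Printf("speech from %.2fs to %.2fs\n", s.Start, s.End)
	}
}
```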
@@ -2,7 +2,7 @@
 ## XXX: In some versions of CMake clip wasn't being built before llama.
 ## This is an hack for now, but it should be fixed in the future.
 set(TARGET myclip)
-add_library(${TARGET} clip.cpp clip.h clip-impl.h llava.cpp llava.h)
+add_library(${TARGET} clip.cpp clip.h llava.cpp llava.h)
 install(TARGETS ${TARGET} LIBRARY)
 target_include_directories(myclip PUBLIC .)
 target_include_directories(myclip PUBLIC ../..)
@@ -8,7 +8,7 @@ ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
 TARGET?=--target grpc-server
 
 # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
-CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
+CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
 
 # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
 ifeq ($(BUILD_TYPE),cublas)
@@ -22,7 +22,7 @@ else ifeq ($(BUILD_TYPE),clblas)
     CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
 # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
 else ifeq ($(BUILD_TYPE),hipblas)
-    CMAKE_ARGS+=-DGGML_HIP=ON
+    CMAKE_ARGS+=-DGGML_HIPBLAS=ON
 # If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
 # But if it's OSX without metal, disable it here
 else ifeq ($(OS),Darwin)
@@ -30,24 +30,19 @@ else ifeq ($(OS),Darwin)
         CMAKE_ARGS+=-DGGML_METAL=OFF
     else
         CMAKE_ARGS+=-DGGML_METAL=ON
-        CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
+        # Until this is tested properly, we disable embedded metal file
+        # as we already embed it as part of the LocalAI assets
+        CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=OFF
         TARGET+=--target ggml-metal
     endif
 endif
 
 ifeq ($(BUILD_TYPE),sycl_f16)
-    CMAKE_ARGS+=-DGGML_SYCL=ON \
-        -DCMAKE_C_COMPILER=icx \
-        -DCMAKE_CXX_COMPILER=icpx \
-        -DCMAKE_CXX_FLAGS="-fsycl" \
-        -DGGML_SYCL_F16=ON
+    CMAKE_ARGS+=-DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON
 endif
 
 ifeq ($(BUILD_TYPE),sycl_f32)
-    CMAKE_ARGS+=-DGGML_SYCL=ON \
-        -DCMAKE_C_COMPILER=icx \
-        -DCMAKE_CXX_COMPILER=icpx \
-        -DCMAKE_CXX_FLAGS="-fsycl"
+    CMAKE_ARGS+=-DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
 endif
 
 llama.cpp:
@@ -84,4 +79,4 @@ ifneq (,$(findstring sycl,$(BUILD_TYPE)))
 else
 	+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
 endif
 	cp llama.cpp/build/bin/grpc-server .
File diff suppressed because it is too large
@@ -1,13 +1,13 @@
 diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
-index 3cd0d2fa..6c5e811a 100644
+index 342042ff..224db9b5 100644
 --- a/examples/llava/clip.cpp
 +++ b/examples/llava/clip.cpp
-@@ -2608,7 +2608,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
+@@ -2419,7 +2419,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
          struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
          int* patches_data = (int*)malloc(ggml_nbytes(patches));
          for (int i = 0; i < num_patches; i++) {
 -            patches_data[i] = i + 1;
 +            patches_data[i] = i;
          }
          ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
          free(patches_data);
@@ -21,7 +21,6 @@ fi
 ## XXX: In some versions of CMake clip wasn't being built before llama.
 ## This is an hack for now, but it should be fixed in the future.
 cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
-cp -rfv llama.cpp/examples/llava/clip-impl.h llama.cpp/examples/grpc-server/clip-impl.h
 cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
 echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
 cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
@@ -61,7 +61,6 @@ struct task_server {
     json data;
     bool infill_mode = false;
     bool embedding_mode = false;
-    bool reranking_mode = false;
     int multitask_id = -1;
 };
@@ -1,25 +0,0 @@
-INCLUDE_PATH := $(abspath ./)
-LIBRARY_PATH := $(abspath ./)
-
-AR?=ar
-
-BUILD_TYPE?=
-# keep standard at C11 and C++11
-CXXFLAGS = -I. -I$(INCLUDE_PATH)/../../../sources/bark.cpp/examples -I$(INCLUDE_PATH)/../../../sources/bark.cpp/spm-headers -I$(INCLUDE_PATH)/../../../sources/bark.cpp -O3 -DNDEBUG -std=c++17 -fPIC
-LDFLAGS = -L$(LIBRARY_PATH) -L$(LIBRARY_PATH)/../../../sources/bark.cpp/build/examples -lbark -lstdc++ -lm
-
-# warnings
-CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
-
-gobark.o:
-	$(CXX) $(CXXFLAGS) gobark.cpp -o gobark.o -c $(LDFLAGS)
-
-libbark.a: gobark.o
-	cp $(INCLUDE_PATH)/../../../sources/bark.cpp/build/libbark.a ./
-	$(AR) rcs libbark.a gobark.o
-	$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml.c.o
-	$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml-alloc.c.o
-	$(AR) rcs libbark.a $(LIBRARY_PATH)/../../../sources/bark.cpp/build/encodec.cpp/ggml/src/CMakeFiles/ggml.dir/ggml-backend.c.o
-
-clean:
-	rm -f gobark.o libbark.a
@@ -1,85 +0,0 @@
|
|||||||
#include <iostream>
|
|
||||||
#include <tuple>
|
|
||||||
|
|
||||||
#include "bark.h"
|
|
||||||
#include "gobark.h"
|
|
||||||
#include "common.h"
|
|
||||||
#include "ggml.h"
|
|
||||||
|
|
||||||
struct bark_context *c;
|
|
||||||
|
|
||||||
void bark_print_progress_callback(struct bark_context *bctx, enum bark_encoding_step step, int progress, void *user_data) {
|
|
||||||
if (step == bark_encoding_step::SEMANTIC) {
|
|
||||||
printf("\rGenerating semantic tokens... %d%%", progress);
|
|
||||||
} else if (step == bark_encoding_step::COARSE) {
|
|
||||||
printf("\rGenerating coarse tokens... %d%%", progress);
|
|
||||||
} else if (step == bark_encoding_step::FINE) {
|
|
||||||
printf("\rGenerating fine tokens... %d%%", progress);
|
|
||||||
}
|
|
||||||
fflush(stdout);
|
|
||||||
}
|
|
||||||
|
|
||||||
int load_model(char *model) {
|
|
||||||
// initialize bark context
|
|
||||||
struct bark_context_params ctx_params = bark_context_default_params();
|
|
||||||
bark_params params;
|
|
||||||
|
|
||||||
params.model_path = model;
|
|
||||||
|
|
||||||
// ctx_params.verbosity = verbosity;
|
|
||||||
ctx_params.progress_callback = bark_print_progress_callback;
|
|
||||||
ctx_params.progress_callback_user_data = nullptr;
|
|
||||||
|
|
||||||
struct bark_context *bctx = bark_load_model(params.model_path.c_str(), ctx_params, params.seed);
|
|
||||||
if (!bctx) {
|
|
||||||
fprintf(stderr, "%s: Could not load model\n", __func__);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
c = bctx;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int tts(char *text,int threads, char *dst ) {
|
|
||||||
|
|
||||||
ggml_time_init();
|
|
||||||
const int64_t t_main_start_us = ggml_time_us();
|
|
||||||
|
|
||||||
// generate audio
|
|
||||||
if (!bark_generate_audio(c, text, threads)) {
|
|
||||||
fprintf(stderr, "%s: An error occured. If the problem persists, feel free to open an issue to report it.\n", __func__);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
const float *audio_data = bark_get_audio_data(c);
|
|
||||||
if (audio_data == NULL) {
|
|
||||||
fprintf(stderr, "%s: Could not get audio data\n", __func__);
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
const int audio_arr_size = bark_get_audio_data_size(c);
|
|
||||||
|
|
||||||
std::vector<float> audio_arr(audio_data, audio_data + audio_arr_size);
|
|
||||||
|
|
||||||
write_wav_on_disk(audio_arr, dst);
|
|
||||||
|
|
||||||
// report timing
|
|
||||||
{
|
|
||||||
const int64_t t_main_end_us = ggml_time_us();
|
|
||||||
const int64_t t_load_us = bark_get_load_time(c);
|
|
||||||
const int64_t t_eval_us = bark_get_eval_time(c);
|
|
||||||
|
|
||||||
printf("\n\n");
|
|
||||||
printf("%s: load time = %8.2f ms\n", __func__, t_load_us / 1000.0f);
|
|
||||||
printf("%s: eval time = %8.2f ms\n", __func__, t_eval_us / 1000.0f);
|
|
||||||
printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us) / 1000.0f);
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int unload() {
|
|
||||||
bark_free(c);
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,52 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
// #cgo CXXFLAGS: -I${SRCDIR}/../../../sources/bark.cpp/ -I${SRCDIR}/../../../sources/bark.cpp/encodec.cpp -I${SRCDIR}/../../../sources/bark.cpp/examples -I${SRCDIR}/../../../sources/bark.cpp/spm-headers
|
|
||||||
// #cgo LDFLAGS: -L${SRCDIR}/ -L${SRCDIR}/../../../sources/bark.cpp/build/examples -L${SRCDIR}/../../../sources/bark.cpp/build/encodec.cpp/ -lbark -lencodec -lcommon
|
|
||||||
// #include <gobark.h>
|
|
||||||
// #include <stdlib.h>
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
|
||||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Bark struct {
|
|
||||||
base.SingleThread
|
|
||||||
threads int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sd *Bark) Load(opts *pb.ModelOptions) error {
|
|
||||||
|
|
||||||
sd.threads = int(opts.Threads)
|
|
||||||
|
|
||||||
modelFile := C.CString(opts.ModelFile)
|
|
||||||
defer C.free(unsafe.Pointer(modelFile))
|
|
||||||
|
|
||||||
ret := C.load_model(modelFile)
|
|
||||||
if ret != 0 {
|
|
||||||
return fmt.Errorf("inference failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sd *Bark) TTS(opts *pb.TTSRequest) error {
|
|
||||||
t := C.CString(opts.Text)
|
|
||||||
defer C.free(unsafe.Pointer(t))
|
|
||||||
|
|
||||||
dst := C.CString(opts.Dst)
|
|
||||||
defer C.free(unsafe.Pointer(dst))
|
|
||||||
|
|
||||||
threads := C.int(sd.threads)
|
|
||||||
|
|
||||||
ret := C.tts(t, threads, dst)
|
|
||||||
if ret != 0 {
|
|
||||||
return fmt.Errorf("inference failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,8 +0,0 @@
-#ifdef __cplusplus
-extern "C" {
-#endif
-int load_model(char *model);
-int tts(char *text,int threads, char *dst );
-#ifdef __cplusplus
-}
-#endif
@@ -1,135 +0,0 @@
|
|||||||
INCLUDE_PATH := $(abspath ./)
|
|
||||||
LIBRARY_PATH := $(abspath ./)
|
|
||||||
|
|
||||||
AR?=ar
|
|
||||||
CMAKE_ARGS?=
|
|
||||||
BUILD_TYPE?=
|
|
||||||
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
|
|
||||||
# keep standard at C11 and C++11
|
|
||||||
CXXFLAGS = -I. -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/../../../../sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC
|
|
||||||
|
|
||||||
GOCMD?=go
|
|
||||||
CGO_LDFLAGS?=
|
|
||||||
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
|
|
||||||
CGO_LDFLAGS_SYCL=
|
|
||||||
GO_TAGS?=
|
|
||||||
LD_FLAGS?=
|
|
||||||
|
|
||||||
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
|
|
||||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
|
|
||||||
|
|
||||||
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
|
|
||||||
ifeq ($(BUILD_TYPE),cublas)
|
|
||||||
CMAKE_ARGS+=-DGGML_CUDA=ON
|
|
||||||
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
|
|
||||||
# to CMAKE_ARGS automatically
|
|
||||||
else ifeq ($(BUILD_TYPE),openblas)
|
|
||||||
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
|
|
||||||
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
|
|
||||||
else ifeq ($(BUILD_TYPE),clblas)
|
|
||||||
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
|
|
||||||
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
|
|
||||||
else ifeq ($(BUILD_TYPE),hipblas)
|
|
||||||
CMAKE_ARGS+=-DGGML_HIP=ON
|
|
||||||
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
|
|
||||||
# But if it's OSX without metal, disable it here
|
|
||||||
else ifeq ($(OS),Darwin)
|
|
||||||
ifneq ($(BUILD_TYPE),metal)
|
|
||||||
CMAKE_ARGS+=-DGGML_METAL=OFF
|
|
||||||
else
|
|
||||||
CMAKE_ARGS+=-DGGML_METAL=ON
|
|
||||||
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
|
|
||||||
TARGET+=--target ggml-metal
|
|
||||||
endif
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifeq ($(BUILD_TYPE),sycl_f16)
|
|
||||||
CMAKE_ARGS+=-DGGML_SYCL=ON \
|
|
||||||
-DCMAKE_C_COMPILER=icx \
|
|
||||||
-DCMAKE_CXX_COMPILER=icpx \
|
|
||||||
-DSD_SYCL=ON \
|
|
||||||
-DGGML_SYCL_F16=ON
|
|
||||||
CC=icx
|
|
||||||
CXX=icpx
|
|
||||||
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
|
|
||||||
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
|
|
||||||
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
|
|
||||||
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
|
|
||||||
endif
|
|
||||||
|
|
||||||
ifeq ($(BUILD_TYPE),sycl_f32)
|
|
||||||
CMAKE_ARGS+=-DGGML_SYCL=ON \
|
|
||||||
-DCMAKE_C_COMPILER=icx \
|
|
||||||
-DCMAKE_CXX_COMPILER=icpx \
|
|
||||||
-DSD_SYCL=ON
|
|
||||||
CC=icx
|
|
||||||
CXX=icpx
|
|
||||||
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
|
|
||||||
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
|
|
||||||
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
|
|
||||||
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
|
|
||||||
endif
|
|
||||||
|
|
||||||
# warnings
|
|
||||||
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
|
|
||||||
|
|
||||||
# Find all .a archives in ARCHIVE_DIR
|
|
||||||
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
|
|
||||||
GGML_ARCHIVE_DIR := build/ggml/src/
|
|
||||||
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
|
|
||||||
|
|
||||||
# Name of the single merged library
|
|
||||||
COMBINED_LIB := libggmlall.a
|
|
||||||
|
|
||||||
# Rule to merge all the .a files into one
|
|
||||||
$(COMBINED_LIB): $(ALL_ARCHIVES)
|
|
||||||
@echo "Merging all .a into $(COMBINED_LIB)"
|
|
||||||
rm -f $@
|
|
||||||
mkdir -p merge-tmp
|
|
||||||
for a in $(ALL_ARCHIVES); do \
|
|
||||||
( cd merge-tmp && ar x ../$$a ); \
|
|
||||||
done
|
|
||||||
( cd merge-tmp && ar rcs ../$@ *.o )
|
|
||||||
# Ensure we have a proper index
|
|
||||||
ranlib $@
|
|
||||||
# Clean up
|
|
||||||
rm -rf merge-tmp
|
|
||||||
|
|
||||||
build/libstable-diffusion.a:
|
|
||||||
@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
|
|
||||||
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
|
|
||||||
+bash -c "source $(ONEAPI_VARS); \
|
|
||||||
mkdir -p build && \
|
|
||||||
cd build && \
|
|
||||||
cmake $(CMAKE_ARGS) ../../../../../sources/stablediffusion-ggml.cpp && \
|
|
||||||
cmake --build . --config Release"
|
|
||||||
else
|
|
||||||
mkdir -p build && \
|
|
||||||
cd build && \
|
|
||||||
cmake $(CMAKE_ARGS) ../../../../../sources/stablediffusion-ggml.cpp && \
|
|
||||||
cmake --build . --config Release
|
|
||||||
endif
|
|
||||||
$(MAKE) $(COMBINED_LIB)
|
|
||||||
|
|
||||||
gosd.o:
|
|
||||||
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
|
|
||||||
+bash -c "source $(ONEAPI_VARS); \
|
|
||||||
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
|
|
||||||
else
|
|
||||||
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
|
|
||||||
endif
|
|
||||||
|
|
||||||
libsd.a: gosd.o
|
|
||||||
cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
|
|
||||||
$(AR) rcs libsd.a gosd.o
|
|
||||||
|
|
||||||
stablediffusion-ggml:
|
|
||||||
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
|
|
||||||
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
|
|
||||||
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o ../../../../backend-assets/grpc/stablediffusion-ggml ./
|
|
||||||
ifneq ($(UPX),)
|
|
||||||
$(UPX) ../../../../backend-assets/grpc/stablediffusion-ggml
|
|
||||||
endif
|
|
||||||
|
|
||||||
clean:
|
|
||||||
rm -rf gosd.o libsd.a build $(COMBINED_LIB)
|
|
||||||
@@ -1,231 +0,0 @@
|
|||||||
#include <stdio.h>
|
|
||||||
#include <string.h>
|
|
||||||
#include <time.h>
|
|
||||||
#include <iostream>
|
|
||||||
#include <random>
|
|
||||||
#include <string>
|
|
||||||
#include <vector>
|
|
||||||
#include "gosd.h"
|
|
||||||
|
|
||||||
// #include "preprocessing.hpp"
|
|
||||||
#include "flux.hpp"
|
|
||||||
#include "stable-diffusion.h"
|
|
||||||
|
|
||||||
#define STB_IMAGE_IMPLEMENTATION
|
|
||||||
#define STB_IMAGE_STATIC
|
|
||||||
#include "stb_image.h"
|
|
||||||
|
|
||||||
#define STB_IMAGE_WRITE_IMPLEMENTATION
|
|
||||||
#define STB_IMAGE_WRITE_STATIC
|
|
||||||
#include "stb_image_write.h"
|
|
||||||
|
|
||||||
#define STB_IMAGE_RESIZE_IMPLEMENTATION
|
|
||||||
#define STB_IMAGE_RESIZE_STATIC
|
|
||||||
#include "stb_image_resize.h"
|
|
||||||
|
|
||||||
// Names of the sampler method, same order as enum sample_method in stable-diffusion.h
|
|
||||||
const char* sample_method_str[] = {
|
|
||||||
"euler_a",
|
|
||||||
"euler",
|
|
||||||
"heun",
|
|
||||||
"dpm2",
|
|
||||||
"dpm++2s_a",
|
|
||||||
"dpm++2m",
|
|
||||||
"dpm++2mv2",
|
|
||||||
"ipndm",
|
|
||||||
"ipndm_v",
|
|
||||||
"lcm",
|
|
||||||
"ddim_trailing",
|
|
||||||
"tcd",
|
|
||||||
};
|
|
||||||
|
|
||||||
// Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h
|
|
||||||
const char* schedule_str[] = {
|
|
||||||
"default",
|
|
||||||
"discrete",
|
|
||||||
"karras",
|
|
||||||
"exponential",
|
|
||||||
"ays",
|
|
||||||
"gits",
|
|
||||||
};
|
|
||||||
|
|
||||||
sd_ctx_t* sd_c;
|
|
||||||
|
|
||||||
sample_method_t sample_method;
|
|
||||||
|
|
||||||
int load_model(char *model, char* options[], int threads, int diff) {
|
|
||||||
fprintf (stderr, "Loading model!\n");
|
|
||||||
|
|
||||||
char *stableDiffusionModel = "";
|
|
||||||
if (diff == 1 ) {
|
|
||||||
stableDiffusionModel = model;
|
|
||||||
model = "";
|
|
||||||
}
|
|
||||||
|
|
||||||
// decode options. Options are in form optname:optvale, or if booleans only optname.
|
|
||||||
char *clip_l_path = "";
|
|
||||||
char *clip_g_path = "";
|
|
||||||
char *t5xxl_path = "";
|
|
||||||
char *vae_path = "";
|
|
||||||
char *scheduler = "";
|
|
||||||
char *sampler = "";
|
|
||||||
|
|
||||||
// If options is not NULL, parse options
|
|
||||||
for (int i = 0; options[i] != NULL; i++) {
|
|
||||||
char *optname = strtok(options[i], ":");
|
|
||||||
char *optval = strtok(NULL, ":");
|
|
||||||
if (optval == NULL) {
|
|
||||||
optval = "true";
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!strcmp(optname, "clip_l_path")) {
|
|
||||||
clip_l_path = optval;
|
|
||||||
}
|
|
||||||
if (!strcmp(optname, "clip_g_path")) {
|
|
||||||
clip_g_path = optval;
|
|
||||||
}
|
|
||||||
if (!strcmp(optname, "t5xxl_path")) {
|
|
||||||
t5xxl_path = optval;
|
|
||||||
}
|
|
||||||
if (!strcmp(optname, "vae_path")) {
|
|
||||||
vae_path = optval;
|
|
||||||
}
|
|
||||||
if (!strcmp(optname, "scheduler")) {
|
|
||||||
scheduler = optval;
|
|
||||||
}
|
|
||||||
if (!strcmp(optname, "sampler")) {
|
|
||||||
sampler = optval;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
int sample_method_found = -1;
|
|
||||||
for (int m = 0; m < N_SAMPLE_METHODS; m++) {
|
|
||||||
if (!strcmp(sampler, sample_method_str[m])) {
|
|
||||||
sample_method_found = m;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (sample_method_found == -1) {
|
|
||||||
fprintf(stderr, "Invalid sample method, default to EULER_A!\n");
|
|
||||||
sample_method_found = EULER_A;
|
|
||||||
}
|
|
||||||
sample_method = (sample_method_t)sample_method_found;
|
|
||||||
|
|
||||||
int schedule_found = -1;
|
|
||||||
for (int d = 0; d < N_SCHEDULES; d++) {
|
|
||||||
if (!strcmp(scheduler, schedule_str[d])) {
|
|
||||||
schedule_found = d;
|
|
||||||
fprintf (stderr, "Found scheduler: %s\n", scheduler);
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (schedule_found == -1) {
|
|
||||||
fprintf (stderr, "Invalid scheduler! using DEFAULT\n");
|
|
||||||
schedule_found = DEFAULT;
|
|
||||||
}
|
|
||||||
|
|
||||||
schedule_t schedule = (schedule_t)schedule_found;
|
|
||||||
|
|
||||||
fprintf (stderr, "Creating context\n");
|
|
||||||
sd_ctx_t* sd_ctx = new_sd_ctx(model,
|
|
||||||
clip_l_path,
|
|
||||||
clip_g_path,
|
|
||||||
t5xxl_path,
|
|
||||||
stableDiffusionModel,
|
|
||||||
vae_path,
|
|
||||||
"",
|
|
||||||
"",
|
|
||||||
"",
|
|
||||||
"",
|
|
||||||
"",
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
threads,
|
|
||||||
SD_TYPE_COUNT,
|
|
||||||
STD_DEFAULT_RNG,
|
|
||||||
schedule,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false,
|
|
||||||
false);
|
|
||||||
|
|
||||||
if (sd_ctx == NULL) {
|
|
||||||
fprintf (stderr, "failed loading model (generic error)\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
fprintf (stderr, "Created context: OK\n");
|
|
||||||
|
|
||||||
sd_c = sd_ctx;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed , char *dst, float cfg_scale) {
|
|
||||||
|
|
||||||
sd_image_t* results;
|
|
||||||
|
|
||||||
std::vector<int> skip_layers = {7, 8, 9};
|
|
||||||
|
|
||||||
fprintf (stderr, "Generating image\n");
|
|
||||||
|
|
||||||
results = txt2img(sd_c,
|
|
||||||
text,
|
|
||||||
negativeText,
|
|
||||||
-1, //clip_skip
|
|
||||||
cfg_scale, // sfg_scale
|
|
||||||
3.5f,
|
|
||||||
0, // eta
|
|
||||||
width,
|
|
||||||
height,
|
|
||||||
sample_method,
|
|
||||||
steps,
|
|
||||||
seed,
|
|
||||||
1,
|
|
||||||
NULL,
|
|
||||||
0.9f,
|
|
||||||
20.f,
|
|
||||||
false,
|
|
||||||
"",
|
|
||||||
skip_layers.data(),
|
|
||||||
skip_layers.size(),
|
|
||||||
0,
|
|
||||||
0.01,
|
|
||||||
0.2);
|
|
||||||
|
|
||||||
if (results == NULL) {
|
|
||||||
fprintf (stderr, "NO results\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (results[0].data == NULL) {
|
|
||||||
fprintf (stderr, "Results with no data\n");
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
fprintf (stderr, "Writing PNG\n");
|
|
||||||
|
|
||||||
fprintf (stderr, "DST: %s\n", dst);
|
|
||||||
fprintf (stderr, "Width: %d\n", results[0].width);
|
|
||||||
fprintf (stderr, "Height: %d\n", results[0].height);
|
|
||||||
fprintf (stderr, "Channel: %d\n", results[0].channel);
|
|
||||||
fprintf (stderr, "Data: %p\n", results[0].data);
|
|
||||||
|
|
||||||
stbi_write_png(dst, results[0].width, results[0].height, results[0].channel,
|
|
||||||
results[0].data, 0, NULL);
|
|
||||||
fprintf (stderr, "Saved resulting image to '%s'\n", dst);
|
|
||||||
|
|
||||||
// TODO: free results. Why does it crash?
|
|
||||||
|
|
||||||
free(results[0].data);
|
|
||||||
results[0].data = NULL;
|
|
||||||
free(results);
|
|
||||||
fprintf (stderr, "gen_image is done", dst);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
int unload() {
|
|
||||||
free_sd_ctx(sd_c);
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -1,96 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
// #cgo CXXFLAGS: -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp -I${SRCDIR}/../../../../sources/stablediffusion-ggml.cpp/ggml/include
|
|
||||||
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
|
|
||||||
// #include <gosd.h>
|
|
||||||
// #include <stdlib.h>
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
|
||||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
|
||||||
"github.com/mudler/LocalAI/pkg/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
type SDGGML struct {
|
|
||||||
base.SingleThread
|
|
||||||
threads int
|
|
||||||
sampleMethod string
|
|
||||||
cfgScale float32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
|
|
||||||
|
|
||||||
sd.threads = int(opts.Threads)
|
|
||||||
|
|
||||||
modelFile := C.CString(opts.ModelFile)
|
|
||||||
defer C.free(unsafe.Pointer(modelFile))
|
|
||||||
|
|
||||||
var options **C.char
|
|
||||||
// prepare the options array to pass to C
|
|
||||||
|
|
||||||
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
|
|
||||||
length := C.size_t(len(opts.Options))
|
|
||||||
options = (**C.char)(C.malloc(length * size))
|
|
||||||
view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0:len(opts.Options):len(opts.Options)]
|
|
||||||
|
|
||||||
var diffusionModel int
|
|
||||||
|
|
||||||
var oo []string
|
|
||||||
for _, op := range opts.Options {
|
|
||||||
if op == "diffusion_model" {
|
|
||||||
diffusionModel = 1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it's an option path, we resolve absolute path from the model path
|
|
||||||
if strings.Contains(op, ":") && strings.Contains(op, "path") {
|
|
||||||
data := strings.Split(op, ":")
|
|
||||||
data[1] = filepath.Join(opts.ModelPath, data[1])
|
|
||||||
if err := utils.VerifyPath(data[1], opts.ModelPath); err == nil {
|
|
||||||
oo = append(oo, strings.Join(data, ":"))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
oo = append(oo, op)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
|
|
||||||
|
|
||||||
for i, x := range oo {
|
|
||||||
view[i] = C.CString(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
sd.cfgScale = opts.CFGScale
|
|
||||||
|
|
||||||
ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
|
|
||||||
if ret != 0 {
|
|
||||||
return fmt.Errorf("could not load model")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
|
|
||||||
t := C.CString(opts.PositivePrompt)
|
|
||||||
defer C.free(unsafe.Pointer(t))
|
|
||||||
|
|
||||||
dst := C.CString(opts.Dst)
|
|
||||||
defer C.free(unsafe.Pointer(dst))
|
|
||||||
|
|
||||||
negative := C.CString(opts.NegativePrompt)
|
|
||||||
defer C.free(unsafe.Pointer(negative))
|
|
||||||
|
|
||||||
ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
|
|
||||||
if ret != 0 {
|
|
||||||
return fmt.Errorf("inference failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,8 +0,0 @@
-#ifdef __cplusplus
-extern "C" {
-#endif
-int load_model(char *model, char* options[], int threads, int diffusionModel);
-int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
-#ifdef __cplusplus
-}
-#endif
@@ -1,6 +1,7 @@
 package main
 
 // Note: this is started internally by LocalAI and a server is allocated for each model
 
 import (
 	"flag"
 
@@ -14,7 +15,7 @@ var (
 func main() {
 	flag.Parse()
 
-	if err := grpc.StartServer(*addr, &Bark{}); err != nil {
+	if err := grpc.StartServer(*addr, &Image{}); err != nil {
 		panic(err)
 	}
 }
backend/go/image/stablediffusion/stablediffusion.go (new file, 33 lines)
@@ -0,0 +1,33 @@
+package main
+
+// This is a wrapper to statisfy the GRPC service interface
+// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
+import (
+	"github.com/mudler/LocalAI/pkg/grpc/base"
+	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
+	"github.com/mudler/LocalAI/pkg/stablediffusion"
+)
+
+type Image struct {
+	base.SingleThread
+	stablediffusion *stablediffusion.StableDiffusion
+}
+
+func (image *Image) Load(opts *pb.ModelOptions) error {
+	var err error
+	// Note: the Model here is a path to a directory containing the model files
+	image.stablediffusion, err = stablediffusion.New(opts.ModelFile)
+	return err
+}
+
+func (image *Image) GenerateImage(opts *pb.GenerateImageRequest) error {
+	return image.stablediffusion.GenerateImage(
+		int(opts.Height),
+		int(opts.Width),
+		int(opts.Mode),
+		int(opts.Step),
+		int(opts.Seed),
+		opts.PositivePrompt,
+		opts.NegativePrompt,
+		opts.Dst)
+}
@@ -1,6 +1,7 @@
 package main
 
 // Note: this is started internally by LocalAI and a server is allocated for each model
 
 import (
 	"flag"
 
@@ -14,7 +15,7 @@ var (
 func main() {
 	flag.Parse()
 
-	if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {
+	if err := grpc.StartServer(*addr, &Image{}); err != nil {
 		panic(err)
 	}
 }
backend/go/image/tinydream/tinydream.go (new file, 32 lines)
@@ -0,0 +1,32 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
// This is a wrapper to statisfy the GRPC service interface
|
||||||
|
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
|
||||||
|
import (
|
||||||
|
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||||
|
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||||
|
"github.com/mudler/LocalAI/pkg/tinydream"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Image struct {
|
||||||
|
base.SingleThread
|
||||||
|
tinydream *tinydream.TinyDream
|
||||||
|
}
|
||||||
|
|
||||||
|
func (image *Image) Load(opts *pb.ModelOptions) error {
|
||||||
|
var err error
|
||||||
|
// Note: the Model here is a path to a directory containing the model files
|
||||||
|
image.tinydream, err = tinydream.New(opts.ModelFile)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (image *Image) GenerateImage(opts *pb.GenerateImageRequest) error {
|
||||||
|
return image.tinydream.GenerateImage(
|
||||||
|
int(opts.Height),
|
||||||
|
int(opts.Width),
|
||||||
|
int(opts.Step),
|
||||||
|
int(opts.Seed),
|
||||||
|
opts.PositivePrompt,
|
||||||
|
opts.NegativePrompt,
|
||||||
|
opts.Dst)
|
||||||
|
}
|
||||||
backend/go/llm/bert/bert.go (new file, 34 lines)
@@ -0,0 +1,34 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
// This is a wrapper to statisfy the GRPC service interface
|
||||||
|
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
|
||||||
|
import (
|
||||||
|
bert "github.com/go-skynet/go-bert.cpp"
|
||||||
|
|
||||||
|
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||||
|
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Embeddings struct {
|
||||||
|
base.SingleThread
|
||||||
|
bert *bert.Bert
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *Embeddings) Load(opts *pb.ModelOptions) error {
|
||||||
|
model, err := bert.New(opts.ModelFile)
|
||||||
|
llm.bert = model
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *Embeddings) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
|
||||||
|
|
||||||
|
if len(opts.EmbeddingTokens) > 0 {
|
||||||
|
tokens := []int{}
|
||||||
|
for _, t := range opts.EmbeddingTokens {
|
||||||
|
tokens = append(tokens, int(t))
|
||||||
|
}
|
||||||
|
return llm.bert.TokenEmbeddings(tokens, bert.SetThreads(int(opts.Threads)))
|
||||||
|
}
|
||||||
|
|
||||||
|
return llm.bert.Embeddings(opts.Embeddings, bert.SetThreads(int(opts.Threads)))
|
||||||
|
}
|
||||||
backend/go/llm/bert/main.go (new file, 21 lines)
@@ -0,0 +1,21 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
// Note: this is started internally by LocalAI and a server is allocated for each model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
|
||||||
|
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
addr = flag.String("addr", "localhost:50051", "the address to connect to")
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if err := grpc.StartServer(*addr, &Embeddings{}); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
backend/go/llm/llama-ggml/llama.go (new file, 204 lines)
@@ -0,0 +1,204 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
// This is a wrapper to statisfy the GRPC service interface
|
||||||
|
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/go-skynet/go-llama.cpp"
|
||||||
|
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||||
|
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
type LLM struct {
|
||||||
|
base.SingleThread
|
||||||
|
|
||||||
|
llama *llama.LLama
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) Load(opts *pb.ModelOptions) error {
|
||||||
|
ropeFreqBase := float32(10000)
|
||||||
|
ropeFreqScale := float32(1)
|
||||||
|
|
||||||
|
if opts.RopeFreqBase != 0 {
|
||||||
|
ropeFreqBase = opts.RopeFreqBase
|
||||||
|
}
|
||||||
|
if opts.RopeFreqScale != 0 {
|
||||||
|
ropeFreqScale = opts.RopeFreqScale
|
||||||
|
}
|
||||||
|
|
||||||
|
llamaOpts := []llama.ModelOption{
|
||||||
|
llama.WithRopeFreqBase(ropeFreqBase),
|
||||||
|
llama.WithRopeFreqScale(ropeFreqScale),
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.NGQA != 0 {
|
||||||
|
llamaOpts = append(llamaOpts, llama.WithGQA(int(opts.NGQA)))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.RMSNormEps != 0 {
|
||||||
|
llamaOpts = append(llamaOpts, llama.WithRMSNormEPS(opts.RMSNormEps))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.ContextSize != 0 {
|
||||||
|
llamaOpts = append(llamaOpts, llama.SetContext(int(opts.ContextSize)))
|
||||||
|
}
|
||||||
|
if opts.F16Memory {
|
||||||
|
llamaOpts = append(llamaOpts, llama.EnableF16Memory)
|
||||||
|
}
|
||||||
|
if opts.Embeddings {
|
||||||
|
llamaOpts = append(llamaOpts, llama.EnableEmbeddings)
|
||||||
|
}
|
||||||
|
if opts.NGPULayers != 0 {
|
||||||
|
llamaOpts = append(llamaOpts, llama.SetGPULayers(int(opts.NGPULayers)))
|
||||||
|
}
|
||||||
|
|
||||||
|
llamaOpts = append(llamaOpts, llama.SetMMap(opts.MMap))
|
||||||
|
llamaOpts = append(llamaOpts, llama.SetMainGPU(opts.MainGPU))
|
||||||
|
llamaOpts = append(llamaOpts, llama.SetTensorSplit(opts.TensorSplit))
|
||||||
|
if opts.NBatch != 0 {
|
||||||
|
llamaOpts = append(llamaOpts, llama.SetNBatch(int(opts.NBatch)))
|
||||||
|
} else {
|
||||||
|
llamaOpts = append(llamaOpts, llama.SetNBatch(512))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.NUMA {
|
||||||
|
llamaOpts = append(llamaOpts, llama.EnableNUMA)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.LowVRAM {
|
||||||
|
llamaOpts = append(llamaOpts, llama.EnabelLowVRAM)
|
||||||
|
}
|
||||||
|
|
||||||
|
model, err := llama.New(opts.ModelFile, llamaOpts...)
|
||||||
|
llm.llama = model
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildPredictOptions(opts *pb.PredictOptions) []llama.PredictOption {
|
||||||
|
ropeFreqBase := float32(10000)
|
||||||
|
ropeFreqScale := float32(1)
|
||||||
|
|
||||||
|
if opts.RopeFreqBase != 0 {
|
||||||
|
ropeFreqBase = opts.RopeFreqBase
|
||||||
|
}
|
||||||
|
if opts.RopeFreqScale != 0 {
|
||||||
|
ropeFreqScale = opts.RopeFreqScale
|
||||||
|
}
|
||||||
|
predictOptions := []llama.PredictOption{
|
||||||
|
llama.SetTemperature(opts.Temperature),
|
||||||
|
llama.SetTopP(opts.TopP),
|
||||||
|
llama.SetTopK(int(opts.TopK)),
|
||||||
|
llama.SetTokens(int(opts.Tokens)),
|
||||||
|
llama.SetThreads(int(opts.Threads)),
|
||||||
|
llama.WithGrammar(opts.Grammar),
|
||||||
|
llama.SetRopeFreqBase(ropeFreqBase),
|
||||||
|
llama.SetRopeFreqScale(ropeFreqScale),
|
||||||
|
llama.SetNegativePromptScale(opts.NegativePromptScale),
|
||||||
|
llama.SetNegativePrompt(opts.NegativePrompt),
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.PromptCacheAll {
|
||||||
|
predictOptions = append(predictOptions, llama.EnablePromptCacheAll)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.PromptCacheRO {
|
||||||
|
predictOptions = append(predictOptions, llama.EnablePromptCacheRO)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Expected absolute path
|
||||||
|
if opts.PromptCachePath != "" {
|
||||||
|
predictOptions = append(predictOptions, llama.SetPathPromptCache(opts.PromptCachePath))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Mirostat != 0 {
|
||||||
|
predictOptions = append(predictOptions, llama.SetMirostat(int(opts.Mirostat)))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.MirostatETA != 0 {
|
||||||
|
predictOptions = append(predictOptions, llama.SetMirostatETA(opts.MirostatETA))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.MirostatTAU != 0 {
|
||||||
|
predictOptions = append(predictOptions, llama.SetMirostatTAU(opts.MirostatTAU))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Debug {
|
||||||
|
predictOptions = append(predictOptions, llama.Debug)
|
||||||
|
}
|
||||||
|
|
||||||
|
predictOptions = append(predictOptions, llama.SetStopWords(opts.StopPrompts...))
|
||||||
|
|
||||||
|
if opts.PresencePenalty != 0 {
|
||||||
|
predictOptions = append(predictOptions, llama.SetPenalty(opts.PresencePenalty))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.NKeep != 0 {
|
||||||
|
predictOptions = append(predictOptions, llama.SetNKeep(int(opts.NKeep)))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Batch != 0 {
|
||||||
|
predictOptions = append(predictOptions, llama.SetBatch(int(opts.Batch)))
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.F16KV {
|
||||||
|
predictOptions = append(predictOptions, llama.EnableF16KV)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.IgnoreEOS {
|
||||||
|
predictOptions = append(predictOptions, llama.IgnoreEOS)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opts.Seed != 0 {
|
||||||
|
predictOptions = append(predictOptions, llama.SetSeed(int(opts.Seed)))
|
||||||
|
}
|
||||||
|
|
||||||
|
//predictOptions = append(predictOptions, llama.SetLogitBias(c.Seed))
|
||||||
|
|
||||||
|
predictOptions = append(predictOptions, llama.SetFrequencyPenalty(opts.FrequencyPenalty))
|
||||||
|
predictOptions = append(predictOptions, llama.SetMlock(opts.MLock))
|
||||||
|
predictOptions = append(predictOptions, llama.SetMemoryMap(opts.MMap))
|
||||||
|
predictOptions = append(predictOptions, llama.SetPredictionMainGPU(opts.MainGPU))
|
||||||
|
predictOptions = append(predictOptions, llama.SetPredictionTensorSplit(opts.TensorSplit))
|
||||||
|
predictOptions = append(predictOptions, llama.SetTailFreeSamplingZ(opts.TailFreeSamplingZ))
|
||||||
|
predictOptions = append(predictOptions, llama.SetTypicalP(opts.TypicalP))
|
||||||
|
return predictOptions
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
|
||||||
|
return llm.llama.Predict(opts.Prompt, buildPredictOptions(opts)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
|
||||||
|
predictOptions := buildPredictOptions(opts)
|
||||||
|
|
||||||
|
predictOptions = append(predictOptions, llama.SetTokenCallback(func(token string) bool {
|
||||||
|
results <- token
|
||||||
|
return true
|
||||||
|
}))
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
_, err := llm.llama.Predict(opts.Prompt, predictOptions...)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("err: ", err)
|
||||||
|
}
|
||||||
|
close(results)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) Embeddings(opts *pb.PredictOptions) ([]float32, error) {
|
||||||
|
predictOptions := buildPredictOptions(opts)
|
||||||
|
|
||||||
|
if len(opts.EmbeddingTokens) > 0 {
|
||||||
|
tokens := []int{}
|
||||||
|
for _, t := range opts.EmbeddingTokens {
|
||||||
|
tokens = append(tokens, int(t))
|
||||||
|
}
|
||||||
|
return llm.llama.TokenEmbeddings(tokens, predictOptions...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return llm.llama.Embeddings(opts.Embeddings, predictOptions...)
|
||||||
|
}
|
||||||
backend/go/llm/llama-ggml/main.go (new file, 19 lines)
@@ -0,0 +1,19 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
|
||||||
|
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
addr = flag.String("addr", "localhost:50051", "the address to connect to")
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if err := grpc.StartServer(*addr, &LLM{}); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -15,7 +15,7 @@
 func main() {
 	flag.Parse()
 
-	if err := grpc.StartServer(*addr, &VAD{}); err != nil {
+	if err := grpc.StartServer(*addr, &LLM{}); err != nil {
 		panic(err)
 	}
 }
backend/go/llm/rwkv/rwkv.go (new file, 95 lines)
@@ -0,0 +1,95 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
// This is a wrapper to statisfy the GRPC service interface
|
||||||
|
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/donomii/go-rwkv.cpp"
|
||||||
|
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||||
|
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
const tokenizerSuffix = ".tokenizer.json"
|
||||||
|
|
||||||
|
type LLM struct {
|
||||||
|
base.SingleThread
|
||||||
|
|
||||||
|
rwkv *rwkv.RwkvState
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) Load(opts *pb.ModelOptions) error {
|
||||||
|
tokenizerFile := opts.Tokenizer
|
||||||
|
if tokenizerFile == "" {
|
||||||
|
modelFile := filepath.Base(opts.ModelFile)
|
||||||
|
tokenizerFile = modelFile + tokenizerSuffix
|
||||||
|
}
|
||||||
|
modelPath := filepath.Dir(opts.ModelFile)
|
||||||
|
tokenizerPath := filepath.Join(modelPath, tokenizerFile)
|
||||||
|
|
||||||
|
model := rwkv.LoadFiles(opts.ModelFile, tokenizerPath, uint32(opts.GetThreads()))
|
||||||
|
|
||||||
|
if model == nil {
|
||||||
|
return fmt.Errorf("rwkv could not load model")
|
||||||
|
}
|
||||||
|
llm.rwkv = model
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
|
||||||
|
stopWord := "\n"
|
||||||
|
if len(opts.StopPrompts) > 0 {
|
||||||
|
stopWord = opts.StopPrompts[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := llm.rwkv.ProcessInput(opts.Prompt); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
response := llm.rwkv.GenerateResponse(int(opts.Tokens), stopWord, float32(opts.Temperature), float32(opts.TopP), nil)
|
||||||
|
|
||||||
|
return response, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) PredictStream(opts *pb.PredictOptions, results chan string) error {
|
||||||
|
go func() {
|
||||||
|
|
||||||
|
stopWord := "\n"
|
||||||
|
if len(opts.StopPrompts) > 0 {
|
||||||
|
stopWord = opts.StopPrompts[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := llm.rwkv.ProcessInput(opts.Prompt); err != nil {
|
||||||
|
fmt.Println("Error processing input: ", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
llm.rwkv.GenerateResponse(int(opts.Tokens), stopWord, float32(opts.Temperature), float32(opts.TopP), func(s string) bool {
|
||||||
|
results <- s
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
close(results)
|
||||||
|
}()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (llm *LLM) TokenizeString(opts *pb.PredictOptions) (pb.TokenizationResponse, error) {
|
||||||
|
tokens, err := llm.rwkv.Tokenizer.Encode(opts.Prompt)
|
||||||
|
if err != nil {
|
||||||
|
return pb.TokenizationResponse{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
l := len(tokens)
|
||||||
|
i32Tokens := make([]int32, l)
|
||||||
|
|
||||||
|
for i, t := range tokens {
|
||||||
|
i32Tokens[i] = int32(t.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
return pb.TokenizationResponse{
|
||||||
|
Length: int32(l),
|
||||||
|
Tokens: i32Tokens,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
@@ -311,16 +311,12 @@ func (s *Store) StoresGet(opts *pb.StoresGetOptions) (pb.StoresGetResult, error)
 }
 
 func isNormalized(k []float32) bool {
-	var sum float64
+	var sum float32
 
 	for _, v := range k {
-		v64 := float64(v)
-		sum += v64*v64
+		sum += v
 	}
 
-	s := math.Sqrt(sum)
-
-	return s >= 0.99 && s <= 1.01
+	return sum == 1.0
 }
 
 // TODO: This we could replace with handwritten SIMD code
@@ -332,7 +328,7 @@ func normalizedCosineSimilarity(k1, k2 []float32) float32 {
 		dot += k1[i] * k2[i]
 	}
 
-	assert(dot >= -1.01 && dot <= 1.01, fmt.Sprintf("dot = %f", dot))
+	assert(dot >= -1 && dot <= 1, fmt.Sprintf("dot = %f", dot))
 
 	// 2.0 * (1.0 - dot) would be the Euclidean distance
 	return dot
@@ -422,7 +418,7 @@ func cosineSimilarity(k1, k2 []float32, mag1 float64) float32 {
 
 	sim := float32(dot / (mag1 * math.Sqrt(mag2)))
 
-	assert(sim >= -1.01 && sim <= 1.01, fmt.Sprintf("sim = %f", sim))
+	assert(sim >= -1 && sim <= 1, fmt.Sprintf("sim = %f", sim))
 
 	return sim
 }
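The isNormalized change is the substantive part of the hunk above: the removed (-) version measures Euclidean length through a sum of squares with a small tolerance, while the (+) version sums the raw components and compares them to exactly 1.0. A small self-contained Go sketch (the isNormalizedNaive name is mine, for illustration) shows a unit vector on which the two checks disagree:

```go
package main

import (
	"fmt"
	"math"
)

// Sum-of-squares check, mirroring the removed (-) version above:
// a vector is normalized when its Euclidean length is close to 1.
func isNormalized(k []float32) bool {
	var sum float64
	for _, v := range k {
		v64 := float64(v)
		sum += v64 * v64
	}
	s := math.Sqrt(sum)
	return s >= 0.99 && s <= 1.01
}

// Plain component sum compared to exactly 1.0, mirroring the (+) version.
func isNormalizedNaive(k []float32) bool {
	var sum float32
	for _, v := range k {
		sum += v
	}
	return sum == 1.0
}

func main() {
	unit := []float32{0.6, 0.8} // Euclidean length 1, component sum 1.4
	fmt.Println(isNormalized(unit))      // true
	fmt.Println(isNormalizedNaive(unit)) // false
}
```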
@@ -1,54 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
// This is a wrapper to statisfy the GRPC service interface
|
|
||||||
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
|
||||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
|
||||||
"github.com/streamer45/silero-vad-go/speech"
|
|
||||||
)
|
|
||||||
|
|
||||||
type VAD struct {
|
|
||||||
base.SingleThread
|
|
||||||
detector *speech.Detector
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vad *VAD) Load(opts *pb.ModelOptions) error {
|
|
||||||
v, err := speech.NewDetector(speech.DetectorConfig{
|
|
||||||
ModelPath: opts.ModelFile,
|
|
||||||
SampleRate: 16000,
|
|
||||||
//WindowSize: 1024,
|
|
||||||
Threshold: 0.5,
|
|
||||||
MinSilenceDurationMs: 0,
|
|
||||||
SpeechPadMs: 0,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("create silero detector: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
vad.detector = v
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vad *VAD) VAD(req *pb.VADRequest) (pb.VADResponse, error) {
|
|
||||||
audio := req.Audio
|
|
||||||
|
|
||||||
segments, err := vad.detector.Detect(audio)
|
|
||||||
if err != nil {
|
|
||||||
return pb.VADResponse{}, fmt.Errorf("detect: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
vadSegments := []*pb.VADSegment{}
|
|
||||||
for _, s := range segments {
|
|
||||||
vadSegments = append(vadSegments, &pb.VADSegment{
|
|
||||||
Start: float32(s.SpeechStartAt),
|
|
||||||
End: float32(s.SpeechEndAt),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return pb.VADResponse{
|
|
||||||
Segments: vadSegments,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
@@ -1,9 +1,6 @@
-.DEFAULT_GOAL := install
-
-.PHONY: install
-install:
+.PHONY: autogptq
+autogptq: protogen
 	bash install.sh
-	$(MAKE) protogen
 
 .PHONY: protogen
 protogen: backend_pb2_grpc.py backend_pb2.py
@@ -13,7 +10,7 @@ protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
 backend_pb2_grpc.py backend_pb2.py:
-	bash protogen.sh
+	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
 
 .PHONY: clean
 clean: protogen-clean
backend/python/autogptq/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
+# Creating a separate environment for the autogptq project
+
+```
+make autogptq
+```
153  backend/python/autogptq/backend.py  Executable file
@@ -0,0 +1,153 @@
+#!/usr/bin/env python3
+from concurrent import futures
+import argparse
+import signal
+import sys
+import os
+import time
+import base64
+
+import grpc
+import backend_pb2
+import backend_pb2_grpc
+
+from auto_gptq import AutoGPTQForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import TextGenerationPipeline
+
+_ONE_DAY_IN_SECONDS = 60 * 60 * 24
+
+# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
+MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
+
+# Implement the BackendServicer class with the service methods
+class BackendServicer(backend_pb2_grpc.BackendServicer):
+    def Health(self, request, context):
+        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
+    def LoadModel(self, request, context):
+        try:
+            device = "cuda:0"
+            if request.Device != "":
+                device = request.Device
+
+            # support loading local model files
+            model_path = os.path.join(os.environ.get('MODELS_PATH', './'), request.Model)
+            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=True, trust_remote_code=request.TrustRemoteCode)
+
+            # support model `Qwen/Qwen-VL-Chat-Int4`
+            if "qwen-vl" in request.Model.lower():
+                self.model_name = "Qwen-VL-Chat"
+                model = AutoModelForCausalLM.from_pretrained(model_path,
+                                                             trust_remote_code=request.TrustRemoteCode,
+                                                             device_map="auto").eval()
+            else:
+                model = AutoGPTQForCausalLM.from_quantized(model_path,
+                                                           model_basename=request.ModelBaseName,
+                                                           use_safetensors=True,
+                                                           trust_remote_code=request.TrustRemoteCode,
+                                                           device=device,
+                                                           use_triton=request.UseTriton,
+                                                           quantize_config=None)
+
+            self.model = model
+            self.tokenizer = tokenizer
+        except Exception as err:
+            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
+        return backend_pb2.Result(message="Model loaded successfully", success=True)
+
+    def Predict(self, request, context):
+        penalty = 1.0
+        if request.Penalty != 0.0:
+            penalty = request.Penalty
+        tokens = 512
+        if request.Tokens != 0:
+            tokens = request.Tokens
+        top_p = 0.95
+        if request.TopP != 0.0:
+            top_p = request.TopP
+
+
+        prompt_images = self.recompile_vl_prompt(request)
+        compiled_prompt = prompt_images[0]
+        print(f"Prompt: {compiled_prompt}", file=sys.stderr)
+
+        # Implement Predict RPC
+        pipeline = TextGenerationPipeline(
+            model=self.model,
+            tokenizer=self.tokenizer,
+            max_new_tokens=tokens,
+            temperature=request.Temperature,
+            top_p=top_p,
+            repetition_penalty=penalty,
+        )
+        t = pipeline(compiled_prompt)[0]["generated_text"]
+        print(f"generated_text: {t}", file=sys.stderr)
+
+        if compiled_prompt in t:
+            t = t.replace(compiled_prompt, "")
+        # house keeping. Remove the image files from /tmp folder
+        for img_path in prompt_images[1]:
+            try:
+                os.remove(img_path)
+            except Exception as e:
+                print(f"Error removing image file: {img_path}, {e}", file=sys.stderr)
+
+        return backend_pb2.Result(message=bytes(t, encoding='utf-8'))
+
+    def PredictStream(self, request, context):
+        # Implement PredictStream RPC
+        #for reply in some_data_generator():
+        #    yield reply
+        # Not implemented yet
+        return self.Predict(request, context)
+
+    def recompile_vl_prompt(self, request):
+        prompt = request.Prompt
+        image_paths = []
+
+        if "qwen-vl" in self.model_name.lower():
+            # request.Images is an array which contains base64 encoded images. Iterate the request.Images array, decode and save each image to /tmp folder with a random filename.
+            # Then, save the image file paths to an array "image_paths".
+            # read "request.Prompt", replace "[img-%d]" with the image file paths in the order they appear in "image_paths". Save the new prompt to "prompt".
+            for i, img in enumerate(request.Images):
+                timestamp = str(int(time.time() * 1000))  # Generate timestamp
+                img_path = f"/tmp/vl-{timestamp}.jpg"  # Use timestamp in filename
+                with open(img_path, "wb") as f:
+                    f.write(base64.b64decode(img))
+                image_paths.append(img_path)
+                prompt = prompt.replace(f"[img-{i}]", "<img>" + img_path + "</img>,")
+        else:
+            prompt = request.Prompt
+        return (prompt, image_paths)
+
+def serve(address):
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
+    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
+    server.add_insecure_port(address)
+    server.start()
+    print("Server started. Listening on: " + address, file=sys.stderr)
+
+    # Define the signal handler function
+    def signal_handler(sig, frame):
+        print("Received termination signal. Shutting down...")
+        server.stop(0)
+        sys.exit(0)
+
+    # Set the signal handlers for SIGINT and SIGTERM
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Run the gRPC server.")
+    parser.add_argument(
+        "--addr", default="localhost:50051", help="The address to bind the server to."
+    )
+    args = parser.parse_args()
+
+    serve(args.addr)
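The `backend.py` added above is a standard LocalAI Python backend: a gRPC server exposing `Health`, `LoadModel`, `Predict` and `PredictStream`, with Qwen-VL prompts carrying images as base64 strings referenced by `[img-0]`, `[img-1]`, ... placeholders. The sketch below shows how a client might exercise it; it is illustrative only: the stub name `BackendStub` and the request message names (`HealthMessage`, `ModelOptions`, `PredictOptions`) are assumed from the generated `backend_pb2*` modules and are not shown in this diff, and the model name and image path are placeholders.

```python
# Hypothetical client for a backend started with:  python3 backend.py --addr localhost:50051
# Message/stub names here are assumptions based on backend.proto conventions, not on this diff.
import base64

import grpc
import backend_pb2
import backend_pb2_grpc

channel = grpc.insecure_channel("localhost:50051")
stub = backend_pb2_grpc.BackendStub(channel)

print(stub.Health(backend_pb2.HealthMessage()))

# Fields mirror what LoadModel reads from the request: Model, Device,
# ModelBaseName, UseTriton, TrustRemoteCode.
res = stub.LoadModel(backend_pb2.ModelOptions(Model="some-gptq-model"))  # placeholder model name
if not res.success:
    raise SystemExit(res.message)

# For a Qwen-VL model, images travel base64-encoded and are referenced
# in the prompt as [img-0], [img-1], ... (see recompile_vl_prompt above).
with open("example.jpg", "rb") as f:  # placeholder image path
    img_b64 = base64.b64encode(f.read()).decode()

reply = stub.Predict(backend_pb2.PredictOptions(
    Prompt="Describe [img-0] in one sentence.",
    Images=[img_b64],
    Tokens=256,
    Temperature=0.7,
))
print(reply.message)
```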
@@ -1,3 +1,2 @@
 --extra-index-url https://download.pytorch.org/whl/cu118
-torch==2.4.1+cu118
-transformers
+torch
1  backend/python/autogptq/requirements-cublas12.txt  Normal file
@@ -0,0 +1 @@
+torch
@@ -1,3 +1,2 @@
 --extra-index-url https://download.pytorch.org/whl/rocm6.0
 torch
-faster-whisper
5  backend/python/autogptq/requirements-intel.txt  Normal file
@@ -0,0 +1,5 @@
+--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+intel-extension-for-pytorch
+torch
+optimum[openvino]
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406
6  backend/python/autogptq/requirements.txt  Normal file
@@ -0,0 +1,6 @@
+accelerate
+auto-gptq==0.7.1
+grpcio==1.66.1
+protobuf
+certifi
+transformers
@@ -61,12 +61,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         return backend_pb2.Result(success=True)
 
 def serve(address):
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
-                         options=[
-                             ('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
-                             ('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
-                             ('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
-                         ])
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
     backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
     server.add_insecure_port(address)
     server.start()
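The hunk above (and the identical one at `@@ -86,12 +86,7 @@` further down) drops the explicit channel options from `grpc.server(...)`. Those options raise gRPC's default message-size limits (roughly 4 MB on receive) to 50 MB, which matters once prompts or base64-encoded images get large. A sketch of the fuller form for reference, not taken from either side of this diff:

```python
from concurrent import futures

import grpc

MAX_WORKERS = 1
FIFTY_MB = 50 * 1024 * 1024

# Without these options gRPC falls back to its default limits (about 4 MB on
# receive), so oversized payloads such as base64-encoded images would be
# rejected at the transport layer.
server = grpc.server(
    futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
    options=[
        ("grpc.max_message_length", FIFTY_MB),
        ("grpc.max_send_message_length", FIFTY_MB),
        ("grpc.max_receive_message_length", FIFTY_MB),
    ],
)
```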
@@ -1,4 +1,4 @@
 transformers
 accelerate
-torch==2.4.1
-torchaudio==2.4.1
+torch
+torchaudio
@@ -1,5 +1,5 @@
 --extra-index-url https://download.pytorch.org/whl/cu118
-torch==2.4.1+cu118
-torchaudio==2.4.1+cu118
+torch
+torchaudio
 transformers
 accelerate
@@ -1,4 +1,4 @@
-torch==2.4.1
-torchaudio==2.4.1
+torch
+torchaudio
 transformers
 accelerate
@@ -1,5 +1,5 @@
 --extra-index-url https://download.pytorch.org/whl/rocm6.0
-torch==2.4.1+rocm6.0
-torchaudio==2.4.1+rocm6.0
+torch
+torchaudio
 transformers
 accelerate
@@ -1,9 +1,8 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
-torch==2.3.1+cxx11.abi
-torchaudio==2.3.1+cxx11.abi
-oneccl_bind_pt==2.3.100+xpu
+intel-extension-for-pytorch
+torch
+torchaudio
 optimum[openvino]
-setuptools
+setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406
 transformers
 accelerate
@@ -1,4 +1,4 @@
 bark==0.1.5
-grpcio==1.71.0
+grpcio==1.66.1
 protobuf
 certifi
@@ -17,9 +17,6 @@
 # LIMIT_TARGETS="cublas12"
 # source $(dirname $0)/../common/libbackend.sh
 #
-
-PYTHON_VERSION="3.10"
-
 function init() {
     # Name of the backend (directory name)
     BACKEND_NAME=${PWD##*/}
@@ -91,7 +88,7 @@ function getBuildProfile() {
 # always result in an activated virtual environment
 function ensureVenv() {
     if [ ! -d "${EDIR}/venv" ]; then
-        uv venv --python ${PYTHON_VERSION} ${EDIR}/venv
+        uv venv ${EDIR}/venv
         echo "virtualenv created"
     fi
 
@@ -1,9 +1,8 @@
 .DEFAULT_GOAL := install
 
 .PHONY: install
-install:
+install: protogen
 	bash install.sh
-	$(MAKE) protogen
 
 .PHONY: protogen
 protogen: backend_pb2_grpc.py backend_pb2.py
@@ -13,7 +12,7 @@ protogen-clean:
 	$(RM) backend_pb2_grpc.py backend_pb2.py
 
 backend_pb2_grpc.py backend_pb2.py:
-	bash protogen.sh
+	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
 
 .PHONY: clean
 clean: protogen-clean
@@ -1,6 +0,0 @@
-#!/bin/bash
-set -e
-
-source $(dirname $0)/../common/libbackend.sh
-
-python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
@@ -1,5 +1,4 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
-torch==2.3.1+cxx11.abi
-oneccl_bind_pt==2.3.100+xpu
+intel-extension-for-pytorch
+torch
 optimum[openvino]
@@ -1,3 +1,2 @@
-grpcio==1.71.0
+grpcio==1.66.1
 protobuf
-grpcio-tools
@@ -86,12 +86,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         return backend_pb2.Result(success=True)
 
 def serve(address):
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
-                         options=[
-                             ('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
-                             ('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
-                             ('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
-                         ])
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
     backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
     server.add_insecure_port(address)
     server.start()
@@ -1,4 +1,3 @@
-transformers==4.48.3
+transformers
 accelerate
-torch==2.4.1
-coqui-tts
+torch
@@ -1,6 +1,5 @@
 --extra-index-url https://download.pytorch.org/whl/cu118
-torch==2.4.1+cu118
-torchaudio==2.4.1+cu118
-transformers==4.48.3
+torch
+torchaudio
+transformers
 accelerate
-coqui-tts
@@ -1,5 +1,4 @@
-torch==2.4.1
-torchaudio==2.4.1
-transformers==4.48.3
+torch
+torchaudio
+transformers
 accelerate
-coqui-tts
@@ -1,6 +1,5 @@
 --extra-index-url https://download.pytorch.org/whl/rocm6.0
-torch==2.4.1+rocm6.0
-torchaudio==2.4.1+rocm6.0
-transformers==4.48.3
+torch
+torchaudio
+transformers
 accelerate
-coqui-tts
@@ -1,10 +1,8 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.3.110+xpu
-torch==2.3.1+cxx11.abi
-torchaudio==2.3.1+cxx11.abi
-oneccl_bind_pt==2.3.100+xpu
+intel-extension-for-pytorch
+torch
+torchaudio
 optimum[openvino]
-setuptools
-transformers==4.48.3
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406
+transformers
 accelerate
-coqui-tts
@@ -1,4 +1,4 @@
-grpcio==1.71.0
+TTS==0.22.0
+grpcio==1.66.1
 protobuf
 certifi
-packaging==24.1
Some files were not shown because too many files have changed in this diff.