Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-04 03:32:40 -05:00)

Compare commits: 128 commits (dave-gray1 ... v2.15.0)
Commits in this comparison (SHA1 only):

f69de3be0d, 650ae620c5, 6a209cbef6, 9786bb826d, 9b4c6f348a, cb6ddb21ec, 0baacca605, 222d714ec7
fd2d89d37b, 6440b608dc, 1937118eab, bc272d1e4b, d651f390cd, ea777f8716, eca5200fbd, 0809e9e7a0
b66baa3db6, 6eb77f0d3a, b20354b3ad, d6f76c75e1, ed4f412f1c, 5bf56e01aa, 5ff5f0b393, 6559ac11b1
02ec546dd6, 995aa5ed21, e28ba4b807, d1e3436de5, d3ddc9e4aa, fea9522982, fe055d4b36, 581b894789
477655f6e6, 169d8d21ff, c5475020fe, b52ff1249f, c5798500cb, 67ad3532ec, 5cb96fe7df, 810e8e5855
f3bcc648e7, 3096566333, f50c6a4e88, ab4ee54855, f2d35062d4, b69ff46c7e, 117c9873e1, 17e94fbcb1
92f7feb874, b70e2bffa3, 06c43ca285, 530bec9c64, fa10302dd2, 54faaa87ea, daba8a85f9, ac0f3d6e82
da0b6a89ae, 929a68c06d, a0aa5d01a1, dc834cc9d2, b58274b8a2, a31d00d904, 2cc1bd85af, 2c5a46bc34
f7f8b4804b, e5bd9a76c7, 4690b534e0, 6a7a7996bb, 962ebbaf77, f90d56d371, 445cfd4db3, b24d44dc56
cd31f8d865, 970cb3a219, f7aabf1b50, e38610e521, 3754f154ee, 29d7812344, 5fd46175dc, 52a268c38c
53c3842bc2, c4f958e11b, 147440b39b, baff5ff8c2, ea13863221, 93ca56086e, 11c48a0004, b7ea9602f5
982dc6a2bd, 74d903acca, 5fef3b0ff1, 0674893649, e8d44447ad, a24cd4fda0, 01860674c4, 987b7ad42d
21974fe1d3, 26e1892521, a78cd67737, 5e243ceaeb, 1a0a6f60a7, 3179c019af, a8089494fd, a248ede222
0f0ae13ad0, 773d5d23d5, c3982212f9, 7e6bf6e7a1, 9fc0135991, 164be58445, 1f8461767d, 935f4c23f6
4c97406f2b, fb2a05ff43, 030d555995, 56d843c263, 2dc1fa2474, c9451cb604, 006306b183, 2cd4936c99
44bc540bb5, 6b411ae212, eed285f9de, c8dd8e5ef4, 365ef92530, 5fceb876c4, d98063e80e, 45761f8be2
.env (4 lines changed)

@@ -10,7 +10,7 @@
 #
 ## Define galleries.
 ## models will to install will be visible in `/models/available`
-# LOCALAI_GALLERIES=[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}]
+# LOCALAI_GALLERIES=[{"name":"localai", "url":"github:mudler/LocalAI/gallery/index.yaml@master"}]
 
 ## CORS settings
 # LOCALAI_CORS=true
@@ -86,4 +86,4 @@
 # LOCALAI_WATCHDOG_BUSY=true
 #
 # Time in duration format (e.g. 1h30m) after which a backend is considered busy
-# LOCALAI_WATCHDOG_BUSY_TIMEOUT=5m
+# LOCALAI_WATCHDOG_BUSY_TIMEOUT=5m
.github/bump_docs.sh (vendored, 2 lines changed)

@@ -2,6 +2,6 @@
 set -xe
 REPO=$1
 
-LATEST_TAG=$(curl -s "https://api.github.com/repos/$REPO/releases/latest" | jq -r '.name')
+LATEST_TAG=$(curl -s "https://api.github.com/repos/$REPO/releases/latest" | jq -r '.tag_name')
 
 cat <<< $(jq ".version = \"$LATEST_TAG\"" docs/data/version.json) > docs/data/version.json
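For context on the change above: in the GitHub releases API response, `name` is the free-form release title while `tag_name` is the git tag the release was cut from, so `tag_name` is the safer field to write into docs/data/version.json. A quick way to compare the two fields (illustrative only, not part of the diff):

```bash
# Prints the release title and the tag for the latest release; the repository is only an example.
curl -s "https://api.github.com/repos/mudler/LocalAI/releases/latest" | jq -r '.name, .tag_name'
```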
.github/checksum_checker.sh (new file, vendored, 111 lines)

@@ -0,0 +1,111 @@
#!/bin/bash
# This scripts needs yq and huggingface_hub to be installed
# to install hugingface_hub run pip install huggingface_hub

# Path to the input YAML file
input_yaml=$1

# Function to download file and check checksum using Python
function check_and_update_checksum() {
    model_name="$1"
    file_name="$2"
    uri="$3"
    old_checksum="$4"
    idx="$5"

    # Download the file and calculate new checksum using Python
    new_checksum=$(python3 -c "
import hashlib
from huggingface_hub import hf_hub_download
import requests
import sys
import os

uri = '$uri'
file_name = uri.split('/')[-1]

# Function to parse the URI and determine download method
def parse_uri(uri):
    if uri.startswith('huggingface://'):
        repo_id = uri.split('://')[1]
        return 'huggingface', repo_id.rsplit('/', 1)[0]
    elif 'huggingface.co' in uri:
        parts = uri.split('/resolve/')
        if len(parts) > 1:
            repo_path = parts[0].split('https://huggingface.co/')[-1]
            return 'huggingface', repo_path
    return 'direct', uri

def calculate_sha256(file_path):
    sha256_hash = hashlib.sha256()
    with open(file_path, 'rb') as f:
        for byte_block in iter(lambda: f.read(4096), b''):
            sha256_hash.update(byte_block)
    return sha256_hash.hexdigest()

download_type, repo_id_or_url = parse_uri(uri)

# Decide download method based on URI type
if download_type == 'huggingface':
    try:
        file_path = hf_hub_download(repo_id=repo_id_or_url, filename=file_name)
    except Exception as e:
        print(f'Error from Hugging Face Hub: {str(e)}', file=sys.stderr)
        sys.exit(2)
else:
    response = requests.get(repo_id_or_url)
    if response.status_code == 200:
        with open(file_name, 'wb') as f:
            f.write(response.content)
        file_path = file_name
    elif response.status_code == 404:
        print(f'File not found: {response.status_code}', file=sys.stderr)
        sys.exit(2)
    else:
        print(f'Error downloading file: {response.status_code}', file=sys.stderr)
        sys.exit(1)

print(calculate_sha256(file_path))
# Clean up the downloaded file
os.remove(file_path)
")

    if [[ "$new_checksum" == "" ]]; then
        echo "Error calculating checksum for $file_name. Skipping..."
        return
    fi

    echo "Checksum for $file_name: $new_checksum"

    # Compare and update the YAML file if checksums do not match
    result=$?
    if [[ $result -eq 2 ]]; then
        echo "File not found, deleting entry for $file_name..."
        # yq eval -i "del(.[$idx].files[] | select(.filename == \"$file_name\"))" "$input_yaml"
    elif [[ "$old_checksum" != "$new_checksum" ]]; then
        echo "Checksum mismatch for $file_name. Updating..."
        yq eval -i "del(.[$idx].files[] | select(.filename == \"$file_name\").sha256)" "$input_yaml"
        yq eval -i "(.[$idx].files[] | select(.filename == \"$file_name\")).sha256 = \"$new_checksum\"" "$input_yaml"
    elif [[ $result -ne 0 ]]; then
        echo "Error downloading file $file_name. Skipping..."
    else
        echo "Checksum match for $file_name. No update needed."
    fi
}

# Read the YAML and process each file
len=$(yq eval '. | length' "$input_yaml")
for ((i=0; i<$len; i++))
do
    name=$(yq eval ".[$i].name" "$input_yaml")
    files_len=$(yq eval ".[$i].files | length" "$input_yaml")
    for ((j=0; j<$files_len; j++))
    do
        filename=$(yq eval ".[$i].files[$j].filename" "$input_yaml")
        uri=$(yq eval ".[$i].files[$j].uri" "$input_yaml")
        checksum=$(yq eval ".[$i].files[$j].sha256" "$input_yaml")
        echo "Checking model $name, file $filename. URI = $uri, Checksum = $checksum"
        check_and_update_checksum "$name" "$filename" "$uri" "$checksum" "$i"
    done
done
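The script is driven by the workflow added further below, which passes the model gallery index as its only argument. Run by hand it would look roughly like this, assuming yq v4 and the huggingface_hub package are already installed, as the script header notes:

```bash
# Illustrative local invocation mirroring the CI workflow step below.
pip install huggingface_hub
bash .github/checksum_checker.sh gallery/index.yaml
```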
.github/labeler.yml (vendored, 7 lines changed)

@@ -8,6 +8,11 @@ kind/documentation:
   - changed-files:
     - any-glob-to-any-file: '*.md'
 
+area/ai-model:
+  - any:
+    - changed-files:
+      - any-glob-to-any-file: 'gallery/*'
+
 examples:
   - any:
     - changed-files:
@@ -16,4 +21,4 @@ examples:
 ci:
   - any:
     - changed-files:
-      - any-glob-to-any-file: '.github/*'
+      - any-glob-to-any-file: '.github/*'
.github/workflows/checksum_checker.yaml (new file, vendored, 47 lines)

@@ -0,0 +1,47 @@
name: Check if checksums are up-to-date
on:
  schedule:
    - cron: 0 20 * * *
  workflow_dispatch:
jobs:
  checksum_check:
    runs-on: arc-runner-set
    steps:
      - name: Force Install GIT latest
        run: |
          sudo apt-get update \
          && sudo apt-get install -y software-properties-common \
          && sudo apt-get update \
          && sudo add-apt-repository -y ppa:git-core/ppa \
          && sudo apt-get update \
          && sudo apt-get install -y git
      - uses: actions/checkout@v4
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y pip wget
          sudo pip install --upgrade pip
          pip install huggingface_hub
      - name: 'Setup yq'
        uses: dcarbone/install-yq-action@v1.1.1
        with:
          version: 'v4.43.1'
          download-compressed: true
          force: true

      - name: Checksum checker 🔧
        run: |
          export HF_HOME=/hf_cache
          sudo mkdir /hf_cache
          sudo chmod 777 /hf_cache
          bash .github/checksum_checker.sh gallery/index.yaml
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ secrets.UPDATE_BOT_TOKEN }}
          push-to-fork: ci-forks/LocalAI
          commit-message: ':arrow_up: Checksum updates in gallery/index.yaml'
          title: 'models(gallery): :arrow_up: update checksum'
          branch: "update/checksum"
          body: Updating checksums in gallery/index.yaml
          signoff: true
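Besides the nightly cron, the workflow declares workflow_dispatch, so it can also be started manually; with the GitHub CLI that is roughly:

```bash
# Manual trigger of the scheduled checksum job (repository name assumed).
gh workflow run checksum_checker.yaml --repo mudler/LocalAI
```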
.github/workflows/dependabot_auto.yml (vendored, 2 lines changed)

@@ -14,7 +14,7 @@ jobs:
     steps:
     - name: Dependabot metadata
       id: metadata
-      uses: dependabot/fetch-metadata@v2.0.0
+      uses: dependabot/fetch-metadata@v2.1.0
      with:
        github-token: "${{ secrets.GITHUB_TOKEN }}"
        skip-commit-verification: true
.github/workflows/generate_grpc_cache.yaml (vendored, 10 lines changed)

@@ -1,7 +1,10 @@
 name: 'generate and publish GRPC docker caches'
 
 on:
-- workflow_dispatch
+  workflow_dispatch:
+  push:
+    branches:
+      - master
 
 concurrency:
   group: grpc-cache-${{ github.head_ref || github.ref }}-${{ github.repository }}
@@ -80,11 +83,12 @@ jobs:
           # If the build-args are not an EXACT match, it will result in a cache miss, which will require GRPC to be built from scratch.
           build-args: |
             GRPC_BASE_IMAGE=${{ matrix.grpc-base-image }}
-            MAKEFLAGS=--jobs=4 --output-sync=target
-            GRPC_VERSION=v1.58.0
+            GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
+            GRPC_VERSION=v1.63.0
           context: .
           file: ./Dockerfile
+          cache-to: type=gha,ignore-error=true
           cache-from: type=gha
           target: grpc
           platforms: ${{ matrix.platforms }}
           push: false
.github/workflows/image-pr.yml (vendored, 6 lines changed)

@@ -61,14 +61,14 @@ jobs:
            tag-suffix: '-hipblas'
            ffmpeg: 'false'
            image-type: 'extras'
-            base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
            grpc-base-image: "ubuntu:22.04"
            runs-on: 'arc-runner-set'
            makeflags: "--jobs=3 --output-sync=target"
          - build-type: 'sycl_f16'
            platforms: 'linux/amd64'
            tag-latest: 'false'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: 'sycl-f16-ffmpeg'
            ffmpeg: 'true'
@@ -110,7 +110,7 @@ jobs:
          - build-type: 'sycl_f16'
            platforms: 'linux/amd64'
            tag-latest: 'false'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: 'sycl-f16-ffmpeg-core'
            ffmpeg: 'true'
.github/workflows/image.yml (vendored, 20 lines changed)

@@ -129,7 +129,7 @@ jobs:
            ffmpeg: 'true'
            image-type: 'extras'
            aio: "-aio-gpu-hipblas"
-            base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
            grpc-base-image: "ubuntu:22.04"
            latest-image: 'latest-gpu-hipblas'
            latest-image-aio: 'latest-aio-gpu-hipblas'
@@ -141,14 +141,14 @@ jobs:
            tag-suffix: '-hipblas'
            ffmpeg: 'false'
            image-type: 'extras'
-            base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
            grpc-base-image: "ubuntu:22.04"
            runs-on: 'arc-runner-set'
            makeflags: "--jobs=3 --output-sync=target"
          - build-type: 'sycl_f16'
            platforms: 'linux/amd64'
            tag-latest: 'auto'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: '-sycl-f16-ffmpeg'
            ffmpeg: 'true'
@@ -161,7 +161,7 @@ jobs:
          - build-type: 'sycl_f32'
            platforms: 'linux/amd64'
            tag-latest: 'auto'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: '-sycl-f32-ffmpeg'
            ffmpeg: 'true'
@@ -175,7 +175,7 @@ jobs:
          - build-type: 'sycl_f16'
            platforms: 'linux/amd64'
            tag-latest: 'false'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: '-sycl-f16-core'
            ffmpeg: 'false'
@@ -185,7 +185,7 @@ jobs:
          - build-type: 'sycl_f32'
            platforms: 'linux/amd64'
            tag-latest: 'false'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: '-sycl-f32-core'
            ffmpeg: 'false'
@@ -195,7 +195,7 @@ jobs:
          - build-type: 'sycl_f16'
            platforms: 'linux/amd64'
            tag-latest: 'false'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: '-sycl-f16-ffmpeg-core'
            ffmpeg: 'true'
@@ -205,7 +205,7 @@ jobs:
          - build-type: 'sycl_f32'
            platforms: 'linux/amd64'
            tag-latest: 'false'
-            base-image: "intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
            grpc-base-image: "ubuntu:22.04"
            tag-suffix: '-sycl-f32-ffmpeg-core'
            ffmpeg: 'true'
@@ -218,7 +218,7 @@ jobs:
            tag-suffix: '-hipblas-ffmpeg-core'
            ffmpeg: 'true'
            image-type: 'core'
-            base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
            grpc-base-image: "ubuntu:22.04"
            runs-on: 'arc-runner-set'
            makeflags: "--jobs=3 --output-sync=target"
@@ -228,7 +228,7 @@ jobs:
            tag-suffix: '-hipblas-core'
            ffmpeg: 'false'
            image-type: 'core'
-            base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+            base-image: "rocm/dev-ubuntu-22.04:6.1"
            grpc-base-image: "ubuntu:22.04"
            runs-on: 'arc-runner-set'
            makeflags: "--jobs=3 --output-sync=target"
.github/workflows/image_build.yml (vendored, 31 lines changed)

@@ -201,30 +201,14 @@ jobs:
          username: ${{ secrets.quayUsername }}
          password: ${{ secrets.quayPassword }}
 
-      - name: Cache GRPC
-        uses: docker/build-push-action@v5
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          # The build-args MUST be an EXACT match between the image cache and other workflow steps that want to use that cache.
-          # This means that even the MAKEFLAGS have to be an EXACT match.
-          # If the build-args are not an EXACT match, it will result in a cache miss, which will require GRPC to be built from scratch.
-          build-args: |
-            GRPC_BASE_IMAGE=${{ inputs.grpc-base-image || inputs.base-image }}
-            MAKEFLAGS=--jobs=4 --output-sync=target
-            GRPC_VERSION=v1.58.0
-          context: .
-          file: ./Dockerfile
-          cache-from: type=gha
-          target: grpc
-          platforms: ${{ inputs.platforms }}
-          push: false
-          tags: ${{ steps.meta.outputs.tags }}
-          labels: ${{ steps.meta.outputs.labels }}
-
       - name: Build and push
         uses: docker/build-push-action@v5
         with:
          builder: ${{ steps.buildx.outputs.name }}
-          # This is why some build args like GRPC_VERSION and MAKEFLAGS are hardcoded
          build-args: |
            BUILD_TYPE=${{ inputs.build-type }}
            CUDA_MAJOR_VERSION=${{ inputs.cuda-major-version }}
@@ -232,6 +216,9 @@ jobs:
            FFMPEG=${{ inputs.ffmpeg }}
            IMAGE_TYPE=${{ inputs.image-type }}
            BASE_IMAGE=${{ inputs.base-image }}
+            GRPC_BASE_IMAGE=${{ inputs.grpc-base-image || inputs.base-image }}
+            GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
+            GRPC_VERSION=v1.63.0
            MAKEFLAGS=${{ inputs.makeflags }}
          context: .
          file: ./Dockerfile
@@ -241,14 +228,6 @@ jobs:
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
 
-      - name: Inspect image
-        if: github.event_name != 'pull_request'
-        run: |
-          docker pull localai/localai:${{ steps.meta.outputs.version }}
-          docker image inspect localai/localai:${{ steps.meta.outputs.version }}
-          docker pull quay.io/go-skynet/local-ai:${{ steps.meta.outputs.version }}
-          docker image inspect quay.io/go-skynet/local-ai:${{ steps.meta.outputs.version }}
-
       - name: Build and push AIO image
        if: inputs.aio != ''
        uses: docker/build-push-action@v5
.github/workflows/release.yaml (vendored, 70 lines changed)

@@ -5,7 +5,7 @@ on:
  - pull_request
 
 env:
-  GRPC_VERSION: v1.58.0
+  GRPC_VERSION: v1.63.0
 
 permissions:
  contents: write
@@ -19,12 +19,8 @@ jobs:
    strategy:
      matrix:
        include:
-          - build: 'avx2'
+          - build: ''
            defines: ''
-          - build: 'avx'
-            defines: '-DLLAMA_AVX2=OFF'
-          - build: 'avx512'
-            defines: '-DLLAMA_AVX512=ON'
          - build: 'cuda12'
            defines: ''
          - build: 'cuda11'
@@ -74,7 +70,6 @@ jobs:
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.defines }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
@@ -124,63 +119,7 @@ jobs:
          name: stablediffusion
          path: release/
 
-  build-macOS:
-    strategy:
-      matrix:
-        include:
-          - build: 'avx2'
-            defines: ''
-          - build: 'avx'
-            defines: '-DLLAMA_AVX2=OFF'
-          - build: 'avx512'
-            defines: '-DLLAMA_AVX512=ON'
-    runs-on: macOS-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21.x'
-          cache: false
-      - name: Dependencies
-        run: |
-          brew install protobuf grpc
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
-          go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
-      - name: Build
-        id: build
-        env:
-          CMAKE_ARGS: "${{ matrix.defines }}"
-          BUILD_ID: "${{ matrix.build }}"
-        run: |
-          export C_INCLUDE_PATH=/usr/local/include
-          export CPLUS_INCLUDE_PATH=/usr/local/include
-          export PATH=$PATH:$GOPATH/bin
-          make dist
-      - uses: actions/upload-artifact@v4
-        with:
-          name: LocalAI-MacOS-${{ matrix.build }}
-          path: release/
-      - name: Release
-        uses: softprops/action-gh-release@v2
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          files: |
-            release/*
-
-
   build-macOS-arm64:
-    strategy:
-      matrix:
-        include:
-          - build: 'avx2'
-            defines: ''
-          - build: 'avx'
-            defines: '-DLLAMA_AVX2=OFF'
-          - build: 'avx512'
-            defines: '-DLLAMA_AVX512=ON'
    runs-on: macos-14
    steps:
      - name: Clone
@@ -198,9 +137,6 @@ jobs:
          go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
      - name: Build
        id: build
-        env:
-          CMAKE_ARGS: "${{ matrix.defines }}"
-          BUILD_ID: "${{ matrix.build }}"
        run: |
          export C_INCLUDE_PATH=/usr/local/include
          export CPLUS_INCLUDE_PATH=/usr/local/include
@@ -208,7 +144,7 @@ jobs:
          make dist
      - uses: actions/upload-artifact@v4
        with:
-          name: LocalAI-MacOS-arm64-${{ matrix.build }}
+          name: LocalAI-MacOS-arm64
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v2
.github/workflows/test-extra.yml (vendored, 22 lines changed)

@@ -34,7 +34,7 @@ jobs:
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
 
          sudo rm -rfv /usr/bin/conda || true
 
@@ -64,7 +64,7 @@ jobs:
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
 
          sudo rm -rfv /usr/bin/conda || true
 
@@ -95,7 +95,7 @@ jobs:
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
 
          sudo rm -rfv /usr/bin/conda || true
 
@@ -125,7 +125,7 @@ jobs:
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
 
          sudo rm -rfv /usr/bin/conda || true
 
@@ -155,7 +155,7 @@ jobs:
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
 
          sudo rm -rfv /usr/bin/conda || true
 
@@ -185,7 +185,7 @@ jobs:
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
 
          sudo rm -rfv /usr/bin/conda || true
 
@@ -217,7 +217,7 @@ jobs:
  #         sudo apt-get install -y conda
  #         sudo apt-get install -y ca-certificates cmake curl patch python3-pip
  #         sudo apt-get install -y libopencv-dev
-  #         pip install --user grpcio-tools
+  #         pip install --user grpcio-tools==1.63.0
 
  #         sudo rm -rfv /usr/bin/conda || true
 
@@ -289,7 +289,7 @@ jobs:
  #         sudo apt-get install -y conda
  #         sudo apt-get install -y ca-certificates cmake curl patch python3-pip
  #         sudo apt-get install -y libopencv-dev
-  #         pip install --user grpcio-tools
+  #         pip install --user grpcio-tools==1.63.0
 
  #         sudo rm -rfv /usr/bin/conda || true
 
@@ -322,7 +322,7 @@ jobs:
  #         sudo apt-get install -y conda
  #         sudo apt-get install -y ca-certificates cmake curl patch python3-pip
  #         sudo apt-get install -y libopencv-dev
-  #         pip install --user grpcio-tools
+  #         pip install --user grpcio-tools==1.63.0
  #         sudo rm -rfv /usr/bin/conda || true
  #     - name: Test vllm
  #       run: |
@@ -349,7 +349,7 @@ jobs:
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
          sudo rm -rfv /usr/bin/conda || true
      - name: Test vall-e-x
        run: |
@@ -376,7 +376,7 @@ jobs:
          sudo apt-get update && \
          sudo apt-get install -y conda
          sudo apt-get install -y ca-certificates cmake curl patch espeak espeak-ng python3-pip
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
          sudo rm -rfv /usr/bin/conda || true
 
      - name: Test coqui
.github/workflows/test.yml (vendored, 4 lines changed)

@@ -10,7 +10,7 @@ on:
      - '*'
 
 env:
-  GRPC_VERSION: v1.58.0
+  GRPC_VERSION: v1.63.0
 
 concurrency:
  group: ci-tests-${{ github.head_ref || github.ref }}-${{ github.repository }}
@@ -203,7 +203,7 @@ jobs:
      - name: Dependencies
        run: |
          brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc
-          pip install --user grpcio-tools
+          pip install --user grpcio-tools==1.63.0
      - name: Test
        run: |
          export C_INCLUDE_PATH=/usr/local/include
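The grpcio-tools pin matches the GRPC_VERSION bump to v1.63.0 in the same file, keeping the Python protobuf/gRPC code generators in lockstep with the C++ gRPC build. A quick local check that the pinned version is the one in use (illustrative):

```bash
# Confirm the pinned generator version after installing it the same way the workflows do.
pip install --user grpcio-tools==1.63.0
pip show grpcio-tools | grep Version
```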
Dockerfile (204 lines changed)

@@ -2,41 +2,42 @@ ARG IMAGE_TYPE=extras
 ARG BASE_IMAGE=ubuntu:22.04
 ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
 
-# extras or core
+# The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
 FROM ${BASE_IMAGE} AS requirements-core
 
 USER root
 
 ARG GO_VERSION=1.21.7
 ARG BUILD_TYPE
 ARG CUDA_MAJOR_VERSION=11
 ARG CUDA_MINOR_VERSION=7
 ARG TARGETARCH
 ARG TARGETVARIANT
 
 ENV BUILD_TYPE=${BUILD_TYPE}
 ENV DEBIAN_FRONTEND=noninteractive
 ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,petals:/build/backend/python/petals/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,exllama:/build/backend/python/exllama/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
 
 ARG GO_TAGS="stablediffusion tinydream tts"
 
 RUN apt-get update && \
-    apt-get install -y ca-certificates curl python3-pip unzip && apt-get clean
+    apt-get install -y --no-install-recommends \
+        build-essential \
+        ca-certificates \
+        cmake \
+        curl \
+        git \
+        python3-pip \
+        python-is-python3 \
+        unzip && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    pip install --upgrade pip
 
 # Install Go
 RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
-ENV PATH $PATH:/usr/local/go/bin
+ENV PATH $PATH:/root/go/bin:/usr/local/go/bin
 
 # Install grpc compilers
-ENV PATH $PATH:/root/go/bin
 RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@latest && \
     go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
 
 # Install protobuf (the version in 22.04 is too old)
 RUN curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
     unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
     rm protoc.zip
 
 # Install grpcio-tools (the version in 22.04 is too old)
 RUN pip install --user grpcio-tools
 
@@ -47,16 +48,6 @@ RUN update-ca-certificates
 RUN echo "Target Architecture: $TARGETARCH"
 RUN echo "Target Variant: $TARGETVARIANT"
 
-# CuBLAS requirements
-RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
-    apt-get install -y software-properties-common && \
-    curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb && \
-    dpkg -i cuda-keyring_1.1-1_all.deb && \
-    rm -f cuda-keyring_1.1-1_all.deb && \
-    apt-get update && \
-    apt-get install -y cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && apt-get clean \
-    ; fi
-
 # Cuda
 ENV PATH /usr/local/cuda/bin:${PATH}
 
@@ -64,10 +55,12 @@ ENV PATH /usr/local/cuda/bin:${PATH}
 ENV PATH /opt/rocm/bin:${PATH}
 
 # OpenBLAS requirements and stable diffusion
-RUN apt-get install -y \
-    libopenblas-dev \
-    libopencv-dev \
-    && apt-get clean
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+        libopenblas-dev \
+        libopencv-dev && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
 
 # Set up OpenCV
 RUN ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2
@@ -80,56 +73,126 @@ RUN test -n "$TARGETARCH" \
 ###################################
 ###################################
 
+# The requirements-extras target is for any builds with IMAGE_TYPE=extras. It should not be placed in this target unless every IMAGE_TYPE=extras build will use it
 FROM requirements-core AS requirements-extras
 
-RUN apt install -y gpg && \
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends gpg && \
     curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg && \
     install -o root -g root -m 644 conda.gpg /usr/share/keyrings/conda-archive-keyring.gpg && \
     gpg --keyring /usr/share/keyrings/conda-archive-keyring.gpg --no-default-keyring --fingerprint 34161F5BF5EB1D4BFBBB8F0A8AEB4F8B29D82806 && \
-    echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" > /etc/apt/sources.list.d/conda.list && \
+    echo "deb [arch=amd64 signed-by=/usr/share/keyrings/conda-archive-keyring.gpg] https://repo.anaconda.com/pkgs/misc/debrepo/conda stable main" | tee -a /etc/apt/sources.list.d/conda.list && \
     apt-get update && \
-    apt-get install -y conda && apt-get clean
+    apt-get install -y --no-install-recommends \
+        conda && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
 
 ENV PATH="/root/.cargo/bin:${PATH}"
-RUN apt-get install -y python3-pip && apt-get clean
-RUN pip install --upgrade pip
 
 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-RUN apt-get install -y espeak-ng espeak && apt-get clean
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+        espeak-ng \
+        espeak && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
 
-RUN if [ ! -e /usr/bin/python ]; then \
-    ln -s /usr/bin/python3 /usr/bin/python \
 ###################################
 ###################################
 
+# The requirements-drivers target is for BUILD_TYPE specific items. If you need to install something specific to CUDA, or specific to ROCM, it goes here.
+# This target will be built on top of requirements-core or requirements-extras as retermined by the IMAGE_TYPE build-arg
+FROM requirements-${IMAGE_TYPE} AS requirements-drivers
+
+ARG BUILD_TYPE
+ARG CUDA_MAJOR_VERSION=11
+ARG CUDA_MINOR_VERSION=7
+
+ENV BUILD_TYPE=${BUILD_TYPE}
+
+# CuBLAS requirements
+RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+        software-properties-common && \
+    curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb && \
+    dpkg -i cuda-keyring_1.1-1_all.deb && \
+    rm -f cuda-keyring_1.1-1_all.deb && \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+        cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
+        libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
+        libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
+        libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
+        libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* \
+    ; fi
+
+# If we are building with clblas support, we need the libraries for the builds
+RUN if [ "${BUILD_TYPE}" = "clblas" ]; then \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+        libclblast-dev && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* \
+    ; fi
+
+RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+        hipblas-dev \
+        rocblas-dev && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    # I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
+    # to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
+    ldconfig \
+    ; fi
+
+###################################
+###################################
+
+# The grpc target does one thing, it builds and installs GRPC. This is in it's own layer so that it can be effectively cached by CI.
+# You probably don't need to change anything here, and if you do, make sure that CI is adjusted so that the cache continues to work.
 FROM ${GRPC_BASE_IMAGE} AS grpc
 
-ARG MAKEFLAGS
+# This is a bit of a hack, but it's required in order to be able to effectively cache this layer in CI
+ARG GRPC_MAKEFLAGS="-j4 -Otarget"
 ARG GRPC_VERSION=v1.58.0
 
-ENV MAKEFLAGS=${MAKEFLAGS}
+ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
 
 WORKDIR /build
 
 RUN apt-get update && \
-    apt-get install -y build-essential cmake git && \
+    apt-get install -y --no-install-recommends \
+        ca-certificates \
+        build-essential \
+        cmake \
+        git && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
-RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shallow-submodules https://github.com/grpc/grpc
-
-WORKDIR /build/grpc/cmake/build
-
-RUN cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF ../.. && \
-    make
+# We install GRPC to a different prefix here so that we can copy in only the build artifacts later
+# saves several hundred MB on the final docker image size vs copying in the entire GRPC source tree
+# and running make install in the target container
+RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
+    mkdir -p /build/grpc/cmake/build && \
+    cd /build/grpc/cmake/build && \
+    cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DCMAKE_INSTALL_PREFIX:PATH=/opt/grpc ../.. && \
+    make && \
+    make install && \
+    rm -rf /build
 
 ###################################
 ###################################
 
-FROM requirements-${IMAGE_TYPE} AS builder
+# The builder target compiles LocalAI. This target is not the target that will be uploaded to the registry.
+# Adjustments to the build process should likely be made here.
+FROM requirements-drivers AS builder
 
 ARG GO_TAGS="stablediffusion tts"
 ARG GRPC_BACKENDS
@@ -148,41 +211,36 @@ COPY . .
 COPY .git .
 RUN echo "GO_TAGS: $GO_TAGS"
 
-RUN apt-get update && \
-    apt-get install -y build-essential cmake git && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-
 RUN make prepare
 
-# If we are building with clblas support, we need the libraries for the builds
-RUN if [ "${BUILD_TYPE}" = "clblas" ]; then \
-    apt-get update && \
-    apt-get install -y libclblast-dev && \
-    apt-get clean \
-    ; fi
+# We need protoc installed, and the version in 22.04 is too old. We will create one as part installing the GRPC build below
+# but that will also being in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
+# here so that we can generate the grpc code for the stablediffusion build
+RUN curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
+    unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
+    rm protoc.zip
 
+# stablediffusion does not tolerate a newer version of abseil, build it first
+RUN GRPC_BACKENDS=backend-assets/grpc/stablediffusion make build
+
-COPY --from=grpc /build/grpc ./grpc/
-
-WORKDIR /build/grpc/cmake/build
-RUN make install
+# Install the pre-built GRPC
+COPY --from=grpc /opt/grpc /usr/local
 
+# Rebuild with defaults backends
+WORKDIR /build
 RUN make build
 
 RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \
-    mkdir -p /build/sources/go-piper/piper-phonemize/pi/lib/ \
-    touch /build/sources/go-piper/piper-phonemize/pi/lib/keep \
+    mkdir -p /build/sources/go-piper/piper-phonemize/pi/lib/ \
+    touch /build/sources/go-piper/piper-phonemize/pi/lib/keep \
    ; fi
 
 ###################################
 ###################################
 
-FROM requirements-${IMAGE_TYPE}
+# This is the final target. The result of this target will be the image uploaded to the registry.
+# If you cannot find a more suitable place for an addition, this layer is a suitable place for it.
+FROM requirements-drivers
 
 ARG FFMPEG
 ARG BUILD_TYPE
@@ -203,21 +261,13 @@ ENV PIP_CACHE_PURGE=true
 
 # Add FFmpeg
 RUN if [ "${FFMPEG}" = "true" ]; then \
-    apt-get install -y ffmpeg && apt-get clean \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+        ffmpeg && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* \
    ; fi
 
-# Add OpenCL
-RUN if [ "${BUILD_TYPE}" = "clblas" ]; then \
-    apt-get update && \
-    apt-get install -y libclblast1 && \
-    apt-get clean \
-    ; fi
-
-RUN apt-get update && \
-    apt-get install -y cmake git && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
-
 WORKDIR /build
 
 # we start fresh & re-copy all assets because `make build` does not clean up nicely after itself
@@ -227,9 +277,9 @@ WORKDIR /build
 COPY . .
 
 COPY --from=builder /build/sources ./sources/
-COPY --from=grpc /build/grpc ./grpc/
+COPY --from=grpc /opt/grpc /usr/local
 
-RUN make prepare-sources && cd /build/grpc/cmake/build && make install && rm -rf /build/grpc
+RUN make prepare-sources
 
 # Copy the binary
 COPY --from=builder /build/local-ai ./
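A local build that exercises the reworked stages uses the same build arguments the CI workflows pass; the argument values and image tag below are illustrative only:

```bash
# Illustrative: build a core image with the GRPC-related build arguments used by CI.
docker build \
  --build-arg IMAGE_TYPE=core \
  --build-arg BASE_IMAGE=ubuntu:22.04 \
  --build-arg GRPC_BASE_IMAGE=ubuntu:22.04 \
  --build-arg GRPC_VERSION=v1.63.0 \
  --build-arg GRPC_MAKEFLAGS="--jobs=4 --output-sync=target" \
  -t localai:dev -f Dockerfile .
```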
Makefile (62 lines changed)

@@ -5,7 +5,7 @@ BINARY_NAME=local-ai
 
 # llama.cpp versions
 GOLLAMA_STABLE_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=784e11dea1f5ce9638851b2b0dddb107e2a609c8
+CPPLLAMA_VERSION?=c12452c7aec8a02264afc00196a13caa591a13ac
 
 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
@@ -16,7 +16,7 @@ RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
 RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6
 
 # whisper.cpp version
-WHISPER_CPP_VERSION?=858452d58dba3acdc3431c9bced2bb8cfd9bf418
+WHISPER_CPP_VERSION?=73d13ad19a8c9c4da4f405088a85169b1a171e66
 
 # bert.cpp version
 BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d
@@ -25,10 +25,10 @@ BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d
 PIPER_VERSION?=9d0100873a7dbb0824dfea40e8cec70a1b110759
 
 # stablediffusion version
-STABLEDIFFUSION_VERSION?=362df9da29f882dbf09ade61972d16a1f53c3485
+STABLEDIFFUSION_VERSION?=4a3cd6aeae6f66ee57eae9a0075f8c58c3a6a38f
 
 # tinydream version
-TINYDREAM_VERSION?=22a12a4bc0ac5455856f28f3b771331a551a4293
+TINYDREAM_VERSION?=c04fa463ace9d9a6464313aa5f9cd0f953b6c057
 
 export BUILD_TYPE?=
 export STABLE_BUILD_TYPE?=$(BUILD_TYPE)
@@ -99,7 +99,7 @@ endif
 ifeq ($(BUILD_TYPE),cublas)
 	CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
 	export LLAMA_CUBLAS=1
-	export WHISPER_CUBLAS=1
+	export WHISPER_CUDA=1
 	CGO_LDFLAGS_WHISPER+=-L$(CUDA_LIBPATH)/stubs/ -lcuda
 endif
 
@@ -152,9 +152,11 @@ ifeq ($(findstring tts,$(GO_TAGS)),tts)
 	OPTIONAL_GRPC+=backend-assets/grpc/piper
 endif
 
-ALL_GRPC_BACKENDS=backend-assets/grpc/langchain-huggingface
+ALL_GRPC_BACKENDS=backend-assets/grpc/huggingface
 ALL_GRPC_BACKENDS+=backend-assets/grpc/bert-embeddings
 ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp
+ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-noavx
+ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-fallback
 ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-ggml
 ALL_GRPC_BACKENDS+=backend-assets/grpc/gpt4all
 ALL_GRPC_BACKENDS+=backend-assets/grpc/rwkv
@@ -240,7 +242,7 @@ sources/whisper.cpp:
 	cd sources/whisper.cpp && git checkout -b build $(WHISPER_CPP_VERSION) && git submodule update --init --recursive --depth 1
 
 sources/whisper.cpp/libwhisper.a: sources/whisper.cpp
-	cd sources/whisper.cpp && make libwhisper.a
+	cd sources/whisper.cpp && $(MAKE) libwhisper.a
 
 get-sources: sources/go-llama.cpp sources/gpt4all sources/go-piper sources/go-rwkv.cpp sources/whisper.cpp sources/go-bert.cpp sources/go-stable-diffusion sources/go-tiny-dream
 
@@ -293,6 +295,7 @@ clean: ## Remove build related file
 	rm -rf backend-assets/*
 	$(MAKE) -C backend/cpp/grpc clean
 	$(MAKE) -C backend/cpp/llama clean
+	rm -rf backend/cpp/llama-* || true
 	$(MAKE) dropreplace
 	$(MAKE) protogen-clean
 	rmdir pkg/grpc/proto || true
@@ -311,14 +314,19 @@ build: prepare backend-assets grpcs ## Build the project
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
 
 build-minimal:
-	BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS=backend-assets/grpc/llama-cpp GO_TAGS=none $(MAKE) build
+	BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS="backend-assets/grpc/llama-cpp" GO_TAGS=none $(MAKE) build
 
 build-api:
 	BUILD_GRPC_FOR_BACKEND_LLAMA=true BUILD_API_ONLY=true GO_TAGS=none $(MAKE) build
 
 dist: build
 	mkdir -p release
+# if BUILD_ID is empty, then we don't append it to the binary name
+ifeq ($(BUILD_ID),)
+	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(OS)-$(ARCH)
+else
 	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)
+endif
 
 osx-signed: build
 	codesign --deep --force --sign "$(OSX_SIGNING_IDENTITY)" --entitlements "./Entitlements.plist" "./$(BINARY_NAME)"
@@ -616,8 +624,8 @@ backend-assets/grpc/gpt4all: sources/gpt4all sources/gpt4all/gpt4all-bindings/go
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/gpt4all/gpt4all-bindings/golang/ LIBRARY_PATH=$(CURDIR)/sources/gpt4all/gpt4all-bindings/golang/ \
 	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/gpt4all ./backend/go/llm/gpt4all/
 
-backend-assets/grpc/langchain-huggingface: backend-assets/grpc
-	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/langchain-huggingface ./backend/go/llm/langchain/
+backend-assets/grpc/huggingface: backend-assets/grpc
+	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/huggingface ./backend/go/llm/langchain/
 
 backend/cpp/llama/llama.cpp:
 	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama llama.cpp
@@ -629,7 +637,7 @@ ADDED_CMAKE_ARGS=-Dabsl_DIR=${INSTALLED_LIB_CMAKE}/absl \
 	-Dutf8_range_DIR=${INSTALLED_LIB_CMAKE}/utf8_range \
 	-DgRPC_DIR=${INSTALLED_LIB_CMAKE}/grpc \
 	-DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=${INSTALLED_PACKAGES}/include
-backend/cpp/llama/grpc-server:
+build-llama-cpp-grpc-server:
 # Conditionally build grpc for the llama backend to use if needed
 ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
 	$(MAKE) -C backend/cpp/grpc build
@@ -638,19 +646,37 @@ ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
 	PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
 	CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
 	LLAMA_VERSION=$(CPPLLAMA_VERSION) \
-	$(MAKE) -C backend/cpp/llama grpc-server
+	$(MAKE) -C backend/cpp/${VARIANT} grpc-server
 else
 	echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
-	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama grpc-server
+	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/${VARIANT} grpc-server
 endif
 
-backend-assets/grpc/llama-cpp: backend-assets/grpc backend/cpp/llama/grpc-server
-	cp -rfv backend/cpp/llama/grpc-server backend-assets/grpc/llama-cpp
+backend-assets/grpc/llama-cpp: backend-assets/grpc
+	$(info ${GREEN}I llama-cpp build info:standard${RESET})
+	cp -rf backend/cpp/llama backend/cpp/llama-default
+	$(MAKE) -C backend/cpp/llama-default purge
+	$(MAKE) VARIANT="llama-default" build-llama-cpp-grpc-server
+	cp -rfv backend/cpp/llama-default/grpc-server backend-assets/grpc/llama-cpp
 # TODO: every binary should have its own folder instead, so can have different metal implementations
 ifeq ($(BUILD_TYPE),metal)
-	cp backend/cpp/llama/llama.cpp/build/bin/default.metallib backend-assets/grpc/
+	cp backend/cpp/llama-default/llama.cpp/build/bin/default.metallib backend-assets/grpc/
 endif
 
+backend-assets/grpc/llama-cpp-noavx: backend-assets/grpc
+	cp -rf backend/cpp/llama backend/cpp/llama-noavx
+	$(MAKE) -C backend/cpp/llama-noavx purge
+	$(info ${GREEN}I llama-cpp build info:noavx${RESET})
+	CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_AVX512=OFF -DLLAMA_AVX2=OFF" $(MAKE) VARIANT="llama-noavx" build-llama-cpp-grpc-server
+	cp -rfv backend/cpp/llama-noavx/grpc-server backend-assets/grpc/llama-cpp-noavx
+
+backend-assets/grpc/llama-cpp-fallback: backend-assets/grpc
+	cp -rf backend/cpp/llama backend/cpp/llama-fallback
+	$(MAKE) -C backend/cpp/llama-fallback purge
+	$(info ${GREEN}I llama-cpp build info:fallback${RESET})
+	CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_F16C=OFF -DLLAMA_AVX512=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF" $(MAKE) VARIANT="llama-fallback" build-llama-cpp-grpc-server
+	cp -rfv backend/cpp/llama-fallback/grpc-server backend-assets/grpc/llama-cpp-fallback
+
 backend-assets/grpc/llama-ggml: sources/go-llama.cpp sources/go-llama.cpp/libbinding.a backend-assets/grpc
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama.cpp LIBRARY_PATH=$(CURDIR)/sources/go-llama.cpp \
 	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama-ggml ./backend/go/llm/llama-ggml/
@@ -707,7 +733,7 @@ docker-aio-all:
 
 docker-image-intel:
 	docker build \
-		--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04 \
+		--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04 \
		--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
		--build-arg GO_TAGS="none" \
		--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
@@ -715,7 +741,7 @@ docker-image-intel:
 
 docker-image-intel-xpu:
 	docker build \
-		--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.0.1-devel-ubuntu22.04 \
+		--build-arg BASE_IMAGE=intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04 \
		--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
		--build-arg GO_TAGS="none" \
		--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
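The new llama.cpp variant targets all funnel through build-llama-cpp-grpc-server with a VARIANT argument, so a single backend can be rebuilt in isolation; the commands below simply invoke targets defined above and are shown for illustration:

```bash
# Build only the fallback llama.cpp gRPC backend (no AVX/AVX2/FMA/F16C), as defined above.
make backend-assets/grpc/llama-cpp-fallback

# Or do a minimal LocalAI build with just the standard llama.cpp backend (the build-minimal recipe).
BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS="backend-assets/grpc/llama-cpp" GO_TAGS=none make build
```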
README.md

@@ -50,6 +50,7 @@
 
 [Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
 
+- Chat, TTS, and Image generation in the WebUI: https://github.com/mudler/LocalAI/pull/2222
 - Reranker API: https://github.com/mudler/LocalAI/pull/2121
 - Gallery WebUI: https://github.com/mudler/LocalAI/pull/2104
 - llama3: https://github.com/mudler/LocalAI/discussions/2076
@@ -59,6 +60,8 @@
 - All-in-one container image: https://github.com/mudler/LocalAI/issues/1855
 
 Hot topics (looking for contributors):
 
+- WebUI improvements: https://github.com/mudler/LocalAI/issues/2156
 - Backends v2: https://github.com/mudler/LocalAI/issues/1126
 - Improving UX v2: https://github.com/mudler/LocalAI/issues/1373
 - Assistant API: https://github.com/mudler/LocalAI/issues/1273
@@ -89,7 +92,8 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
 - 🧠 [Embeddings generation for vector databases](https://localai.io/features/embeddings/)
 - ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
 - 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
-- 🆕 [Vision API](https://localai.io/features/gpt-vision/)
+- 🥽 [Vision API](https://localai.io/features/gpt-vision/)
+- 🆕 [Reranker API](https://localai.io/features/reranker/)
 
 ## 💻 Usage
 
@@ -110,6 +114,7 @@ Model galleries
 Other:
 - Helm chart https://github.com/go-skynet/helm-charts
 - VSCode extension https://github.com/badgooooor/localai-vscode-plugin
+- Terminal utility https://github.com/djcopley/ShellOracle
 - Local Smart assistant https://github.com/mudler/LocalAGI
 - Home Assistant https://github.com/sammcj/homeassistant-localai / https://github.com/drndos/hass-openai-custom-conversation
 - Discord bot https://github.com/mudler/LocalAGI/tree/main/examples/discord
@@ -128,7 +133,7 @@ Other:
 
 ## :book: 🎥 [Media, Blogs, Social](https://localai.io/basics/news/#media-blogs-social)
 
-- [Run LocalAI on AWS EKS with Pulumi](https://www.pulumi.com/ai/answers/tiZMDoZzZV6TLxgDXNBnFE/deploying-helm-charts-on-aws-eks)
+- [Run LocalAI on AWS EKS with Pulumi](https://www.pulumi.com/blog/low-code-llm-apps-with-local-ai-flowise-and-pulumi/)
 - [Run LocalAI on AWS](https://staleks.hashnode.dev/installing-localai-on-aws-ec2-instance)
 - [Create a slackbot for teams and OSS projects that answer to documentation](https://mudler.pm/posts/smart-slackbot-for-teams/)
 - [LocalAI meets k8sgpt](https://www.youtube.com/watch?v=PKrDNuJ_dfE)
@@ -1,7 +1,7 @@
 name: gpt-4
 mmap: true
 parameters:
-  model: huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q2_K.gguf
+  model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf
 
 template:
   chat_message: |
@@ -1,7 +1,7 @@
 name: gpt-4
 mmap: true
 parameters:
-  model: huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q6_K.gguf
+  model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf
 
 template:
   chat_message: |
@@ -2,7 +2,7 @@ name: gpt-4
 mmap: false
 f16: false
 parameters:
-  model: huggingface://NousResearch/Hermes-2-Pro-Mistral-7B-GGUF/Hermes-2-Pro-Mistral-7B.Q6_K.gguf
+  model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf
 
 template:
   chat_message: |
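These configs keep exposing the model under the name gpt-4, so existing OpenAI-style clients keep working after the swap to Hermes-2-Pro-Llama-3. A request against a running instance would look roughly like this (host and port follow the README's docker run example; the prompt is illustrative):

```bash
# Illustrative chat request against the model name defined in the configs above.
curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-4", "messages": [{"role": "user", "content": "Hello"}]}'
```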
backend/cpp/llama/Makefile

@@ -43,31 +43,23 @@ llama.cpp:
 
 llama.cpp/examples/grpc-server: llama.cpp
 	mkdir -p llama.cpp/examples/grpc-server
-	cp -r $(abspath ./)/CMakeLists.txt llama.cpp/examples/grpc-server/
-	cp -r $(abspath ./)/grpc-server.cpp llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/json.hpp llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/utils.hpp llama.cpp/examples/grpc-server/
-	echo "add_subdirectory(grpc-server)" >> llama.cpp/examples/CMakeLists.txt
-	## XXX: In some versions of CMake clip wasn't being built before llama.
-	## This is an hack for now, but it should be fixed in the future.
-	cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
-	cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
-	echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
-	cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
-	cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
+	bash prepare.sh
 
 rebuild:
-	cp -rfv $(abspath ./)/CMakeLists.txt llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/grpc-server.cpp llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/json.hpp llama.cpp/examples/grpc-server/
+	bash prepare.sh
 	rm -rf grpc-server
 	$(MAKE) grpc-server
 
-clean:
-	rm -rf llama.cpp
+purge:
+	rm -rf llama.cpp/build
+	rm -rf llama.cpp/examples/grpc-server
 	rm -rf grpc-server
 
+clean: purge
+	rm -rf llama.cpp
+
 grpc-server: llama.cpp llama.cpp/examples/grpc-server
 	@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
 ifneq (,$(findstring sycl,$(BUILD_TYPE)))
 	bash -c "source $(ONEAPI_VARS); \
 	cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release"
backend/cpp/llama/prepare.sh (new file, 20 lines)

@@ -0,0 +1,20 @@
#!/bin/bash

cp -r CMakeLists.txt llama.cpp/examples/grpc-server/
cp -r grpc-server.cpp llama.cpp/examples/grpc-server/
cp -rfv json.hpp llama.cpp/examples/grpc-server/
cp -rfv utils.hpp llama.cpp/examples/grpc-server/

if grep -q "grpc-server" llama.cpp/examples/CMakeLists.txt; then
    echo "grpc-server already added"
else
    echo "add_subdirectory(grpc-server)" >> llama.cpp/examples/CMakeLists.txt
fi

## XXX: In some versions of CMake clip wasn't being built before llama.
## This is an hack for now, but it should be fixed in the future.
cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
@@ -4,6 +4,7 @@ package main
 // It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
 import (
 	"fmt"
+	"os"

 	"github.com/go-skynet/LocalAI/pkg/grpc/base"
 	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
@@ -18,9 +19,14 @@ type LLM struct {
 }

 func (llm *LLM) Load(opts *pb.ModelOptions) error {
-	llm.langchain, _ = langchain.NewHuggingFace(opts.Model)
+	var err error
+	hfToken := os.Getenv("HUGGINGFACEHUB_API_TOKEN")
+	if hfToken == "" {
+		return fmt.Errorf("no huggingface token provided")
+	}
+	llm.langchain, err = langchain.NewHuggingFace(opts.Model, hfToken)
 	llm.model = opts.Model
-	return nil
+	return err
 }

 func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
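Note on the hunk above: the langchain/huggingface backend now refuses to load unless HUGGINGFACEHUB_API_TOKEN is exported. A standalone Go sketch of the same fail-fast guard (requireEnv is an illustrative helper, not part of LocalAI):

package main

import (
	"fmt"
	"os"
)

// requireEnv returns the value of an environment variable, or an error when it
// is unset, mirroring the check the backend now performs on
// HUGGINGFACEHUB_API_TOKEN before constructing the HuggingFace client.
func requireEnv(name string) (string, error) {
	v := os.Getenv(name)
	if v == "" {
		return "", fmt.Errorf("no %s provided", name)
	}
	return v, nil
}

func main() {
	token, err := requireEnv("HUGGINGFACEHUB_API_TOKEN")
	if err != nil {
		fmt.Println("refusing to start backend:", err)
		return
	}
	fmt.Println("token found, length:", len(token))
}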
@@ -11,8 +11,8 @@ import (
 	"github.com/go-skynet/LocalAI/core/schema"
 )

-func runCommand(command []string) (string, error) {
-	cmd := exec.Command(command[0], command[1:]...)
+func ffmpegCommand(args []string) (string, error) {
+	cmd := exec.Command("ffmpeg", args...) // Constrain this to ffmpeg to permit security scanner to see that the command is safe.
 	cmd.Env = os.Environ()
 	out, err := cmd.CombinedOutput()
 	return string(out), err
@@ -21,16 +21,16 @@ func runCommand(command []string) (string, error) {
 // AudioToWav converts audio to wav for transcribe.
 // TODO: use https://github.com/mccoyst/ogg?
 func audioToWav(src, dst string) error {
-	command := []string{"ffmpeg", "-i", src, "-format", "s16le", "-ar", "16000", "-ac", "1", "-acodec", "pcm_s16le", dst}
-	out, err := runCommand(command)
+	commandArgs := []string{"-i", src, "-format", "s16le", "-ar", "16000", "-ac", "1", "-acodec", "pcm_s16le", dst}
+	out, err := ffmpegCommand(commandArgs)
 	if err != nil {
 		return fmt.Errorf("error: %w out: %s", err, out)
 	}
 	return nil
 }

-func Transcript(model whisper.Model, audiopath, language string, threads uint) (schema.Result, error) {
-	res := schema.Result{}
+func Transcript(model whisper.Model, audiopath, language string, threads uint) (schema.TranscriptionResult, error) {
+	res := schema.TranscriptionResult{}

 	dir, err := os.MkdirTemp("", "whisper")
 	if err != nil {

@@ -21,6 +21,6 @@ func (sd *Whisper) Load(opts *pb.ModelOptions) error {
 	return err
 }

-func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (schema.Result, error) {
+func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (schema.TranscriptionResult, error) {
 	return Transcript(sd.whisper, opts.Dst, opts.Language, uint(opts.Threads))
 }
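The refactor above pins the executable name to ffmpeg and passes only the argument list, which is what lets static analysis treat the call as safe, and it keeps producing the 16 kHz, mono, signed 16-bit PCM wav that whisper.cpp expects. A self-contained sketch of the same call pattern (convertToWav is an illustrative name, not the project's helper):

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// convertToWav shells out to ffmpeg with a fixed argument list, producing a
// 16 kHz, mono, signed 16-bit PCM wav suitable for whisper.cpp. The binary
// name is hard-coded so only ffmpeg can ever be executed.
func convertToWav(src, dst string) error {
	args := []string{"-i", src, "-format", "s16le", "-ar", "16000", "-ac", "1", "-acodec", "pcm_s16le", dst}
	cmd := exec.Command("ffmpeg", args...)
	cmd.Env = os.Environ()
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("error: %w out: %s", err, out)
	}
	return nil
}

func main() {
	if err := convertToWav("input.ogg", "output.wav"); err != nil {
		fmt.Println(err)
	}
}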
@@ -41,7 +41,7 @@ dependencies:
   - filelock==3.12.4
   - frozenlist==1.4.0
   - fsspec==2023.6.0
-  - grpcio==1.59.0
+  - grpcio==1.63.0
   - huggingface-hub==0.16.4
   - idna==3.4
   - jinja2==3.1.2

@@ -26,7 +26,7 @@ if [ -d "/opt/intel" ]; then
     # Intel GPU: If the directory exists, we assume we are using the intel image
     # (no conda env)
     # https://github.com/intel/intel-extension-for-pytorch/issues/538
-    pip install intel-extension-for-transformers datasets sentencepiece tiktoken neural_speed optimum[openvino]
+    pip install torch==2.1.0.post0 torchvision==0.16.0.post0 torchaudio==2.1.0.post0 intel-extension-for-pytorch==2.1.20+xpu oneccl_bind_pt==2.1.200+xpu intel-extension-for-transformers datasets sentencepiece tiktoken neural_speed optimum[openvino] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 fi

 # If we didn't skip conda, activate the environment

@@ -47,7 +47,7 @@ dependencies:
   - frozenlist==1.4.0
   - fsspec==2023.6.0
   - funcy==2.0
-  - grpcio==1.59.0
+  - grpcio==1.63.0
   - huggingface-hub
   - idna==3.4
   - jinja2==3.1.2

@@ -48,7 +48,7 @@ dependencies:
   - frozenlist==1.4.0
   - fsspec==2023.6.0
   - funcy==2.0
-  - grpcio==1.59.0
+  - grpcio==1.63.0
   - huggingface-hub
   - idna==3.4
   - jinja2==3.1.2
@@ -47,7 +47,7 @@ dependencies:
   - frozenlist==1.4.0
   - fsspec==2023.6.0
   - funcy==2.0
-  - grpcio==1.59.0
+  - grpcio==1.63.0
   - huggingface-hub
   - humanfriendly==10.0
   - idna==3.4
@@ -60,9 +60,10 @@ dependencies:
   - networkx
   - numpy==1.26.0
   - onnx==1.15.0
-  - openvino==2024.0.0
-  - openvino-telemetry==2023.2.1
-  - optimum[openvino]==1.17.1
+  - openvino==2024.1.0
+  - openvino-telemetry==2024.1.0
+  - optimum[openvino]==1.19.1
+  - optimum-intel==1.16.1
   - packaging==23.2
   - pandas
   - peft==0.5.0
@@ -34,7 +34,7 @@ dependencies:
|
||||
- diffusers==0.24.0
|
||||
- filelock==3.12.4
|
||||
- fsspec==2023.9.2
|
||||
- grpcio==1.59.0
|
||||
- grpcio==1.63.0
|
||||
- huggingface-hub>=0.19.4
|
||||
- idna==3.4
|
||||
- importlib-metadata==6.8.0
|
||||
|
||||
@@ -32,7 +32,7 @@ dependencies:
|
||||
- diffusers==0.24.0
|
||||
- filelock==3.12.4
|
||||
- fsspec==2023.9.2
|
||||
- grpcio==1.59.0
|
||||
- grpcio==1.63.0
|
||||
- huggingface-hub>=0.19.4
|
||||
- idna==3.4
|
||||
- importlib-metadata==6.8.0
|
||||
|
||||
@@ -31,8 +31,8 @@ if [ -d "/opt/intel" ]; then
|
||||
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
|
||||
|
||||
pip install google-api-python-client \
|
||||
grpcio \
|
||||
grpcio-tools \
|
||||
grpcio==1.63.0 \
|
||||
grpcio-tools==1.63.0 \
|
||||
diffusers==0.24.0 \
|
||||
transformers>=4.25.1 \
|
||||
accelerate \
|
||||
|
||||
@@ -27,7 +27,7 @@ dependencies:
|
||||
- pip:
|
||||
- filelock==3.12.4
|
||||
- fsspec==2023.9.2
|
||||
- grpcio==1.59.0
|
||||
- grpcio==1.63.0
|
||||
- jinja2==3.1.2
|
||||
- markupsafe==2.1.3
|
||||
- mpmath==1.3.0
|
||||
|
||||
@@ -27,7 +27,7 @@ dependencies:
|
||||
- pip:
|
||||
- filelock==3.12.4
|
||||
- fsspec==2023.9.2
|
||||
- grpcio==1.59.0
|
||||
- grpcio==1.63.0
|
||||
- markupsafe==2.1.3
|
||||
- mpmath==1.3.0
|
||||
- networkx==3.1
|
||||
|
||||
@@ -26,7 +26,7 @@ dependencies:
|
||||
- zlib=1.2.13=h5eee18b_0
|
||||
- pip:
|
||||
- accelerate>=0.11.0
|
||||
- grpcio==1.59.0
|
||||
- grpcio==1.63.0
|
||||
- numpy==1.26.0
|
||||
- nvidia-cublas-cu12==12.1.3.1
|
||||
- nvidia-cuda-cupti-cu12==12.1.105
|
||||
|
||||
@@ -27,7 +27,7 @@ dependencies:
|
||||
- pip:
|
||||
- accelerate>=0.11.0
|
||||
- numpy==1.26.0
|
||||
- grpcio==1.59.0
|
||||
- grpcio==1.63.0
|
||||
- torch==2.1.0
|
||||
- transformers>=4.34.0
|
||||
- descript-audio-codec
|
||||
|
||||
@@ -89,8 +89,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
        quantization = None

        if self.CUDA:
-           if request.Device:
-               device_map=request.Device
+           if request.MainGPU:
+               device_map=request.MainGPU
            else:
                device_map="cuda:0"
            if request.Quantization == "bnb_4bit":
@@ -143,14 +143,49 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
from optimum.intel.openvino import OVModelForCausalLM
|
||||
from openvino.runtime import Core
|
||||
|
||||
if "GPU" in Core().available_devices:
|
||||
device_map="GPU"
|
||||
if request.MainGPU:
|
||||
device_map=request.MainGPU
|
||||
else:
|
||||
device_map="CPU"
|
||||
device_map="AUTO"
|
||||
devices = Core().available_devices
|
||||
if "GPU" in " ".join(devices):
|
||||
device_map="AUTO:GPU"
|
||||
# While working on a fine tuned model, inference may give an inaccuracy and performance drop on GPU if winograd convolutions are selected.
|
||||
# https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.html
|
||||
if "CPU" or "NPU" in device_map:
|
||||
if "-CPU" or "-NPU" not in device_map:
|
||||
ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT"}
|
||||
else:
|
||||
ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT","GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
|
||||
self.model = OVModelForCausalLM.from_pretrained(model_name,
|
||||
compile=True,
|
||||
trust_remote_code=request.TrustRemoteCode,
|
||||
ov_config={"PERFORMANCE_HINT": "LATENCY"},
|
||||
ov_config=ovconfig,
|
||||
device=device_map)
|
||||
self.OV = True
|
||||
elif request.Type == "OVModelForFeatureExtraction":
|
||||
from optimum.intel.openvino import OVModelForFeatureExtraction
|
||||
from openvino.runtime import Core
|
||||
|
||||
if request.MainGPU:
|
||||
device_map=request.MainGPU
|
||||
else:
|
||||
device_map="AUTO"
|
||||
devices = Core().available_devices
|
||||
if "GPU" in " ".join(devices):
|
||||
device_map="AUTO:GPU"
|
||||
# While working on a fine tuned model, inference may give an inaccuracy and performance drop on GPU if winograd convolutions are selected.
|
||||
# https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.html
|
||||
if "CPU" or "NPU" in device_map:
|
||||
if "-CPU" or "-NPU" not in device_map:
|
||||
ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT"}
|
||||
else:
|
||||
ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT","GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
|
||||
self.model = OVModelForFeatureExtraction.from_pretrained(model_name,
|
||||
compile=True,
|
||||
trust_remote_code=request.TrustRemoteCode,
|
||||
ov_config=ovconfig,
|
||||
export=True,
|
||||
device=device_map)
|
||||
self.OV = True
|
||||
else:
|
||||
@@ -211,8 +246,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):

        # Pool to get sentence embeddings; i.e. generate one 1024 vector for the entire sentence
        sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
-       print("Calculated embeddings for: " + request.Embeddings, file=sys.stderr)
-       print("Embeddings:", sentence_embeddings, file=sys.stderr)
+       # print("Calculated embeddings for: " + request.Embeddings, file=sys.stderr)
+       # print("Embeddings:", sentence_embeddings, file=sys.stderr)
        return backend_pb2.EmbeddingResult(embeddings=sentence_embeddings[0])

    async def _predict(self, request, context, streaming=False):
@@ -356,4 +391,4 @@ if __name__ == "__main__":
    )
    args = parser.parse_args()

-   asyncio.run(serve(args.addr))
+   asyncio.run(serve(args.addr))

@@ -42,7 +42,7 @@ dependencies:
   - future==0.18.3
   - gradio==3.47.1
   - gradio-client==0.6.0
-  - grpcio==1.59.0
+  - grpcio==1.63.0
   - h11==0.14.0
   - httpcore==0.18.0
   - httpx==0.25.0
core/application.go (new file, 39 lines)
@@ -0,0 +1,39 @@
+package core
+
+import (
+	"github.com/go-skynet/LocalAI/core/config"
+	"github.com/go-skynet/LocalAI/core/services"
+	"github.com/go-skynet/LocalAI/pkg/model"
+)
+
+// The purpose of this structure is to hold pointers to all initialized services, to make plumbing easy
+// Perhaps a proper DI system is worth it in the future, but for now keep things simple.
+type Application struct {
+
+	// Application-Level Config
+	ApplicationConfig *config.ApplicationConfig
+	// ApplicationState *ApplicationState
+
+	// Core Low-Level Services
+	BackendConfigLoader *config.BackendConfigLoader
+	ModelLoader         *model.ModelLoader
+
+	// Backend Services
+	// EmbeddingsBackendService *backend.EmbeddingsBackendService
+	// ImageGenerationBackendService *backend.ImageGenerationBackendService
+	// LLMBackendService *backend.LLMBackendService
+	// TranscriptionBackendService *backend.TranscriptionBackendService
+	// TextToSpeechBackendService *backend.TextToSpeechBackendService
+
+	// LocalAI System Services
+	BackendMonitorService *services.BackendMonitorService
+	GalleryService        *services.GalleryService
+	ListModelsService     *services.ListModelsService
+	LocalAIMetricsService *services.LocalAIMetricsService
+	// OpenAIService *services.OpenAIService
+}
+
+// TODO [NEXT PR?]: Break up ApplicationConfig.
+// Migrate over stuff that is not set via config at all - especially runtime stuff
+type ApplicationState struct {
+}
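core/application.go above only declares the service container. As one possible illustration of how it could be populated, a hypothetical constructor follows; newApplication is not part of the diff, only the types and the NewBackendConfigLoader/NewModelLoader calls appear elsewhere in this changeset:

package core

import (
	"github.com/go-skynet/LocalAI/core/config"
	"github.com/go-skynet/LocalAI/pkg/model"
)

// newApplication is an illustrative sketch showing how the pointers in
// Application could be wired once the low-level services exist; the remaining
// service fields would be filled in by the HTTP startup code.
func newApplication(appConfig *config.ApplicationConfig) *Application {
	return &Application{
		ApplicationConfig:   appConfig,
		BackendConfigLoader: config.NewBackendConfigLoader(),
		ModelLoader:         model.NewModelLoader(appConfig.ModelPath),
	}
}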
@@ -7,7 +7,8 @@ import (

 	"github.com/go-skynet/LocalAI/core/config"
 	pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
-	model "github.com/go-skynet/LocalAI/pkg/model"
+	"github.com/go-skynet/LocalAI/pkg/model"
+	"github.com/rs/zerolog/log"
 )

 func modelOpts(c config.BackendConfig, so *config.ApplicationConfig, opts []model.Option) []model.Option {
@@ -109,8 +110,12 @@ func gRPCPredictOpts(c config.BackendConfig, modelPath string) *pb.PredictOption
 	promptCachePath := ""
 	if c.PromptCachePath != "" {
 		p := filepath.Join(modelPath, c.PromptCachePath)
-		os.MkdirAll(filepath.Dir(p), 0755)
-		promptCachePath = p
+		err := os.MkdirAll(filepath.Dir(p), 0750)
+		if err == nil {
+			promptCachePath = p
+		} else {
+			log.Error().Err(err).Str("promptCachePath", promptCachePath).Msg("error creating prompt cache folder")
+		}
 	}

 	return &pb.PredictOptions{
@@ -11,7 +11,7 @@ import (
 	model "github.com/go-skynet/LocalAI/pkg/model"
 )

-func ModelTranscription(audio, language string, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.Result, error) {
+func ModelTranscription(audio, language string, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.TranscriptionResult, error) {

 	opts := modelOpts(backendConfig, appConfig, []model.Option{
 		model.WithBackendString(model.WhisperBackend),
@@ -53,7 +53,7 @@ func ModelTTS(backend, text, modelFile, voice string, loader *model.ModelLoader,
 		return "", nil, fmt.Errorf("could not load piper model")
 	}

-	if err := os.MkdirAll(appConfig.AudioDir, 0755); err != nil {
+	if err := os.MkdirAll(appConfig.AudioDir, 0750); err != nil {
 		return "", nil, fmt.Errorf("failed creating audio directory: %s", err)
 	}
@@ -64,7 +64,11 @@ func (mi *ModelsInstall) Run(ctx *Context) error {
 		progressbar.OptionClearOnFinish(),
 	)
 	progressCallback := func(fileName string, current string, total string, percentage float64) {
-		progressBar.Set(int(percentage * 10))
+		v := int(percentage * 10)
+		err := progressBar.Set(v)
+		if err != nil {
+			log.Error().Err(err).Str("filename", fileName).Int("value", v).Msg("error while updating progress bar")
+		}
 	}
 	err := gallery.InstallModelFromGallery(galleries, modelName, mi.ModelsPath, gallery.GalleryModel{}, progressCallback)
 	if err != nil {
@@ -42,7 +42,7 @@ type RunCMD struct {
 	CORSAllowOrigins string `env:"LOCALAI_CORS_ALLOW_ORIGINS,CORS_ALLOW_ORIGINS" group:"api"`
 	UploadLimit int `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"`
 	APIKeys []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"`
-	DisableWelcome bool `env:"LOCALAI_DISABLE_WELCOME,DISABLE_WELCOME" default:"false" help:"Disable welcome pages" group:"api"`
+	DisableWebUI bool `env:"LOCALAI_DISABLE_WEBUI,DISABLE_WEBUI" default:"false" help:"Disable webui" group:"api"`

 	ParallelRequests bool `env:"LOCALAI_PARALLEL_REQUESTS,PARALLEL_REQUESTS" help:"Enable backends to handle multiple requests in parallel if they support it (e.g.: llama.cpp or vllm)" group:"backends"`
 	SingleActiveBackend bool `env:"LOCALAI_SINGLE_ACTIVE_BACKEND,SINGLE_ACTIVE_BACKEND" help:"Allow only one backend to be run at a time" group:"backends"`
@@ -84,8 +84,8 @@ func (r *RunCMD) Run(ctx *Context) error {
 	idleWatchDog := r.EnableWatchdogIdle
 	busyWatchDog := r.EnableWatchdogBusy

-	if r.DisableWelcome {
-		opts = append(opts, config.DisableWelcomePage)
+	if r.DisableWebUI {
+		opts = append(opts, config.DisableWebUI)
 	}

 	if idleWatchDog || busyWatchDog {
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type TranscriptCMD struct {
|
||||
@@ -41,7 +42,12 @@ func (t *TranscriptCMD) Run(ctx *Context) error {
|
||||
|
||||
c.Threads = &t.Threads
|
||||
|
||||
defer ml.StopAllGRPC()
|
||||
defer func() {
|
||||
err := ml.StopAllGRPC()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("unable to stop all grpc processes")
|
||||
}
|
||||
}()
|
||||
|
||||
tr, err := backend.ModelTranscription(t.Filename, t.Language, ml, c, opts)
|
||||
if err != nil {
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type TTSCMD struct {
|
||||
@@ -40,7 +41,12 @@ func (t *TTSCMD) Run(ctx *Context) error {
|
||||
}
|
||||
ml := model.NewModelLoader(opts.ModelPath)
|
||||
|
||||
defer ml.StopAllGRPC()
|
||||
defer func() {
|
||||
err := ml.StopAllGRPC()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("unable to stop all grpc processes")
|
||||
}
|
||||
}()
|
||||
|
||||
options := config.BackendConfig{}
|
||||
options.SetDefaults()
|
||||
|
||||
@@ -15,7 +15,7 @@ type ApplicationConfig struct {
 	ConfigFile string
 	ModelPath string
 	UploadLimitMB, Threads, ContextSize int
-	DisableWelcomePage bool
+	DisableWebUI bool
 	F16 bool
 	Debug bool
 	ImageDir string
@@ -107,8 +107,8 @@ var EnableWatchDogBusyCheck = func(o *ApplicationConfig) {
 	o.WatchDogBusy = true
 }

-var DisableWelcomePage = func(o *ApplicationConfig) {
-	o.DisableWelcomePage = true
+var DisableWebUI = func(o *ApplicationConfig) {
+	o.DisableWebUI = true
 }

 func SetWatchDogBusyTimeout(t time.Duration) AppOption {
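DisableWebUI above is just another functional option applied to ApplicationConfig. A minimal, generic sketch of that option pattern, with stand-in types rather than LocalAI's real ones:

package main

import "fmt"

// Sketch of the functional-option pattern behind AppOption/DisableWebUI above.
// appConfig, appOption and the constructor are illustrative stand-ins.
type appConfig struct {
	DisableWebUI bool
	ContextSize  int
}

type appOption func(*appConfig)

// withContextSize returns an option that overrides the default context size.
func withContextSize(n int) appOption {
	return func(c *appConfig) { c.ContextSize = n }
}

// disableWebUI flips a boolean, mirroring config.DisableWebUI in the diff.
var disableWebUI appOption = func(c *appConfig) { c.DisableWebUI = true }

// newAppConfig applies the options in order on top of the defaults.
func newAppConfig(opts ...appOption) *appConfig {
	c := &appConfig{ContextSize: 512}
	for _, o := range opts {
		o(c)
	}
	return c
}

func main() {
	cfg := newAppConfig(disableWebUI, withContextSize(1024))
	fmt.Printf("%+v\n", cfg)
}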
@@ -1,23 +1,12 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/go-skynet/LocalAI/pkg/functions"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/charmbracelet/glamour"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -140,7 +129,7 @@ type LLMConfig struct {
 	EnforceEager bool `yaml:"enforce_eager"` // vLLM
 	SwapSpace int `yaml:"swap_space"` // vLLM
 	MaxModelLen int `yaml:"max_model_len"` // vLLM
-	TensorParallelSize int `yaml:"tensor_parallel_size"` // vLLM
+	TensorParallelSize int `yaml:"tensor_parallel_size"` // vLLM
 	MMProj string `yaml:"mmproj"`

 	RopeScaling string `yaml:"rope_scaling"`
@@ -184,6 +173,36 @@ func (c *BackendConfig) ShouldCallSpecificFunction() bool {
 	return len(c.functionCallNameString) > 0
 }

+// MMProjFileName returns the filename of the MMProj file
+// If the MMProj is a URL, it will return the MD5 of the URL which is the filename
+func (c *BackendConfig) MMProjFileName() string {
+	modelURL := downloader.ConvertURL(c.MMProj)
+	if downloader.LooksLikeURL(modelURL) {
+		return utils.MD5(modelURL)
+	}
+
+	return c.MMProj
+}
+
+func (c *BackendConfig) IsMMProjURL() bool {
+	return downloader.LooksLikeURL(downloader.ConvertURL(c.MMProj))
+}
+
+func (c *BackendConfig) IsModelURL() bool {
+	return downloader.LooksLikeURL(downloader.ConvertURL(c.Model))
+}
+
+// ModelFileName returns the filename of the model
+// If the model is a URL, it will return the MD5 of the URL which is the filename
+func (c *BackendConfig) ModelFileName() string {
+	modelURL := downloader.ConvertURL(c.Model)
+	if downloader.LooksLikeURL(modelURL) {
+		return utils.MD5(modelURL)
+	}
+
+	return c.Model
+}
+
 func (c *BackendConfig) FunctionToCall() string {
 	if c.functionCallNameString != "" &&
 		c.functionCallNameString != "none" && c.functionCallNameString != "auto" {
@@ -238,7 +257,13 @@ func (cfg *BackendConfig) SetDefaults(opts ...ConfigLoaderOption) {

 	if cfg.MMap == nil {
 		// MMap is enabled by default
-		cfg.MMap = &trueV
+
+		// Only exception is for Intel GPUs
+		if os.Getenv("XPU") != "" {
+			cfg.MMap = &falseV
+		} else {
+			cfg.MMap = &trueV
+		}
 	}

 	if cfg.MMlock == nil {
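ModelFileName and MMProjFileName above map a remote model URI to its on-disk cache name by hashing the expanded URL. A rough, self-contained sketch of that mapping; the huggingface:// expansion below is a simplified stand-in for downloader.ConvertURL:

package main

import (
	"crypto/md5"
	"fmt"
	"strings"
)

// cacheFileName mimics the idea behind ModelFileName(): remote model URIs are
// cached under the hex MD5 of the expanded URL, while plain local names are
// returned unchanged. The huggingface:// expansion is deliberately simplified.
func cacheFileName(model string) string {
	url := model
	if strings.HasPrefix(model, "huggingface://") {
		url = "https://huggingface.co/" + strings.TrimPrefix(model, "huggingface://")
	}
	if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") {
		return fmt.Sprintf("%x", md5.Sum([]byte(url)))
	}
	return model
}

func main() {
	fmt.Println(cacheFileName("huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf"))
	fmt.Println(cacheFileName("ggml-whisper-base.bin"))
}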
@@ -307,287 +332,3 @@ func (cfg *BackendConfig) SetDefaults(opts ...ConfigLoaderOption) {
|
||||
cfg.Debug = &trueV
|
||||
}
|
||||
}
|
||||
|
||||
////// Config Loader ////////
|
||||
|
||||
type BackendConfigLoader struct {
|
||||
configs map[string]BackendConfig
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type LoadOptions struct {
|
||||
debug bool
|
||||
threads, ctxSize int
|
||||
f16 bool
|
||||
}
|
||||
|
||||
func LoadOptionDebug(debug bool) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.debug = debug
|
||||
}
|
||||
}
|
||||
|
||||
func LoadOptionThreads(threads int) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.threads = threads
|
||||
}
|
||||
}
|
||||
|
||||
func LoadOptionContextSize(ctxSize int) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.ctxSize = ctxSize
|
||||
}
|
||||
}
|
||||
|
||||
func LoadOptionF16(f16 bool) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.f16 = f16
|
||||
}
|
||||
}
|
||||
|
||||
type ConfigLoaderOption func(*LoadOptions)
|
||||
|
||||
func (lo *LoadOptions) Apply(options ...ConfigLoaderOption) {
|
||||
for _, l := range options {
|
||||
l(lo)
|
||||
}
|
||||
}
|
||||
|
||||
// Load a config file for a model
|
||||
func (cl *BackendConfigLoader) LoadBackendConfigFileByName(modelName, modelPath string, opts ...ConfigLoaderOption) (*BackendConfig, error) {
|
||||
|
||||
// Load a config file if present after the model name
|
||||
cfg := &BackendConfig{
|
||||
PredictionOptions: schema.PredictionOptions{
|
||||
Model: modelName,
|
||||
},
|
||||
}
|
||||
|
||||
cfgExisting, exists := cl.GetBackendConfig(modelName)
|
||||
if exists {
|
||||
cfg = &cfgExisting
|
||||
} else {
|
||||
// Try loading a model config file
|
||||
modelConfig := filepath.Join(modelPath, modelName+".yaml")
|
||||
if _, err := os.Stat(modelConfig); err == nil {
|
||||
if err := cl.LoadBackendConfig(
|
||||
modelConfig, opts...,
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error())
|
||||
}
|
||||
cfgExisting, exists = cl.GetBackendConfig(modelName)
|
||||
if exists {
|
||||
cfg = &cfgExisting
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cfg.SetDefaults(opts...)
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func NewBackendConfigLoader() *BackendConfigLoader {
|
||||
return &BackendConfigLoader{
|
||||
configs: make(map[string]BackendConfig),
|
||||
}
|
||||
}
|
||||
func ReadBackendConfigFile(file string, opts ...ConfigLoaderOption) ([]*BackendConfig, error) {
|
||||
c := &[]*BackendConfig{}
|
||||
f, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read config file: %w", err)
|
||||
}
|
||||
if err := yaml.Unmarshal(f, c); err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
|
||||
}
|
||||
|
||||
for _, cc := range *c {
|
||||
cc.SetDefaults(opts...)
|
||||
}
|
||||
|
||||
return *c, nil
|
||||
}
|
||||
|
||||
func ReadBackendConfig(file string, opts ...ConfigLoaderOption) (*BackendConfig, error) {
|
||||
lo := &LoadOptions{}
|
||||
lo.Apply(opts...)
|
||||
|
||||
c := &BackendConfig{}
|
||||
f, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read config file: %w", err)
|
||||
}
|
||||
if err := yaml.Unmarshal(f, c); err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
|
||||
}
|
||||
|
||||
c.SetDefaults(opts...)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (cm *BackendConfigLoader) LoadBackendConfigFile(file string, opts ...ConfigLoaderOption) error {
|
||||
cm.Lock()
|
||||
defer cm.Unlock()
|
||||
c, err := ReadBackendConfigFile(file, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot load config file: %w", err)
|
||||
}
|
||||
|
||||
for _, cc := range c {
|
||||
cm.configs[cc.Name] = *cc
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) LoadBackendConfig(file string, opts ...ConfigLoaderOption) error {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
c, err := ReadBackendConfig(file, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read config file: %w", err)
|
||||
}
|
||||
|
||||
cl.configs[c.Name] = *c
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) GetBackendConfig(m string) (BackendConfig, bool) {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
v, exists := cl.configs[m]
|
||||
return v, exists
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) GetAllBackendConfigs() []BackendConfig {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
var res []BackendConfig
|
||||
for _, v := range cl.configs {
|
||||
res = append(res, v)
|
||||
}
|
||||
|
||||
sort.SliceStable(res, func(i, j int) bool {
|
||||
return res[i].Name < res[j].Name
|
||||
})
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) ListBackendConfigs() []string {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
var res []string
|
||||
for k := range cl.configs {
|
||||
res = append(res, k)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Preload prepare models if they are not local but url or huggingface repositories
|
||||
func (cl *BackendConfigLoader) Preload(modelPath string) error {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
|
||||
status := func(fileName, current, total string, percent float64) {
|
||||
utils.DisplayDownloadFunction(fileName, current, total, percent)
|
||||
}
|
||||
|
||||
log.Info().Msgf("Preloading models from %s", modelPath)
|
||||
|
||||
renderMode := "dark"
|
||||
if os.Getenv("COLOR") != "" {
|
||||
renderMode = os.Getenv("COLOR")
|
||||
}
|
||||
|
||||
glamText := func(t string) {
|
||||
out, err := glamour.Render(t, renderMode)
|
||||
if err == nil && os.Getenv("NO_COLOR") == "" {
|
||||
fmt.Println(out)
|
||||
} else {
|
||||
fmt.Println(t)
|
||||
}
|
||||
}
|
||||
|
||||
for i, config := range cl.configs {
|
||||
|
||||
// Download files and verify their SHA
|
||||
for i, file := range config.DownloadFiles {
|
||||
log.Debug().Msgf("Checking %q exists and matches SHA", file.Filename)
|
||||
|
||||
if err := utils.VerifyPath(file.Filename, modelPath); err != nil {
|
||||
return err
|
||||
}
|
||||
// Create file path
|
||||
filePath := filepath.Join(modelPath, file.Filename)
|
||||
|
||||
if err := downloader.DownloadFile(file.URI, filePath, file.SHA256, i, len(config.DownloadFiles), status); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
modelURL := config.PredictionOptions.Model
|
||||
modelURL = downloader.ConvertURL(modelURL)
|
||||
|
||||
if downloader.LooksLikeURL(modelURL) {
|
||||
// md5 of model name
|
||||
md5Name := utils.MD5(modelURL)
|
||||
|
||||
// check if file exists
|
||||
if _, err := os.Stat(filepath.Join(modelPath, md5Name)); errors.Is(err, os.ErrNotExist) {
|
||||
err := downloader.DownloadFile(modelURL, filepath.Join(modelPath, md5Name), "", 0, 0, status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cc := cl.configs[i]
|
||||
c := &cc
|
||||
c.PredictionOptions.Model = md5Name
|
||||
cl.configs[i] = *c
|
||||
}
|
||||
if cl.configs[i].Name != "" {
|
||||
glamText(fmt.Sprintf("**Model name**: _%s_", cl.configs[i].Name))
|
||||
}
|
||||
if cl.configs[i].Description != "" {
|
||||
//glamText("**Description**")
|
||||
glamText(cl.configs[i].Description)
|
||||
}
|
||||
if cl.configs[i].Usage != "" {
|
||||
//glamText("**Usage**")
|
||||
glamText(cl.configs[i].Usage)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadBackendConfigsFromPath reads all the configurations of the models from a path
|
||||
// (non-recursive)
|
||||
func (cm *BackendConfigLoader) LoadBackendConfigsFromPath(path string, opts ...ConfigLoaderOption) error {
|
||||
cm.Lock()
|
||||
defer cm.Unlock()
|
||||
entries, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files := make([]fs.FileInfo, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
info, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files = append(files, info)
|
||||
}
|
||||
for _, file := range files {
|
||||
// Skip templates, YAML and .keep files
|
||||
if !strings.Contains(file.Name(), ".yaml") && !strings.Contains(file.Name(), ".yml") {
|
||||
continue
|
||||
}
|
||||
c, err := ReadBackendConfig(filepath.Join(path, file.Name()), opts...)
|
||||
if err == nil {
|
||||
cm.configs[c.Name] = *c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
core/config/backend_config_loader.go (new file, 323 lines)
@@ -0,0 +1,323 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/charmbracelet/glamour"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type BackendConfigLoader struct {
|
||||
configs map[string]BackendConfig
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type LoadOptions struct {
|
||||
debug bool
|
||||
threads, ctxSize int
|
||||
f16 bool
|
||||
}
|
||||
|
||||
func LoadOptionDebug(debug bool) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.debug = debug
|
||||
}
|
||||
}
|
||||
|
||||
func LoadOptionThreads(threads int) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.threads = threads
|
||||
}
|
||||
}
|
||||
|
||||
func LoadOptionContextSize(ctxSize int) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.ctxSize = ctxSize
|
||||
}
|
||||
}
|
||||
|
||||
func LoadOptionF16(f16 bool) ConfigLoaderOption {
|
||||
return func(o *LoadOptions) {
|
||||
o.f16 = f16
|
||||
}
|
||||
}
|
||||
|
||||
type ConfigLoaderOption func(*LoadOptions)
|
||||
|
||||
func (lo *LoadOptions) Apply(options ...ConfigLoaderOption) {
|
||||
for _, l := range options {
|
||||
l(lo)
|
||||
}
|
||||
}
|
||||
|
||||
// Load a config file for a model
|
||||
func (cl *BackendConfigLoader) LoadBackendConfigFileByName(modelName, modelPath string, opts ...ConfigLoaderOption) (*BackendConfig, error) {
|
||||
|
||||
// Load a config file if present after the model name
|
||||
cfg := &BackendConfig{
|
||||
PredictionOptions: schema.PredictionOptions{
|
||||
Model: modelName,
|
||||
},
|
||||
}
|
||||
|
||||
cfgExisting, exists := cl.GetBackendConfig(modelName)
|
||||
if exists {
|
||||
cfg = &cfgExisting
|
||||
} else {
|
||||
// Try loading a model config file
|
||||
modelConfig := filepath.Join(modelPath, modelName+".yaml")
|
||||
if _, err := os.Stat(modelConfig); err == nil {
|
||||
if err := cl.LoadBackendConfig(
|
||||
modelConfig, opts...,
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error())
|
||||
}
|
||||
cfgExisting, exists = cl.GetBackendConfig(modelName)
|
||||
if exists {
|
||||
cfg = &cfgExisting
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cfg.SetDefaults(opts...)
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func NewBackendConfigLoader() *BackendConfigLoader {
|
||||
return &BackendConfigLoader{
|
||||
configs: make(map[string]BackendConfig),
|
||||
}
|
||||
}
|
||||
func ReadBackendConfigFile(file string, opts ...ConfigLoaderOption) ([]*BackendConfig, error) {
|
||||
c := &[]*BackendConfig{}
|
||||
f, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read config file: %w", err)
|
||||
}
|
||||
if err := yaml.Unmarshal(f, c); err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
|
||||
}
|
||||
|
||||
for _, cc := range *c {
|
||||
cc.SetDefaults(opts...)
|
||||
}
|
||||
|
||||
return *c, nil
|
||||
}
|
||||
|
||||
func ReadBackendConfig(file string, opts ...ConfigLoaderOption) (*BackendConfig, error) {
|
||||
lo := &LoadOptions{}
|
||||
lo.Apply(opts...)
|
||||
|
||||
c := &BackendConfig{}
|
||||
f, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot read config file: %w", err)
|
||||
}
|
||||
if err := yaml.Unmarshal(f, c); err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal config file: %w", err)
|
||||
}
|
||||
|
||||
c.SetDefaults(opts...)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (cm *BackendConfigLoader) LoadBackendConfigFile(file string, opts ...ConfigLoaderOption) error {
|
||||
cm.Lock()
|
||||
defer cm.Unlock()
|
||||
c, err := ReadBackendConfigFile(file, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot load config file: %w", err)
|
||||
}
|
||||
|
||||
for _, cc := range c {
|
||||
cm.configs[cc.Name] = *cc
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) LoadBackendConfig(file string, opts ...ConfigLoaderOption) error {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
c, err := ReadBackendConfig(file, opts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read config file: %w", err)
|
||||
}
|
||||
|
||||
cl.configs[c.Name] = *c
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) GetBackendConfig(m string) (BackendConfig, bool) {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
v, exists := cl.configs[m]
|
||||
return v, exists
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) GetAllBackendConfigs() []BackendConfig {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
var res []BackendConfig
|
||||
for _, v := range cl.configs {
|
||||
res = append(res, v)
|
||||
}
|
||||
|
||||
sort.SliceStable(res, func(i, j int) bool {
|
||||
return res[i].Name < res[j].Name
|
||||
})
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) RemoveBackendConfig(m string) {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
delete(cl.configs, m)
|
||||
}
|
||||
|
||||
func (cl *BackendConfigLoader) ListBackendConfigs() []string {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
var res []string
|
||||
for k := range cl.configs {
|
||||
res = append(res, k)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// Preload prepare models if they are not local but url or huggingface repositories
|
||||
func (cl *BackendConfigLoader) Preload(modelPath string) error {
|
||||
cl.Lock()
|
||||
defer cl.Unlock()
|
||||
|
||||
status := func(fileName, current, total string, percent float64) {
|
||||
utils.DisplayDownloadFunction(fileName, current, total, percent)
|
||||
}
|
||||
|
||||
log.Info().Msgf("Preloading models from %s", modelPath)
|
||||
|
||||
renderMode := "dark"
|
||||
if os.Getenv("COLOR") != "" {
|
||||
renderMode = os.Getenv("COLOR")
|
||||
}
|
||||
|
||||
glamText := func(t string) {
|
||||
out, err := glamour.Render(t, renderMode)
|
||||
if err == nil && os.Getenv("NO_COLOR") == "" {
|
||||
fmt.Println(out)
|
||||
} else {
|
||||
fmt.Println(t)
|
||||
}
|
||||
}
|
||||
|
||||
for i, config := range cl.configs {
|
||||
|
||||
// Download files and verify their SHA
|
||||
for i, file := range config.DownloadFiles {
|
||||
log.Debug().Msgf("Checking %q exists and matches SHA", file.Filename)
|
||||
|
||||
if err := utils.VerifyPath(file.Filename, modelPath); err != nil {
|
||||
return err
|
||||
}
|
||||
// Create file path
|
||||
filePath := filepath.Join(modelPath, file.Filename)
|
||||
|
||||
if err := downloader.DownloadFile(file.URI, filePath, file.SHA256, i, len(config.DownloadFiles), status); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If the model is an URL, expand it, and download the file
|
||||
if config.IsModelURL() {
|
||||
modelFileName := config.ModelFileName()
|
||||
modelURL := downloader.ConvertURL(config.Model)
|
||||
// check if file exists
|
||||
if _, err := os.Stat(filepath.Join(modelPath, modelFileName)); errors.Is(err, os.ErrNotExist) {
|
||||
err := downloader.DownloadFile(modelURL, filepath.Join(modelPath, modelFileName), "", 0, 0, status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cc := cl.configs[i]
|
||||
c := &cc
|
||||
c.PredictionOptions.Model = modelFileName
|
||||
cl.configs[i] = *c
|
||||
}
|
||||
|
||||
if config.IsMMProjURL() {
|
||||
modelFileName := config.MMProjFileName()
|
||||
modelURL := downloader.ConvertURL(config.MMProj)
|
||||
// check if file exists
|
||||
if _, err := os.Stat(filepath.Join(modelPath, modelFileName)); errors.Is(err, os.ErrNotExist) {
|
||||
err := downloader.DownloadFile(modelURL, filepath.Join(modelPath, modelFileName), "", 0, 0, status)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
cc := cl.configs[i]
|
||||
c := &cc
|
||||
c.MMProj = modelFileName
|
||||
cl.configs[i] = *c
|
||||
}
|
||||
|
||||
if cl.configs[i].Name != "" {
|
||||
glamText(fmt.Sprintf("**Model name**: _%s_", cl.configs[i].Name))
|
||||
}
|
||||
if cl.configs[i].Description != "" {
|
||||
//glamText("**Description**")
|
||||
glamText(cl.configs[i].Description)
|
||||
}
|
||||
if cl.configs[i].Usage != "" {
|
||||
//glamText("**Usage**")
|
||||
glamText(cl.configs[i].Usage)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadBackendConfigsFromPath reads all the configurations of the models from a path
|
||||
// (non-recursive)
|
||||
func (cm *BackendConfigLoader) LoadBackendConfigsFromPath(path string, opts ...ConfigLoaderOption) error {
|
||||
cm.Lock()
|
||||
defer cm.Unlock()
|
||||
entries, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files := make([]fs.FileInfo, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
info, err := entry.Info()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
files = append(files, info)
|
||||
}
|
||||
for _, file := range files {
|
||||
// Skip templates, YAML and .keep files
|
||||
if !strings.Contains(file.Name(), ".yaml") && !strings.Contains(file.Name(), ".yml") ||
|
||||
strings.HasPrefix(file.Name(), ".") {
|
||||
continue
|
||||
}
|
||||
c, err := ReadBackendConfig(filepath.Join(path, file.Name()), opts...)
|
||||
if err == nil {
|
||||
cm.configs[c.Name] = *c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,9 +1,9 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"embed"
|
||||
"errors"
|
||||
"os"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
@@ -20,6 +20,8 @@ import (
|
||||
"github.com/gofiber/contrib/fiberzerolog"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/cors"
|
||||
"github.com/gofiber/fiber/v2/middleware/favicon"
|
||||
"github.com/gofiber/fiber/v2/middleware/filesystem"
|
||||
"github.com/gofiber/fiber/v2/middleware/recover"
|
||||
|
||||
// swagger handler
|
||||
@@ -44,6 +46,11 @@ func readAuthHeader(c *fiber.Ctx) string {
|
||||
return authHeader
|
||||
}
|
||||
|
||||
// Embed a directory
|
||||
//
|
||||
//go:embed static/*
|
||||
var embedDirStatic embed.FS
|
||||
|
||||
// @title LocalAI API
|
||||
// @version 2.0.0
|
||||
// @description The LocalAI Rest API.
|
||||
@@ -124,20 +131,6 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi
|
||||
return c.Next()
|
||||
}
|
||||
|
||||
// Check for api_keys.json file
|
||||
fileContent, err := os.ReadFile("api_keys.json")
|
||||
if err == nil {
|
||||
// Parse JSON content from the file
|
||||
var fileKeys []string
|
||||
err := json.Unmarshal(fileContent, &fileKeys)
|
||||
if err != nil {
|
||||
return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{"message": "Error parsing api_keys.json"})
|
||||
}
|
||||
|
||||
// Add file keys to options.ApiKeys
|
||||
appConfig.ApiKeys = append(appConfig.ApiKeys, fileKeys...)
|
||||
}
|
||||
|
||||
if len(appConfig.ApiKeys) == 0 {
|
||||
return c.Next()
|
||||
}
|
||||
@@ -174,13 +167,6 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi
|
||||
app.Use(c)
|
||||
}
|
||||
|
||||
// Make sure directories exists
|
||||
os.MkdirAll(appConfig.ImageDir, 0755)
|
||||
os.MkdirAll(appConfig.AudioDir, 0755)
|
||||
os.MkdirAll(appConfig.UploadDir, 0755)
|
||||
os.MkdirAll(appConfig.ConfigsDir, 0755)
|
||||
os.MkdirAll(appConfig.ModelPath, 0755)
|
||||
|
||||
// Load config jsons
|
||||
utils.LoadConfig(appConfig.UploadDir, openai.UploadedFilesFile, &openai.UploadedFiles)
|
||||
utils.LoadConfig(appConfig.ConfigsDir, openai.AssistantsConfigFile, &openai.Assistants)
|
||||
@@ -192,10 +178,25 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi
|
||||
routes.RegisterElevenLabsRoutes(app, cl, ml, appConfig, auth)
|
||||
routes.RegisterLocalAIRoutes(app, cl, ml, appConfig, galleryService, auth)
|
||||
routes.RegisterOpenAIRoutes(app, cl, ml, appConfig, auth)
|
||||
routes.RegisterPagesRoutes(app, cl, ml, appConfig, auth)
|
||||
routes.RegisterUIRoutes(app, cl, ml, appConfig, galleryService, auth)
|
||||
if !appConfig.DisableWebUI {
|
||||
routes.RegisterUIRoutes(app, cl, ml, appConfig, galleryService, auth)
|
||||
}
|
||||
routes.RegisterJINARoutes(app, cl, ml, appConfig, auth)
|
||||
|
||||
httpFS := http.FS(embedDirStatic)
|
||||
|
||||
app.Use(favicon.New(favicon.Config{
|
||||
URL: "/favicon.ico",
|
||||
FileSystem: httpFS,
|
||||
File: "static/favicon.ico",
|
||||
}))
|
||||
|
||||
app.Use("/static", filesystem.New(filesystem.Config{
|
||||
Root: httpFS,
|
||||
PathPrefix: "static",
|
||||
Browse: true,
|
||||
}))
|
||||
|
||||
// Define a custom 404 handler
|
||||
// Note: keep this at the bottom!
|
||||
app.Use(notFoundHandler)
|
||||
|
||||
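The //go:embed change above bakes the static assets into the binary and serves them through Fiber's filesystem middleware. A stripped-down sketch of the same idea using only the standard library; the static/ directory and port are placeholders:

package main

import (
	"embed"
	"io/fs"
	"log"
	"net/http"
)

// Everything under static/ is compiled into the binary at build time.
// The directory must exist next to this file when building.
//
//go:embed static/*
var embeddedStatic embed.FS

func main() {
	// fs.Sub drops the leading "static" directory, so a request for
	// /static/favicon.ico resolves to the embedded static/favicon.ico.
	sub, err := fs.Sub(embeddedStatic, "static")
	if err != nil {
		log.Fatal(err)
	}
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.FS(sub))))
	log.Fatal(http.ListenAndServe(":8080", nil))
}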
@@ -222,7 +222,7 @@ var _ = Describe("API test", func() {

 			modelDir = filepath.Join(tmpdir, "models")
 			backendAssetsDir := filepath.Join(tmpdir, "backend-assets")
-			err = os.Mkdir(backendAssetsDir, 0755)
+			err = os.Mkdir(backendAssetsDir, 0750)
 			Expect(err).ToNot(HaveOccurred())

 			c, cancel = context.WithCancel(context.Background())
@@ -241,7 +241,7 @@ var _ = Describe("API test", func() {
 			}
 			out, err := yaml.Marshal(g)
 			Expect(err).ToNot(HaveOccurred())
-			err = os.WriteFile(filepath.Join(tmpdir, "gallery_simple.yaml"), out, 0644)
+			err = os.WriteFile(filepath.Join(tmpdir, "gallery_simple.yaml"), out, 0600)
 			Expect(err).ToNot(HaveOccurred())

 			galleries := []gallery.Gallery{
@@ -595,7 +595,7 @@ var _ = Describe("API test", func() {
 			Expect(err).ToNot(HaveOccurred())
 			modelDir = filepath.Join(tmpdir, "models")
 			backendAssetsDir := filepath.Join(tmpdir, "backend-assets")
-			err = os.Mkdir(backendAssetsDir, 0755)
+			err = os.Mkdir(backendAssetsDir, 0750)
 			Expect(err).ToNot(HaveOccurred())

 			c, cancel = context.WithCancel(context.Background())
@@ -708,10 +708,26 @@ var _ = Describe("API test", func() {
|
||||
// The response should contain an URL
|
||||
Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp))
|
||||
dat, err := io.ReadAll(resp.Body)
|
||||
Expect(err).ToNot(HaveOccurred(), string(dat))
|
||||
Expect(string(dat)).To(ContainSubstring("http://127.0.0.1:9090/"), string(dat))
|
||||
Expect(string(dat)).To(ContainSubstring(".png"), string(dat))
|
||||
Expect(err).ToNot(HaveOccurred(), "error reading /image/generations response")
|
||||
|
||||
imgUrlResp := &schema.OpenAIResponse{}
|
||||
err = json.Unmarshal(dat, imgUrlResp)
|
||||
Expect(imgUrlResp.Data).ToNot(Or(BeNil(), BeZero()))
|
||||
imgUrl := imgUrlResp.Data[0].URL
|
||||
Expect(imgUrl).To(ContainSubstring("http://127.0.0.1:9090/"), imgUrl)
|
||||
Expect(imgUrl).To(ContainSubstring(".png"), imgUrl)
|
||||
|
||||
imgResp, err := http.Get(imgUrl)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(imgResp).ToNot(BeNil())
|
||||
Expect(imgResp.StatusCode).To(Equal(200))
|
||||
Expect(imgResp.ContentLength).To(BeNumerically(">", 0))
|
||||
imgData := make([]byte, 512)
|
||||
count, err := io.ReadFull(imgResp.Body, imgData)
|
||||
Expect(err).To(Or(BeNil(), MatchError(io.EOF)))
|
||||
Expect(count).To(BeNumerically(">", 0))
|
||||
Expect(count).To(BeNumerically("<=", 512))
|
||||
Expect(http.DetectContentType(imgData)).To(Equal("image/png"))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -787,11 +803,11 @@ var _ = Describe("API test", func() {
 		})

 		It("returns errors", func() {
-			backends := len(model.AutoLoadBackends) + 1 // +1 for huggingface
 			_, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "foomodel", Prompt: testPrompt})
 			Expect(err).To(HaveOccurred())
-			Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("error, status code: 500, message: could not load model - all backends returned error: %d errors occurred:", backends)))
+			Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error:"))
 		})

 		It("transcribes audio", func() {
 			if runtime.GOOS != "linux" {
 				Skip("test supported only on linux")
@@ -2,19 +2,30 @@ package elements
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/chasefleming/elem-go"
|
||||
"github.com/chasefleming/elem-go/attrs"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/xsync"
|
||||
)
|
||||
|
||||
const (
|
||||
NoImage = "https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"
|
||||
noImage = "https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"
|
||||
)
|
||||
|
||||
func DoneProgress(uid string) string {
|
||||
func DoneProgress(galleryID, text string, showDelete bool) string {
|
||||
var modelName = galleryID
|
||||
// Split by @ and grab the name
|
||||
if strings.Contains(galleryID, "@") {
|
||||
modelName = strings.Split(galleryID, "@")[1]
|
||||
}
|
||||
|
||||
return elem.Div(
|
||||
attrs.Props{},
|
||||
attrs.Props{
|
||||
"id": "action-div-" + dropBadChars(galleryID),
|
||||
},
|
||||
elem.H3(
|
||||
attrs.Props{
|
||||
"role": "status",
|
||||
@@ -22,12 +33,13 @@ func DoneProgress(uid string) string {
|
||||
"tabindex": "-1",
|
||||
"autofocus": "",
|
||||
},
|
||||
elem.Text("Installation completed"),
|
||||
elem.Text(text),
|
||||
),
|
||||
elem.If(showDelete, deleteButton(galleryID, modelName), reInstallButton(galleryID)),
|
||||
).Render()
|
||||
}
|
||||
|
||||
func ErrorProgress(err string) string {
|
||||
func ErrorProgress(err, galleryName string) string {
|
||||
return elem.Div(
|
||||
attrs.Props{},
|
||||
elem.H3(
|
||||
@@ -37,8 +49,9 @@ func ErrorProgress(err string) string {
|
||||
"tabindex": "-1",
|
||||
"autofocus": "",
|
||||
},
|
||||
elem.Text("Error"+err),
|
||||
elem.Text("Error "+err),
|
||||
),
|
||||
installButton(galleryName),
|
||||
).Render()
|
||||
}
|
||||
|
||||
@@ -59,16 +72,17 @@ func ProgressBar(progress string) string {
|
||||
).Render()
|
||||
}
|
||||
|
||||
func StartProgressBar(uid, progress string) string {
|
||||
func StartProgressBar(uid, progress, text string) string {
|
||||
if progress == "" {
|
||||
progress = "0"
|
||||
}
|
||||
return elem.Div(attrs.Props{
|
||||
"hx-trigger": "done",
|
||||
"hx-get": "/browse/job/" + uid,
|
||||
"hx-swap": "outerHTML",
|
||||
"hx-target": "this",
|
||||
},
|
||||
return elem.Div(
|
||||
attrs.Props{
|
||||
"hx-trigger": "done",
|
||||
"hx-get": "/browse/job/" + uid,
|
||||
"hx-swap": "outerHTML",
|
||||
"hx-target": "this",
|
||||
},
|
||||
elem.H3(
|
||||
attrs.Props{
|
||||
"role": "status",
|
||||
@@ -76,8 +90,7 @@ func StartProgressBar(uid, progress string) string {
|
||||
"tabindex": "-1",
|
||||
"autofocus": "",
|
||||
},
|
||||
elem.Text("Installing"),
|
||||
// This is a simple example of how to use the HTMLX library to create a progress bar that updates every 600ms.
|
||||
elem.Text(text),
|
||||
elem.Div(attrs.Props{
|
||||
"hx-get": "/browse/job/progress/" + uid,
|
||||
"hx-trigger": "every 600ms",
|
||||
@@ -98,40 +111,132 @@ func cardSpan(text, icon string) elem.Node {
|
||||
elem.I(attrs.Props{
|
||||
"class": icon + " pr-2",
|
||||
}),
|
||||
|
||||
elem.Text(text),
|
||||
|
||||
//elem.Text(text),
|
||||
)
|
||||
}
|
||||
|
||||
func ListModels(models []*gallery.GalleryModel) string {
|
||||
modelsElements := []elem.Node{}
|
||||
span := func(s string) elem.Node {
|
||||
return elem.Span(
|
||||
func searchableElement(text, icon string) elem.Node {
|
||||
return elem.Form(
|
||||
attrs.Props{},
|
||||
elem.Input(
|
||||
attrs.Props{
|
||||
"class": "float-right inline-block bg-green-500 text-white py-1 px-3 rounded-full text-xs",
|
||||
"type": "hidden",
|
||||
"name": "search",
|
||||
"value": text,
|
||||
},
|
||||
elem.Text(s),
|
||||
)
|
||||
}
|
||||
installButton := func(m *gallery.GalleryModel) elem.Node {
|
||||
return elem.Button(
|
||||
),
|
||||
elem.Span(
|
||||
attrs.Props{
|
||||
"data-twe-ripple-init": "",
|
||||
"data-twe-ripple-color": "light",
|
||||
"class": "float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
|
||||
// post the Model ID as param
|
||||
"hx-post": "/browse/install/model/" + fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name),
|
||||
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2",
|
||||
},
|
||||
elem.I(
|
||||
|
||||
elem.A(
|
||||
attrs.Props{
|
||||
"class": "fa-solid fa-download pr-2",
|
||||
// "name": "search",
|
||||
// "value": text,
|
||||
//"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2",
|
||||
"href": "#!",
|
||||
"hx-post": "/browse/search/models",
|
||||
"hx-target": "#search-results",
|
||||
// TODO: this doesn't work
|
||||
// "hx-vals": `{ \"search\": \"` + text + `\" }`,
|
||||
"hx-indicator": ".htmx-indicator",
|
||||
},
|
||||
elem.I(attrs.Props{
|
||||
"class": icon + " pr-2",
|
||||
}),
|
||||
elem.Text(text),
|
||||
),
|
||||
elem.Text("Install"),
|
||||
)
|
||||
}
|
||||
),
|
||||
|
||||
//elem.Text(text),
|
||||
)
|
||||
}
|
||||
|
||||
func link(text, url string) elem.Node {
|
||||
return elem.A(
|
||||
attrs.Props{
|
||||
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2",
|
||||
"href": url,
|
||||
"target": "_blank",
|
||||
},
|
||||
elem.I(attrs.Props{
|
||||
"class": "fas fa-link pr-2",
|
||||
}),
|
||||
elem.Text(text),
|
||||
)
|
||||
}
|
||||
func installButton(galleryName string) elem.Node {
|
||||
return elem.Button(
|
||||
attrs.Props{
|
||||
"data-twe-ripple-init": "",
|
||||
"data-twe-ripple-color": "light",
|
||||
"class": "float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
|
||||
"hx-swap": "outerHTML",
|
||||
// post the Model ID as param
|
||||
"hx-post": "/browse/install/model/" + galleryName,
|
||||
},
|
||||
elem.I(
|
||||
attrs.Props{
|
||||
"class": "fa-solid fa-download pr-2",
|
||||
},
|
||||
),
|
||||
elem.Text("Install"),
|
||||
)
|
||||
}
|
||||
|
||||
func reInstallButton(galleryName string) elem.Node {
|
||||
return elem.Button(
|
||||
attrs.Props{
|
||||
"data-twe-ripple-init": "",
|
||||
"data-twe-ripple-color": "light",
|
||||
"class": "float-right inline-block rounded bg-primary ml-2 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
|
||||
"hx-target": "#action-div-" + dropBadChars(galleryName),
|
||||
"hx-swap": "outerHTML",
|
||||
// post the Model ID as param
|
||||
"hx-post": "/browse/install/model/" + galleryName,
|
||||
},
|
||||
elem.I(
|
||||
attrs.Props{
|
||||
"class": "fa-solid fa-arrow-rotate-right pr-2",
|
||||
},
|
||||
),
|
||||
elem.Text("Reinstall"),
|
||||
)
|
||||
}
|
||||
|
||||
func deleteButton(galleryID, modelName string) elem.Node {
|
||||
return elem.Button(
|
||||
attrs.Props{
|
||||
"data-twe-ripple-init": "",
|
||||
"data-twe-ripple-color": "light",
|
||||
"hx-confirm": "Are you sure you wish to delete the model?",
|
||||
"class": "float-right inline-block rounded bg-red-800 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-red-accent-300 hover:shadow-red-2 focus:bg-red-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-red-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
|
||||
"hx-target": "#action-div-" + dropBadChars(galleryID),
|
||||
"hx-swap": "outerHTML",
|
||||
// post the Model ID as param
|
||||
"hx-post": "/browse/delete/model/" + galleryID,
|
||||
},
|
||||
elem.I(
|
||||
attrs.Props{
|
||||
"class": "fa-solid fa-cancel pr-2",
|
||||
},
|
||||
),
|
||||
elem.Text("Delete"),
|
||||
)
|
||||
}
|
||||
|
||||
// Javascript/HTMX doesn't like weird IDs
|
||||
func dropBadChars(s string) string {
|
||||
return strings.ReplaceAll(s, "@", "__")
|
||||
}
|
||||
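dropBadChars above exists because the '@' in gallery IDs such as gallery@model-name is not safe inside the element IDs that HTMX targets with CSS selectors. A tiny standalone illustration (the sample ID is made up):

package main

import (
	"fmt"
	"strings"
)

// Gallery model IDs have the form "<gallery>@<model>"; '@' is rewritten to
// "__" so the value can be used as an HTML id for hx-target lookups.
func dropBadChars(s string) string {
	return strings.ReplaceAll(s, "@", "__")
}

func main() {
	id := "localai@hermes-2-pro-mistral"
	fmt.Println("action-div-" + dropBadChars(id)) // action-div-localai__hermes-2-pro-mistral
}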
|
||||
func ListModels(models []*gallery.GalleryModel, processing *xsync.SyncedMap[string, string], galleryService *services.GalleryService) string {
|
||||
modelsElements := []elem.Node{}
|
||||
descriptionDiv := func(m *gallery.GalleryModel) elem.Node {
|
||||
|
||||
return elem.Div(
|
||||
attrs.Props{
|
||||
"class": "p-6 text-surface dark:text-white",
|
||||
@@ -152,6 +257,20 @@ func ListModels(models []*gallery.GalleryModel) string {
|
||||
}
|
||||
|
||||
actionDiv := func(m *gallery.GalleryModel) elem.Node {
|
||||
galleryID := fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name)
|
||||
currentlyProcessing := processing.Exists(galleryID)
|
||||
jobID := ""
|
||||
isDeletionOp := false
|
||||
if currentlyProcessing {
|
||||
status := galleryService.GetStatus(galleryID)
|
||||
if status != nil && status.Deletion {
|
||||
isDeletionOp = true
|
||||
}
|
||||
jobID = processing.Get(galleryID)
|
||||
// TODO:
|
||||
// case not handled, if status == nil : "Waiting"
|
||||
}
|
||||
|
||||
nodes := []elem.Node{
|
||||
cardSpan("Repository: "+m.Gallery.Name, "fa-brands fa-git-alt"),
|
||||
}
|
||||
@@ -162,25 +281,31 @@ func ListModels(models []*gallery.GalleryModel) string {
|
||||
)
|
||||
}
|
||||
|
||||
tagsNodes := []elem.Node{}
|
||||
for _, tag := range m.Tags {
|
||||
nodes = append(nodes,
|
||||
cardSpan(tag, "fas fa-tag"),
|
||||
tagsNodes = append(tagsNodes,
|
||||
searchableElement(tag, "fas fa-tag"),
|
||||
)
|
||||
}
|
||||
|
||||
nodes = append(nodes,
|
||||
elem.Div(
|
||||
attrs.Props{
|
||||
"class": "flex flex-row flex-wrap content-center",
|
||||
},
|
||||
tagsNodes...,
|
||||
),
|
||||
)
|
||||
|
||||
for i, url := range m.URLs {
|
||||
nodes = append(nodes,
|
||||
elem.A(
|
||||
attrs.Props{
|
||||
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2",
|
||||
"href": url,
|
||||
"target": "_blank",
|
||||
},
|
||||
elem.I(attrs.Props{
|
||||
"class": "fas fa-link pr-2",
|
||||
}),
|
||||
elem.Text("Link #"+fmt.Sprintf("%d", i+1)),
|
||||
))
|
||||
link("Link #"+fmt.Sprintf("%d", i+1), url),
|
||||
)
|
||||
}
|
||||
|
||||
progressMessage := "Installation"
|
||||
if isDeletionOp {
|
||||
progressMessage = "Deletion"
|
||||
}
|
||||
|
||||
return elem.Div(
|
||||
@@ -193,23 +318,41 @@ func ListModels(models []*gallery.GalleryModel) string {
|
||||
},
|
||||
nodes...,
|
||||
),
|
||||
elem.If(m.Installed, span("Installed"), installButton(m)),
|
||||
elem.Div(
|
||||
attrs.Props{
|
||||
"id": "action-div-" + dropBadChars(galleryID),
|
||||
},
|
||||
elem.If(
|
||||
currentlyProcessing,
|
||||
elem.Node( // If currently installing, show progress bar
|
||||
elem.Raw(StartProgressBar(jobID, "0", progressMessage)),
|
||||
), // Otherwise, show install button (if not installed) or display "Installed"
|
||||
elem.If(m.Installed,
|
||||
elem.Node(elem.Div(
|
||||
attrs.Props{},
|
||||
reInstallButton(m.ID()),
|
||||
deleteButton(m.ID(), m.Name),
|
||||
)),
|
||||
installButton(m.ID()),
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
for _, m := range models {
|
||||
|
||||
elems := []elem.Node{}
|
||||
|
||||
if m.Icon == "" {
|
||||
m.Icon = NoImage
|
||||
m.Icon = noImage
|
||||
}
|
||||
|
||||
divProperties := attrs.Props{
|
||||
"class": "flex justify-center items-center",
|
||||
}
|
||||
|
||||
elems = append(elems,
|
||||
|
||||
elem.Div(attrs.Props{
|
||||
"class": "flex justify-center items-center",
|
||||
},
|
||||
elem.Div(divProperties,
|
||||
elem.A(attrs.Props{
|
||||
"href": "#!",
|
||||
// "class": "justify-center items-center",
|
||||
@@ -220,7 +363,23 @@ func ListModels(models []*gallery.GalleryModel) string {
|
||||
"src": m.Icon,
|
||||
}),
|
||||
),
|
||||
),
|
||||
)
|
||||
|
||||
// Special/corner case: if a model sets Trust Remote Code as required, show a warning
|
||||
// TODO: handle this more generically later
|
||||
_, trustRemoteCodeExists := m.Overrides["trust_remote_code"]
|
||||
if trustRemoteCodeExists {
|
||||
elems = append(elems, elem.Div(
|
||||
attrs.Props{
|
||||
"class": "flex justify-center items-center bg-red-500 text-white p-2 rounded-lg mt-2",
|
||||
},
|
||||
elem.I(attrs.Props{
|
||||
"class": "fa-solid fa-circle-exclamation pr-2",
|
||||
}),
|
||||
elem.Text("Attention: Trust Remote Code is required for this model"),
|
||||
))
|
||||
}
|
||||
|
||||
elems = append(elems, descriptionDiv(m), actionDiv(m))
|
||||
modelsElements = append(modelsElements,
|
||||
@@ -240,7 +399,6 @@ func ListModels(models []*gallery.GalleryModel) string {
|
||||
|
||||
wrapper := elem.Div(attrs.Props{
|
||||
"class": "dark grid grid-cols-1 grid-rows-1 md:grid-cols-3 block rounded-lg shadow-secondary-1 dark:bg-surface-dark",
|
||||
//"class": "block rounded-lg bg-white shadow-secondary-1 dark:bg-surface-dark",
|
||||
}, modelsElements...)
|
||||
|
||||
return wrapper.Render()
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func BackendMonitorEndpoint(bm services.BackendMonitor) func(c *fiber.Ctx) error {
|
||||
func BackendMonitorEndpoint(bm *services.BackendMonitorService) func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
|
||||
input := new(schema.BackendMonitorRequest)
|
||||
@@ -23,7 +23,7 @@ func BackendMonitorEndpoint(bm services.BackendMonitor) func(c *fiber.Ctx) error
|
||||
}
|
||||
}
|
||||
|
||||
func BackendShutdownEndpoint(bm services.BackendMonitor) func(c *fiber.Ctx) error {
|
||||
func BackendShutdownEndpoint(bm *services.BackendMonitorService) func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
input := new(schema.BackendMonitorRequest)
|
||||
// Get input data from the request body
|
||||
|
||||
@@ -61,11 +61,11 @@ func (mgs *ModelGalleryEndpointService) ApplyModelGalleryEndpoint() func(c *fibe
|
||||
return err
|
||||
}
|
||||
mgs.galleryApplier.C <- gallery.GalleryOp{
|
||||
Req: input.GalleryModel,
|
||||
Id: uuid.String(),
|
||||
GalleryName: input.ID,
|
||||
Galleries: mgs.galleries,
|
||||
ConfigURL: input.ConfigURL,
|
||||
Req: input.GalleryModel,
|
||||
Id: uuid.String(),
|
||||
GalleryModelName: input.ID,
|
||||
Galleries: mgs.galleries,
|
||||
ConfigURL: input.ConfigURL,
|
||||
}
|
||||
return c.JSON(struct {
|
||||
ID string `json:"uuid"`
|
||||
@@ -74,6 +74,27 @@ func (mgs *ModelGalleryEndpointService) ApplyModelGalleryEndpoint() func(c *fibe
|
||||
}
|
||||
}
|
||||
|
||||
func (mgs *ModelGalleryEndpointService) DeleteModelGalleryEndpoint() func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
modelName := c.Params("name")

uuid, err := uuid.NewUUID()
if err != nil {
return err
}

mgs.galleryApplier.C <- gallery.GalleryOp{
Id: uuid.String(),
Delete: true,
GalleryModelName: modelName,
}
|
||||
|
||||
return c.JSON(struct {
|
||||
ID string `json:"uuid"`
|
||||
StatusURL string `json:"status"`
|
||||
}{ID: uuid.String(), StatusURL: c.BaseURL() + "/models/jobs/" + uuid.String()})
|
||||
}
|
||||
}
|
||||
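The route added later in this patch exposes the handler above as POST /models/delete/:name. A minimal, hedged usage sketch from a Go client (host and model name are placeholders) that decodes the {uuid, status} response and could then poll the returned status URL:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type jobResponse struct {
	ID        string `json:"uuid"`
	StatusURL string `json:"status"`
}

func main() {
	// Placeholder host and model name.
	resp, err := http.Post("http://localhost:8080/models/delete/some-model", "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var job jobResponse
	if err := json.NewDecoder(resp.Body).Decode(&job); err != nil {
		panic(err)
	}
	// StatusURL points at /models/jobs/<uuid> and can be polled until the job completes.
	fmt.Println(job.ID, job.StatusURL)
}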
|
||||
func (mgs *ModelGalleryEndpointService) ListModelFromGalleryEndpoint() func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
log.Debug().Msgf("Listing models from galleries: %+v", mgs.galleries)
|
||||
|
||||
@@ -3,22 +3,39 @@ package localai
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/internal"
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func WelcomeEndpoint(appConfig *config.ApplicationConfig,
|
||||
cl *config.BackendConfigLoader, ml *model.ModelLoader) func(*fiber.Ctx) error {
|
||||
cl *config.BackendConfigLoader, ml *model.ModelLoader, modelStatus func() (map[string]string, map[string]string)) func(*fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
models, _ := ml.ListModels()
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
galleryConfigs := map[string]*gallery.Config{}
|
||||
for _, m := range backendConfigs {
|
||||
|
||||
cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
galleryConfigs[m.Name] = cfg
|
||||
}
|
||||
|
||||
// Get model statuses to display in the UI the operation in progress
|
||||
processingModels, taskTypes := modelStatus()
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI API - " + internal.PrintableVersion(),
|
||||
"Version": internal.PrintableVersion(),
|
||||
"Models": models,
|
||||
"ModelsConfig": backendConfigs,
|
||||
"GalleryConfig": galleryConfigs,
|
||||
"ApplicationConfig": appConfig,
|
||||
"ProcessingModels": processingModels,
|
||||
"TaskTypes": taskTypes,
|
||||
}
|
||||
|
||||
if string(c.Context().Request.Header.ContentType()) == "application/json" || len(c.Accepts("html")) == 0 {
|
||||
|
||||
@@ -3,10 +3,6 @@ package openai
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
@@ -16,6 +12,11 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var configsDir string = "/tmp/localai/configs"
|
||||
@@ -49,8 +50,8 @@ func TestAssistantEndpoints(t *testing.T) {
|
||||
}
|
||||
|
||||
_ = os.RemoveAll(appConfig.ConfigsDir)
|
||||
_ = os.MkdirAll(appConfig.ConfigsDir, 0755)
|
||||
_ = os.MkdirAll(modelPath, 0755)
|
||||
_ = os.MkdirAll(appConfig.ConfigsDir, 0750)
|
||||
_ = os.MkdirAll(modelPath, 0750)
|
||||
os.Create(filepath.Join(modelPath, "ggml-gpt4all-j"))
|
||||
|
||||
app := fiber.New(fiber.Config{
|
||||
|
||||
@@ -251,7 +251,7 @@ func newMultipartFile(filePath, tag, purpose string) (*strings.Reader, *multipar
|
||||
|
||||
// Helper to create test files
|
||||
func createTestFile(t *testing.T, name string, sizeMB int, option *config.ApplicationConfig) *os.File {
|
||||
err := os.MkdirAll(option.UploadDir, 0755)
|
||||
err := os.MkdirAll(option.UploadDir, 0750)
|
||||
if err != nil {
|
||||
|
||||
t.Fatalf("Error MKDIR: %v", err)
|
||||
|
||||
@@ -1,63 +1,23 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func ListModelsEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader) func(ctx *fiber.Ctx) error {
|
||||
func ListModelsEndpoint(lms *services.ListModelsService) func(ctx *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
models, err := ml.ListModels()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mm map[string]interface{} = map[string]interface{}{}
|
||||
|
||||
dataModels := []schema.OpenAIModel{}
|
||||
|
||||
var filterFn func(name string) bool
|
||||
// If blank, no filter is applied.
|
||||
filter := c.Query("filter")
|
||||
|
||||
// If filter is not specified, do not filter the list by model name
|
||||
if filter == "" {
|
||||
filterFn = func(_ string) bool { return true }
|
||||
} else {
|
||||
// If filter _IS_ specified, we compile it to a regex which is used to create the filterFn
|
||||
rxp, err := regexp.Compile(filter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filterFn = func(name string) bool {
|
||||
return rxp.MatchString(name)
|
||||
}
|
||||
}
|
||||
|
||||
// By default, exclude any loose files that are already referenced by a configuration file.
|
||||
excludeConfigured := c.QueryBool("excludeConfigured", true)
|
||||
|
||||
// Start with the known configurations
|
||||
for _, c := range cl.GetAllBackendConfigs() {
|
||||
if excludeConfigured {
|
||||
mm[c.Model] = nil
|
||||
}
|
||||
|
||||
if filterFn(c.Name) {
|
||||
dataModels = append(dataModels, schema.OpenAIModel{ID: c.Name, Object: "model"})
|
||||
}
|
||||
dataModels, err := lms.ListModels(filter, excludeConfigured)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Then iterate through the loose files:
|
||||
for _, m := range models {
|
||||
// And only adds them if they shouldn't be skipped.
|
||||
if _, exists := mm[m]; !exists && filterFn(m) {
|
||||
dataModels = append(dataModels, schema.OpenAIModel{ID: m, Object: "model"})
|
||||
}
|
||||
}
|
||||
|
||||
return c.JSON(struct {
|
||||
Object string `json:"object"`
|
||||
Data []schema.OpenAIModel `json:"data"`
|
||||
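The filtering logic removed above presumably moves into the new ListModelsService. A rough sketch of what ListModels(filter, excludeConfigured) could look like, reconstructed from the deleted handler code; the receiver fields (ml, bcl) and the exact signature are assumptions, not the actual implementation:

// Sketch only; reconstructed from the removed handler logic.
func (lms *ListModelsService) ListModels(filter string, excludeConfigured bool) ([]schema.OpenAIModel, error) {
	looseFiles, err := lms.ml.ListModels()
	if err != nil {
		return nil, err
	}

	// An empty filter matches everything; otherwise compile it to a regexp.
	matches := func(string) bool { return true }
	if filter != "" {
		rxp, err := regexp.Compile(filter)
		if err != nil {
			return nil, err
		}
		matches = rxp.MatchString
	}

	seen := map[string]struct{}{}
	dataModels := []schema.OpenAIModel{}

	// Known configurations first.
	for _, c := range lms.bcl.GetAllBackendConfigs() {
		if excludeConfigured {
			seen[c.Model] = struct{}{}
		}
		if matches(c.Name) {
			dataModels = append(dataModels, schema.OpenAIModel{ID: c.Name, Object: "model"})
		}
	}

	// Then loose files that are not already referenced by a configuration.
	for _, m := range looseFiles {
		if _, ok := seen[m]; !ok && matches(m) {
			dataModels = append(dataModels, schema.OpenAIModel{ID: m, Object: "model"})
		}
	}
	return dataModels, nil
}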
|
||||
@@ -63,10 +63,14 @@ func getBase64Image(s string) (string, error) {
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// if the string instead is prefixed with "data:image/jpeg;base64,", drop it
|
||||
if strings.HasPrefix(s, "data:image/jpeg;base64,") {
|
||||
return strings.ReplaceAll(s, "data:image/jpeg;base64,", ""), nil
|
||||
// if the string instead is prefixed with "data:image/...;base64,", drop it
|
||||
dropPrefix := []string{"data:image/jpeg;base64,", "data:image/png;base64,"}
|
||||
for _, prefix := range dropPrefix {
|
||||
if strings.HasPrefix(s, prefix) {
|
||||
return strings.ReplaceAll(s, prefix, ""), nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("not valid string")
|
||||
}
|
||||
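The fixed prefix list above only handles JPEG and PNG data URIs. A hedged alternative, not part of this patch and with a made-up helper name, would be to strip any image data-URI prefix with a regular expression:

package imageutil // hypothetical helper, for illustration only

import "regexp"

// Matches any "data:image/<subtype>;base64," prefix instead of a fixed list.
var dataURIPrefix = regexp.MustCompile(`^data:image/[a-zA-Z0-9.+-]+;base64,`)

func stripImageDataURI(s string) (string, bool) {
	if loc := dataURIPrefix.FindStringIndex(s); loc != nil {
		return s[loc[1]:], true
	}
	return s, false
}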
|
||||
@@ -181,7 +185,7 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque
|
||||
input.Messages[i].StringContent = fmt.Sprintf("[img-%d]", index) + input.Messages[i].StringContent
|
||||
index++
|
||||
} else {
|
||||
fmt.Print("Failed encoding image", err)
|
||||
log.Error().Msgf("Failed encoding image: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
fiberhtml "github.com/gofiber/template/html/v2"
|
||||
"github.com/microcosm-cc/bluemonday"
|
||||
"github.com/russross/blackfriday"
|
||||
)
|
||||
|
||||
@@ -39,5 +40,5 @@ func renderEngine() *fiberhtml.Engine {
|
||||
|
||||
func markDowner(args ...interface{}) template.HTML {
|
||||
s := blackfriday.MarkdownCommon([]byte(fmt.Sprintf("%s", args...)))
|
||||
return template.HTML(s)
|
||||
return template.HTML(bluemonday.UGCPolicy().Sanitize(string(s)))
|
||||
}
|
||||
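Piping the rendered Markdown through bluemonday's UGC policy strips active content before it reaches the template. A small illustration of the effect (the input string is made up):

package main

import (
	"fmt"

	"github.com/microcosm-cc/bluemonday"
	"github.com/russross/blackfriday"
)

func main() {
	md := "**hello** <script>alert(1)</script>"
	html := blackfriday.MarkdownCommon([]byte(md))
	// The UGC policy keeps harmless markup but drops the script element.
	fmt.Println(bluemonday.UGCPolicy().Sanitize(string(html)))
}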
|
||||
@@ -23,6 +23,8 @@ func RegisterLocalAIRoutes(app *fiber.App,
|
||||
|
||||
modelGalleryEndpointService := localai.CreateModelGalleryEndpointService(appConfig.Galleries, appConfig.ModelPath, galleryService)
|
||||
app.Post("/models/apply", auth, modelGalleryEndpointService.ApplyModelGalleryEndpoint())
|
||||
app.Post("/models/delete/:name", auth, modelGalleryEndpointService.DeleteModelGalleryEndpoint())
|
||||
|
||||
app.Get("/models/available", auth, modelGalleryEndpointService.ListModelFromGalleryEndpoint())
|
||||
app.Get("/models/galleries", auth, modelGalleryEndpointService.ListModelGalleriesEndpoint())
|
||||
app.Post("/models/galleries", auth, modelGalleryEndpointService.AddModelGalleryEndpoint())
|
||||
@@ -50,9 +52,9 @@ func RegisterLocalAIRoutes(app *fiber.App,
|
||||
app.Get("/metrics", auth, localai.LocalAIMetricsEndpoint())
|
||||
|
||||
// Experimental Backend Statistics Module
|
||||
backendMonitor := services.NewBackendMonitor(cl, ml, appConfig) // Split out for now
|
||||
app.Get("/backend/monitor", auth, localai.BackendMonitorEndpoint(backendMonitor))
|
||||
app.Post("/backend/shutdown", auth, localai.BackendShutdownEndpoint(backendMonitor))
|
||||
backendMonitorService := services.NewBackendMonitorService(ml, cl, appConfig) // Split out for now
|
||||
app.Get("/backend/monitor", auth, localai.BackendMonitorEndpoint(backendMonitorService))
|
||||
app.Post("/backend/shutdown", auth, localai.BackendShutdownEndpoint(backendMonitorService))
|
||||
|
||||
app.Get("/version", auth, func(c *fiber.Ctx) error {
|
||||
return c.JSON(struct {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/http/endpoints/localai"
|
||||
"github.com/go-skynet/LocalAI/core/http/endpoints/openai"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
@@ -81,6 +82,7 @@ func RegisterOpenAIRoutes(app *fiber.App,
|
||||
}
|
||||
|
||||
// models
|
||||
app.Get("/v1/models", auth, openai.ListModelsEndpoint(cl, ml))
|
||||
app.Get("/models", auth, openai.ListModelsEndpoint(cl, ml))
|
||||
tmpLMS := services.NewListModelsService(ml, cl, appConfig) // TODO: once createApplication() is fully in use, reference the central instance.
|
||||
app.Get("/v1/models", auth, openai.ListModelsEndpoint(tmpLMS))
|
||||
app.Get("/models", auth, openai.ListModelsEndpoint(tmpLMS))
|
||||
}
|
||||
|
||||
@@ -3,13 +3,19 @@ package routes
|
||||
import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/http/elements"
|
||||
"github.com/go-skynet/LocalAI/core/http/endpoints/localai"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/internal"
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/go-skynet/LocalAI/pkg/xsync"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
@@ -21,14 +27,65 @@ func RegisterUIRoutes(app *fiber.App,
|
||||
galleryService *services.GalleryService,
|
||||
auth func(*fiber.Ctx) error) {
|
||||
|
||||
// Show the Models page
|
||||
// keeps the state of models that are being installed from the UI
|
||||
var processingModels = xsync.NewSyncedMap[string, string]()
|
||||
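The only methods used on this synced map in the patch are Set, Get, Exists, Keys, Delete and Map. A sketch of a generic map with that surface, which is roughly what pkg/xsync is assumed to provide (the real implementation may differ):

package xsync // sketch only; see pkg/xsync for the real implementation

import "sync"

type SyncedMap[K comparable, V any] struct {
	mu sync.RWMutex
	m  map[K]V
}

func NewSyncedMap[K comparable, V any]() *SyncedMap[K, V] {
	return &SyncedMap[K, V]{m: map[K]V{}}
}

func (s *SyncedMap[K, V]) Set(k K, v V) {
	s.mu.Lock()
	s.m[k] = v
	s.mu.Unlock()
}

func (s *SyncedMap[K, V]) Get(k K) V {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.m[k]
}

func (s *SyncedMap[K, V]) Exists(k K) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, ok := s.m[k]
	return ok
}

func (s *SyncedMap[K, V]) Delete(k K) {
	s.mu.Lock()
	delete(s.m, k)
	s.mu.Unlock()
}

func (s *SyncedMap[K, V]) Keys() []K {
	s.mu.RLock()
	defer s.mu.RUnlock()
	keys := make([]K, 0, len(s.m))
	for k := range s.m {
		keys = append(keys, k)
	}
	return keys
}

func (s *SyncedMap[K, V]) Map() map[K]V {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make(map[K]V, len(s.m))
	for k, v := range s.m {
		out[k] = v
	}
	return out
}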
|
||||
// modelStatus returns the current status of the models being processed (installation or deletion)
|
||||
// it is called asynchronously from the UI
|
||||
modelStatus := func() (map[string]string, map[string]string) {
|
||||
processingModelsData := processingModels.Map()
|
||||
|
||||
taskTypes := map[string]string{}
|
||||
|
||||
for k, v := range processingModelsData {
|
||||
status := galleryService.GetStatus(v)
|
||||
taskTypes[k] = "Installation"
|
||||
if status != nil && status.Deletion {
|
||||
taskTypes[k] = "Deletion"
|
||||
} else if status == nil {
|
||||
taskTypes[k] = "Waiting"
|
||||
}
|
||||
}
|
||||
|
||||
return processingModelsData, taskTypes
|
||||
}
|
||||
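Both maps returned here are keyed by the gallery model ID: the first maps a model to its job UUID, the second to a human-readable task label. An illustrative snapshot (values are made up):

// Illustration only.
processingModels := map[string]string{
	"localai@some-model": "2f1d7a9e-5b8c-11ee-8c99-0242ac120002", // job UUID
}
taskTypes := map[string]string{
	"localai@some-model": "Installation", // or "Deletion", or "Waiting" while no status exists yet
}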
|
||||
app.Get("/", auth, localai.WelcomeEndpoint(appConfig, cl, ml, modelStatus))
|
||||
|
||||
// Show the Models page (all models)
|
||||
app.Get("/browse", auth, func(c *fiber.Ctx) error {
|
||||
term := c.Query("term")
|
||||
|
||||
models, _ := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.ModelPath)
|
||||
|
||||
// Get all available tags
|
||||
allTags := map[string]struct{}{}
|
||||
tags := []string{}
|
||||
for _, m := range models {
|
||||
for _, t := range m.Tags {
|
||||
allTags[t] = struct{}{}
|
||||
}
|
||||
}
|
||||
for t := range allTags {
|
||||
tags = append(tags, t)
|
||||
}
|
||||
sort.Strings(tags)
|
||||
|
||||
if term != "" {
|
||||
models = gallery.GalleryModels(models).Search(term)
|
||||
}
|
||||
|
||||
// Get model statuses
|
||||
processingModelsData, taskTypes := modelStatus()
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI - Models",
|
||||
"Models": template.HTML(elements.ListModels(models)),
|
||||
"Repositories": appConfig.Galleries,
|
||||
"Title": "LocalAI - Models",
|
||||
"Version": internal.PrintableVersion(),
|
||||
"Models": template.HTML(elements.ListModels(models, processingModels, galleryService)),
|
||||
"Repositories": appConfig.Galleries,
|
||||
"AllTags": tags,
|
||||
"ProcessingModels": processingModelsData,
|
||||
"TaskTypes": taskTypes,
|
||||
// "ApplicationConfig": appConfig,
|
||||
}
|
||||
|
||||
@@ -36,7 +93,7 @@ func RegisterUIRoutes(app *fiber.App,
|
||||
return c.Render("views/models", summary)
|
||||
})
|
||||
|
||||
// HTMX: return the model details
|
||||
// Show the models, filtered from the user input
|
||||
// https://htmx.org/examples/active-search/
|
||||
app.Post("/browse/search/models", auth, func(c *fiber.Ctx) error {
|
||||
form := struct {
|
||||
@@ -48,22 +105,20 @@ func RegisterUIRoutes(app *fiber.App,
|
||||
|
||||
models, _ := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.ModelPath)
|
||||
|
||||
filteredModels := []*gallery.GalleryModel{}
|
||||
for _, m := range models {
|
||||
if strings.Contains(m.Name, form.Search) ||
|
||||
strings.Contains(m.Description, form.Search) ||
|
||||
strings.Contains(m.Gallery.Name, form.Search) ||
|
||||
strings.Contains(strings.Join(m.Tags, ","), form.Search) {
|
||||
filteredModels = append(filteredModels, m)
|
||||
}
|
||||
}
|
||||
|
||||
return c.SendString(elements.ListModels(filteredModels))
|
||||
return c.SendString(elements.ListModels(gallery.GalleryModels(models).Search(form.Search), processingModels, galleryService))
|
||||
})
|
||||
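The hand-rolled filter loop removed above is replaced by gallery.GalleryModels(models).Search(...). Judging from the deleted code, Search presumably performs a substring match over name, description, gallery name and tags; a sketch of such a helper (the real one may differ, for instance by matching case-insensitively):

// Sketch reconstructed from the removed loop; assumes it lives in package gallery
// next to the existing GalleryModel type and that "strings" is imported.
type GalleryModels []*GalleryModel

func (gm GalleryModels) Search(term string) GalleryModels {
	var out GalleryModels
	for _, m := range gm {
		if strings.Contains(m.Name, term) ||
			strings.Contains(m.Description, term) ||
			strings.Contains(m.Gallery.Name, term) ||
			strings.Contains(strings.Join(m.Tags, ","), term) {
			out = append(out, m)
		}
	}
	return out
}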
|
||||
/*
|
||||
|
||||
Install routes
|
||||
|
||||
*/
|
||||
|
||||
// This route is used when the "Install" button is pressed: we submit a new installation job to the gallery service
|
||||
// https://htmx.org/examples/progress-bar/
|
||||
app.Post("/browse/install/model/:id", auth, func(c *fiber.Ctx) error {
|
||||
galleryID := strings.Clone(c.Params("id")) // strings.Clone is required!
|
||||
galleryID := strings.Clone(c.Params("id")) // note: strings.Clone is required for multiple requests!
|
||||
log.Debug().Msgf("UI job submitted to install : %+v\n", galleryID)
|
||||
|
||||
id, err := uuid.NewUUID()
|
||||
if err != nil {
|
||||
@@ -72,21 +127,64 @@ func RegisterUIRoutes(app *fiber.App,
|
||||
|
||||
uid := id.String()
|
||||
|
||||
processingModels.Set(galleryID, uid)
|
||||
|
||||
op := gallery.GalleryOp{
|
||||
Id: uid,
|
||||
GalleryName: galleryID,
|
||||
Galleries: appConfig.Galleries,
|
||||
Id: uid,
|
||||
GalleryModelName: galleryID,
|
||||
Galleries: appConfig.Galleries,
|
||||
}
|
||||
go func() {
|
||||
galleryService.C <- op
|
||||
}()
|
||||
|
||||
return c.SendString(elements.StartProgressBar(uid, "0"))
|
||||
return c.SendString(elements.StartProgressBar(uid, "0", "Installation"))
|
||||
})
|
||||
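Pieced together from the fields used across this patch (Id, GalleryModelName, Galleries, ConfigURL, Delete and Req), the request sent over the gallery service channel presumably looks roughly like the following; the field types are inferred and may not match the real definition in pkg/gallery:

// Inferred shape only; see pkg/gallery for the actual struct.
type GalleryOp struct {
	Id               string           // job UUID, used to poll progress
	GalleryModelName string           // model to install or delete
	Galleries        []config.Gallery // repositories to resolve the model from (type assumed)
	ConfigURL        string           // optional direct configuration URL
	Delete           bool             // true for deletion jobs
	Req              GalleryModel     // full request payload when applying via the API (type assumed)
}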
|
||||
// This route is used when the "Delete" button is pressed: we submit a new deletion job to the gallery service
|
||||
// https://htmx.org/examples/progress-bar/
|
||||
app.Post("/browse/delete/model/:id", auth, func(c *fiber.Ctx) error {
|
||||
galleryID := strings.Clone(c.Params("id")) // note: strings.Clone is required for multiple requests!
|
||||
log.Debug().Msgf("UI job submitted to delete : %+v\n", galleryID)
|
||||
var galleryName = galleryID
|
||||
if strings.Contains(galleryID, "@") {
|
||||
// if the galleryID contains a @ it means that it's a model from a gallery
|
||||
// but we want to delete it from the local models which does not need
|
||||
// a repository ID
|
||||
galleryName = strings.Split(galleryID, "@")[1]
|
||||
}
|
||||
|
||||
id, err := uuid.NewUUID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
uid := id.String()
|
||||
|
||||
// Track the deletion job by galleryID and galleryName
|
||||
// The GalleryID contains information about the repository,
|
||||
// while the GalleryName is ONLY the name of the model
|
||||
processingModels.Set(galleryName, uid)
|
||||
processingModels.Set(galleryID, uid)
|
||||
|
||||
op := gallery.GalleryOp{
|
||||
Id: uid,
|
||||
Delete: true,
|
||||
GalleryModelName: galleryName,
|
||||
}
|
||||
go func() {
|
||||
galleryService.C <- op
|
||||
cl.RemoveBackendConfig(galleryName)
|
||||
}()
|
||||
|
||||
return c.SendString(elements.StartProgressBar(uid, "0", "Deletion"))
|
||||
})
|
||||
|
||||
// Display the job current progress status
|
||||
// If the job is done, we trigger the /browse/job/:uid route
|
||||
// https://htmx.org/examples/progress-bar/
|
||||
app.Get("/browse/job/progress/:uid", auth, func(c *fiber.Ctx) error {
|
||||
jobUID := c.Params("uid")
|
||||
jobUID := strings.Clone(c.Params("uid")) // note: strings.Clone is required for multiple requests!
|
||||
|
||||
status := galleryService.GetStatus(jobUID)
|
||||
if status == nil {
|
||||
@@ -95,17 +193,144 @@ func RegisterUIRoutes(app *fiber.App,
|
||||
}
|
||||
|
||||
if status.Progress == 100 {
|
||||
c.Set("HX-Trigger", "done")
|
||||
c.Set("HX-Trigger", "done") // this triggers /browse/job/:uid (which is when the job is done)
|
||||
return c.SendString(elements.ProgressBar("100"))
|
||||
}
|
||||
if status.Error != nil {
|
||||
return c.SendString(elements.ErrorProgress(status.Error.Error()))
|
||||
return c.SendString(elements.ErrorProgress(status.Error.Error(), status.GalleryModelName))
|
||||
}
|
||||
|
||||
return c.SendString(elements.ProgressBar(fmt.Sprint(status.Progress)))
|
||||
})
|
||||
|
||||
// this route is hit when the job is done, and we display the
|
||||
// final state (for now just displays "Installation completed")
|
||||
app.Get("/browse/job/:uid", auth, func(c *fiber.Ctx) error {
|
||||
return c.SendString(elements.DoneProgress(c.Params("uid")))
|
||||
jobUID := strings.Clone(c.Params("uid")) // note: strings.Clone is required for multiple requests!
|
||||
|
||||
status := galleryService.GetStatus(jobUID)
|
||||
|
||||
galleryID := ""
|
||||
for _, k := range processingModels.Keys() {
|
||||
if processingModels.Get(k) == jobUID {
|
||||
galleryID = k
|
||||
processingModels.Delete(k)
|
||||
}
|
||||
}
|
||||
if galleryID == "" {
|
||||
log.Debug().Msgf("no processing model found for job : %+v\n", jobUID)
|
||||
}
|
||||
|
||||
log.Debug().Msgf("JOB finished : %+v\n", status)
|
||||
showDelete := true
|
||||
displayText := "Installation completed"
|
||||
if status.Deletion {
|
||||
showDelete = false
|
||||
displayText = "Deletion completed"
|
||||
}
|
||||
|
||||
return c.SendString(elements.DoneProgress(galleryID, displayText, showDelete))
|
||||
})
|
||||
|
||||
// Show the Chat page
|
||||
app.Get("/chat/:model", auth, func(c *fiber.Ctx) error {
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI - Chat with " + c.Params("model"),
|
||||
"ModelsConfig": backendConfigs,
|
||||
"Model": c.Params("model"),
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render("views/chat", summary)
|
||||
})
|
||||
app.Get("/chat/", auth, func(c *fiber.Ctx) error {
|
||||
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
if len(backendConfigs) == 0 {
|
||||
// If no model is available redirect to the index which suggests how to install models
|
||||
return c.Redirect("/")
|
||||
}
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI - Chat with " + backendConfigs[0].Name,
|
||||
"ModelsConfig": backendConfigs,
|
||||
"Model": backendConfigs[0].Name,
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render("views/chat", summary)
|
||||
})
|
||||
|
||||
app.Get("/text2image/:model", auth, func(c *fiber.Ctx) error {
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI - Generate images with " + c.Params("model"),
|
||||
"ModelsConfig": backendConfigs,
|
||||
"Model": c.Params("model"),
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render("views/text2image", summary)
|
||||
})
|
||||
|
||||
app.Get("/text2image/", auth, func(c *fiber.Ctx) error {
|
||||
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
if len(backendConfigs) == 0 {
|
||||
// If no model is available redirect to the index which suggests how to install models
|
||||
return c.Redirect("/")
|
||||
}
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI - Generate images with " + backendConfigs[0].Name,
|
||||
"ModelsConfig": backendConfigs,
|
||||
"Model": backendConfigs[0].Name,
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render("views/text2image", summary)
|
||||
})
|
||||
|
||||
app.Get("/tts/:model", auth, func(c *fiber.Ctx) error {
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI - Generate images with " + c.Params("model"),
|
||||
"ModelsConfig": backendConfigs,
|
||||
"Model": c.Params("model"),
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render("views/tts", summary)
|
||||
})
|
||||
|
||||
app.Get("/tts/", auth, func(c *fiber.Ctx) error {
|
||||
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
if len(backendConfigs) == 0 {
|
||||
// If no model is available redirect to the index which suggests how to install models
|
||||
return c.Redirect("/")
|
||||
}
|
||||
|
||||
summary := fiber.Map{
|
||||
"Title": "LocalAI - Generate audio with " + backendConfigs[0].Name,
|
||||
"ModelsConfig": backendConfigs,
|
||||
"Model": backendConfigs[0].Name,
|
||||
"Version": internal.PrintableVersion(),
|
||||
}
|
||||
|
||||
// Render index
|
||||
return c.Render("views/tts", summary)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
package routes
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/http/endpoints/localai"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
)
|
||||
|
||||
func RegisterPagesRoutes(app *fiber.App,
|
||||
cl *config.BackendConfigLoader,
|
||||
ml *model.ModelLoader,
|
||||
appConfig *config.ApplicationConfig,
|
||||
auth func(*fiber.Ctx) error) {
|
||||
|
||||
if !appConfig.DisableWelcomePage {
|
||||
app.Get("/", auth, localai.WelcomeEndpoint(appConfig, cl, ml))
|
||||
}
|
||||
}
|
||||
core/http/static/chat.js (new file, 238 lines)
@@ -0,0 +1,238 @@
|
||||
/*
|
||||
|
||||
https://github.com/david-haerer/chatapi
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023 David Härer
|
||||
Copyright (c) 2024 Ettore Di Giacinto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
*/
|
||||
|
||||
function submitKey(event) {
|
||||
event.preventDefault();
|
||||
localStorage.setItem("key", document.getElementById("apiKey").value);
|
||||
document.getElementById("apiKey").blur();
|
||||
}
|
||||
|
||||
function submitSystemPrompt(event) {
|
||||
event.preventDefault();
|
||||
localStorage.setItem("system_prompt", document.getElementById("systemPrompt").value);
|
||||
document.getElementById("systemPrompt").blur();
|
||||
}
|
||||
|
||||
var image = "";
|
||||
|
||||
function submitPrompt(event) {
|
||||
event.preventDefault();
|
||||
|
||||
const input = document.getElementById("input").value;
|
||||
Alpine.store("chat").add("user", input, image);
|
||||
document.getElementById("input").value = "";
|
||||
const key = localStorage.getItem("key");
|
||||
const systemPrompt = localStorage.getItem("system_prompt");
|
||||
|
||||
promptGPT(systemPrompt, key, input);
|
||||
}
|
||||
|
||||
function readInputImage() {
|
||||
|
||||
if (!this.files || !this.files[0]) return;
|
||||
|
||||
const FR = new FileReader();
|
||||
|
||||
FR.addEventListener("load", function(evt) {
|
||||
image = evt.target.result;
|
||||
});
|
||||
|
||||
FR.readAsDataURL(this.files[0]);
|
||||
}
|
||||
|
||||
|
||||
async function promptGPT(systemPrompt, key, input) {
|
||||
const model = document.getElementById("chat-model").value;
|
||||
// Set class "loader" to the element with "loader" id
|
||||
//document.getElementById("loader").classList.add("loader");
|
||||
// Make the "loader" visible
|
||||
document.getElementById("loader").style.display = "block";
|
||||
document.getElementById("input").disabled = true;
|
||||
document.getElementById('messages').scrollIntoView(false)
|
||||
|
||||
messages = Alpine.store("chat").messages();
|
||||
|
||||
// if systemPrompt isn't empty, push it at the start of messages
|
||||
if (systemPrompt) {
|
||||
messages.unshift({
|
||||
role: "system",
|
||||
content: systemPrompt
|
||||
});
|
||||
}
|
||||
|
||||
// loop all messages, and check if there are images. If there are, we need to change the content field
|
||||
messages.forEach((message) => {
|
||||
if (message.image) {
|
||||
// The content field now becomes an array
|
||||
message.content = [
|
||||
{
|
||||
"type": "text",
|
||||
"text": message.content
|
||||
}
|
||||
]
|
||||
message.content.push(
|
||||
{
|
||||
"type": "image_url",
|
||||
"image_url": {
|
||||
"url": message.image,
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// remove the image field
|
||||
delete message.image;
|
||||
}
|
||||
});
|
||||
|
||||
// reset the form and the image
|
||||
image = "";
|
||||
document.getElementById("input_image").value = null;
|
||||
document.getElementById("fileName").innerHTML = "";
|
||||
|
||||
// if (image) {
|
||||
// // take the last element content's and add the image
|
||||
// last_message = messages[messages.length - 1]
|
||||
// // The content field now becomes an array
|
||||
// last_message.content = [
|
||||
// {
|
||||
// "type": "text",
|
||||
// "text": last_message.content
|
||||
// }
|
||||
// ]
|
||||
// last_message.content.push(
|
||||
// {
|
||||
// "type": "image_url",
|
||||
// "image_url": {
|
||||
// "url": image,
|
||||
// }
|
||||
// }
|
||||
// );
|
||||
// // and we replace it in the messages array
|
||||
// messages[messages.length - 1] = last_message
|
||||
|
||||
// // reset the form and the image
|
||||
// image = "";
|
||||
// document.getElementById("input_image").value = null;
|
||||
// document.getElementById("fileName").innerHTML = "";
|
||||
// }
|
||||
|
||||
// Source: https://stackoverflow.com/a/75751803/11386095
|
||||
const response = await fetch("/v1/chat/completions", {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: `Bearer ${key}`,
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: model,
|
||||
messages: messages,
|
||||
stream: true,
|
||||
}),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
Alpine.store("chat").add(
|
||||
"assistant",
|
||||
`<span class='error'>Error: POST /v1/chat/completions ${response.status}</span>`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const reader = response.body
|
||||
?.pipeThrough(new TextDecoderStream())
|
||||
.getReader();
|
||||
|
||||
if (!reader) {
|
||||
Alpine.store("chat").add(
|
||||
"assistant",
|
||||
`<span class='error'>Error: Failed to decode API response</span>`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
while (true) {
|
||||
const { value, done } = await reader.read();
|
||||
if (done) break;
|
||||
let dataDone = false;
|
||||
const arr = value.split("\n");
|
||||
arr.forEach((data) => {
|
||||
if (data.length === 0) return;
|
||||
if (data.startsWith(":")) return;
|
||||
if (data === "data: [DONE]") {
|
||||
dataDone = true;
|
||||
return;
|
||||
}
|
||||
const token = JSON.parse(data.substring(6)).choices[0].delta.content;
|
||||
if (!token) {
|
||||
return;
|
||||
}
|
||||
hljs.highlightAll();
|
||||
Alpine.store("chat").add("assistant", token);
|
||||
document.getElementById('messages').scrollIntoView(false)
|
||||
});
|
||||
hljs.highlightAll();
|
||||
if (dataDone) break;
|
||||
}
|
||||
// Remove class "loader" from the element with "loader" id
|
||||
//document.getElementById("loader").classList.remove("loader");
|
||||
document.getElementById("loader").style.display = "none";
|
||||
// enable input
|
||||
document.getElementById("input").disabled = false;
|
||||
// scroll to the bottom of the chat
|
||||
document.getElementById('messages').scrollIntoView(false)
|
||||
// set focus to the input
|
||||
document.getElementById("input").focus();
|
||||
}
|
||||
|
||||
document.getElementById("key").addEventListener("submit", submitKey);
|
||||
document.getElementById("system_prompt").addEventListener("submit", submitSystemPrompt);
|
||||
|
||||
document.getElementById("prompt").addEventListener("submit", submitPrompt);
|
||||
document.getElementById("input").focus();
|
||||
document.getElementById("input_image").addEventListener("change", readInputImage);
|
||||
|
||||
storeKey = localStorage.getItem("key");
|
||||
if (storeKey) {
|
||||
document.getElementById("apiKey").value = storeKey;
|
||||
} else {
|
||||
document.getElementById("apiKey").value = null;
|
||||
}
|
||||
|
||||
storesystemPrompt = localStorage.getItem("system_prompt");
|
||||
if (storesystemPrompt) {
|
||||
document.getElementById("systemPrompt").value = storesystemPrompt;
|
||||
} else {
|
||||
document.getElementById("systemPrompt").value = null;
|
||||
}
|
||||
|
||||
marked.setOptions({
|
||||
highlight: function (code) {
|
||||
return hljs.highlightAuto(code).value;
|
||||
},
|
||||
});
|
||||
core/http/static/favicon.ico (new binary file, 15 KiB; contents not shown)
core/http/static/general.css (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
body {
|
||||
font-family: 'Inter', sans-serif;
|
||||
}
|
||||
.chat-container { height: 90vh; display: flex; flex-direction: column; }
|
||||
.chat-messages { overflow-y: auto; flex-grow: 1; }
|
||||
.htmx-indicator{
|
||||
opacity:0;
|
||||
transition: opacity 10ms ease-in;
|
||||
}
|
||||
.htmx-request .htmx-indicator{
|
||||
opacity:1
|
||||
}
|
||||
/* Loader (https://cssloaders.github.io/) */
|
||||
.loader {
|
||||
width: 12px;
|
||||
height: 12px;
|
||||
border-radius: 50%;
|
||||
display: block;
|
||||
margin:15px auto;
|
||||
position: relative;
|
||||
color: #FFF;
|
||||
box-sizing: border-box;
|
||||
animation: animloader 2s linear infinite;
|
||||
}
|
||||
|
||||
@keyframes animloader {
|
||||
0% { box-shadow: 14px 0 0 -2px, 38px 0 0 -2px, -14px 0 0 -2px, -38px 0 0 -2px; }
|
||||
25% { box-shadow: 14px 0 0 -2px, 38px 0 0 -2px, -14px 0 0 -2px, -38px 0 0 2px; }
|
||||
50% { box-shadow: 14px 0 0 -2px, 38px 0 0 -2px, -14px 0 0 2px, -38px 0 0 -2px; }
|
||||
75% { box-shadow: 14px 0 0 2px, 38px 0 0 -2px, -14px 0 0 -2px, -38px 0 0 -2px; }
|
||||
100% { box-shadow: 14px 0 0 -2px, 38px 0 0 2px, -14px 0 0 -2px, -38px 0 0 -2px; }
|
||||
}
|
||||
.progress {
|
||||
height: 20px;
|
||||
margin-bottom: 20px;
|
||||
overflow: hidden;
|
||||
background-color: #f5f5f5;
|
||||
border-radius: 4px;
|
||||
box-shadow: inset 0 1px 2px rgba(0,0,0,.1);
|
||||
}
|
||||
.progress-bar {
|
||||
float: left;
|
||||
width: 0%;
|
||||
height: 100%;
|
||||
font-size: 12px;
|
||||
line-height: 20px;
|
||||
color: #fff;
|
||||
text-align: center;
|
||||
background-color: #337ab7;
|
||||
-webkit-box-shadow: inset 0 -1px 0 rgba(0,0,0,.15);
|
||||
box-shadow: inset 0 -1px 0 rgba(0,0,0,.15);
|
||||
-webkit-transition: width .6s ease;
|
||||
-o-transition: width .6s ease;
|
||||
transition: width .6s ease;
|
||||
}
|
||||
|
||||
.user {
|
||||
background-color: #007bff;
|
||||
}
|
||||
|
||||
.assistant {
|
||||
background-color: #28a745;
|
||||
}
|
||||
|
||||
.message {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.user, .assistant {
|
||||
flex-grow: 1;
|
||||
margin: 0.5rem;
|
||||
}
|
||||
|
||||
ul {
|
||||
list-style-type: disc; /* Adds bullet points */
|
||||
padding-left: 1.25rem; /* Indents the list from the left margin */
|
||||
margin-top: 1rem; /* Space above the list */
|
||||
}
|
||||
|
||||
li {
|
||||
font-size: 0.875rem; /* Small text size */
|
||||
color: #4a5568; /* Dark gray text */
|
||||
background-color: #f7fafc; /* Very light gray background */
|
||||
border-radius: 0.375rem; /* Rounded corners */
|
||||
padding: 0.5rem; /* Padding inside each list item */
|
||||
box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.1), 0 1px 2px 0 rgba(0, 0, 0, 0.06); /* Subtle shadow */
|
||||
margin-bottom: 0.5rem; /* Vertical space between list items */
|
||||
}
|
||||
|
||||
li:last-child {
|
||||
margin-bottom: 0; /* Removes bottom margin from the last item */
|
||||
}
|
||||
core/http/static/image.js (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
/*
|
||||
|
||||
https://github.com/david-haerer/chatapi
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2023 David Härer
|
||||
Copyright (c) 2024 Ettore Di Giacinto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
*/
|
||||
function submitKey(event) {
|
||||
event.preventDefault();
|
||||
localStorage.setItem("key", document.getElementById("apiKey").value);
|
||||
document.getElementById("apiKey").blur();
|
||||
}
|
||||
|
||||
|
||||
function genImage(event) {
|
||||
event.preventDefault();
|
||||
const input = document.getElementById("input").value;
|
||||
const key = localStorage.getItem("key");
|
||||
|
||||
promptDallE(key, input);
|
||||
|
||||
}
|
||||
|
||||
async function promptDallE(key, input) {
|
||||
document.getElementById("loader").style.display = "block";
|
||||
document.getElementById("input").value = "";
|
||||
document.getElementById("input").disabled = true;
|
||||
|
||||
const model = document.getElementById("image-model").value;
|
||||
const response = await fetch("/v1/images/generations", {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: `Bearer ${key}`,
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: model,
|
||||
steps: 10,
|
||||
prompt: input,
|
||||
n: 1,
|
||||
size: "512x512",
|
||||
}),
|
||||
});
|
||||
const json = await response.json();
|
||||
if (json.error) {
|
||||
// Display error if there is one
|
||||
var div = document.getElementById('result'); // Get the div by its ID
|
||||
div.innerHTML = '<p style="color:red;">' + json.error.message + '</p>';
|
||||
return;
|
||||
}
|
||||
const url = json.data[0].url;
|
||||
|
||||
var div = document.getElementById('result'); // Get the div by its ID
|
||||
var img = document.createElement('img'); // Create a new img element
|
||||
img.src = url; // Set the source of the image
|
||||
img.alt = 'Generated image'; // Set the alt text of the image
|
||||
|
||||
div.innerHTML = ''; // Clear the existing content of the div
|
||||
div.appendChild(img); // Add the new img element to the div
|
||||
|
||||
document.getElementById("loader").style.display = "none";
|
||||
document.getElementById("input").disabled = false;
|
||||
document.getElementById("input").focus();
|
||||
}
|
||||
|
||||
document.getElementById("key").addEventListener("submit", submitKey);
|
||||
document.getElementById("input").focus();
|
||||
document.getElementById("genimage").addEventListener("submit", genImage);
|
||||
document.getElementById("loader").style.display = "none";
|
||||
|
||||
const storeKey = localStorage.getItem("key");
|
||||
if (storeKey) {
|
||||
document.getElementById("apiKey").value = storeKey;
|
||||
}
|
||||
|
||||
core/http/static/tts.js (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
function submitKey(event) {
|
||||
event.preventDefault();
|
||||
localStorage.setItem("key", document.getElementById("apiKey").value);
|
||||
document.getElementById("apiKey").blur();
|
||||
}
|
||||
|
||||
|
||||
function genAudio(event) {
|
||||
event.preventDefault();
|
||||
const input = document.getElementById("input").value;
|
||||
const key = localStorage.getItem("key");
|
||||
|
||||
tts(key, input);
|
||||
}
|
||||
|
||||
async function tts(key, input) {
|
||||
document.getElementById("loader").style.display = "block";
|
||||
document.getElementById("input").value = "";
|
||||
document.getElementById("input").disabled = true;
|
||||
|
||||
const model = document.getElementById("tts-model").value;
|
||||
const response = await fetch("/tts", {
|
||||
method: "POST",
|
||||
headers: {
|
||||
Authorization: `Bearer ${key}`,
|
||||
"Content-Type": "application/json",
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: model,
|
||||
input: input,
|
||||
}),
|
||||
});
|
||||
if (!response.ok) {
|
||||
const jsonData = await response.json(); // Now safely parse JSON
|
||||
var div = document.getElementById('result');
|
||||
div.innerHTML = '<p style="color:red;">Error: ' +jsonData.error.message + '</p>';
|
||||
return;
|
||||
}
|
||||
|
||||
var div = document.getElementById('result'); // Get the div by its ID
|
||||
var link=document.createElement('a');
|
||||
link.className = "m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong";
|
||||
link.innerHTML = "<i class='fa-solid fa-download'></i> Download result";
|
||||
const blob = await response.blob();
|
||||
link.href=window.URL.createObjectURL(blob);
|
||||
|
||||
div.innerHTML = ''; // Clear the existing content of the div
|
||||
div.appendChild(link); // Add the new link element to the div
|
||||
console.log(link)
|
||||
document.getElementById("loader").style.display = "none";
|
||||
document.getElementById("input").disabled = false;
|
||||
document.getElementById("input").focus();
|
||||
}
|
||||
|
||||
document.getElementById("key").addEventListener("submit", submitKey);
|
||||
document.getElementById("input").focus();
|
||||
document.getElementById("tts").addEventListener("submit", genAudio);
|
||||
document.getElementById("loader").style.display = "none";
|
||||
|
||||
const storeKey = localStorage.getItem("key");
|
||||
if (storeKey) {
|
||||
document.getElementById("apiKey").value = storeKey;
|
||||
}
|
||||
|
||||
core/http/views/chat.html (new file, 229 lines)
@@ -0,0 +1,229 @@
|
||||
<!--
|
||||
|
||||
Part of this page is based on the OpenAI Chatbot example by David Härer:
|
||||
https://github.com/david-haerer/chatapi
|
||||
|
||||
MIT License Copyright (c) 2023 David Härer
|
||||
Copyright (c) 2024 Ettore Di Giacinto
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
-->
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
{{template "views/partials/head" .}}
|
||||
<script defer src="/static/chat.js"></script>
|
||||
<style>
|
||||
body {
|
||||
overflow: hidden;
|
||||
}
|
||||
</style>
|
||||
<body class="bg-gray-900 text-gray-200" x-data="{ key: $store.chat.key }">
|
||||
<div class="flex flex-col min-h-screen">
|
||||
|
||||
{{template "views/partials/navbar"}}
|
||||
<div class="chat-container mt-2 mr-2 ml-2 mb-2 bg-gray-800 shadow-lg rounded-lg" >
|
||||
<!-- Chat Header -->
|
||||
<div class="border-b border-gray-700 p-4" x-data="{ component: 'menu' }">
|
||||
|
||||
<div class="flex items-center justify-between">
|
||||
|
||||
<h1 class="text-lg font-semibold"> <i class="fa-solid fa-comments"></i> Chat with {{.Model}} <a href="https://localai.io/features/text-generation/" target="_blank" >
|
||||
<i class="fas fa-circle-info pr-2"></i>
|
||||
</a></h1>
|
||||
<div x-show="component === 'menu'" id="menu">
|
||||
<button
|
||||
@click="$store.chat.clear()"
|
||||
id="clear"
|
||||
title="Clear chat history"
|
||||
|
||||
data-twe-ripple-init
|
||||
data-twe-ripple-color="light"
|
||||
class="m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
|
||||
>
|
||||
Clear chat 🔥
|
||||
</button>
|
||||
<button @click="component = 'key'" title="Update API key"
|
||||
class="m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
|
||||
>Set API Key🔑</button>
|
||||
<button @click="component = 'system_prompt'" title="System Prompt"
|
||||
class="m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
|
||||
>Set system prompt</button>
|
||||
</div>
|
||||
<form x-show="component === 'key'" id="key">
|
||||
<input
|
||||
type="password"
|
||||
id="apiKey"
|
||||
name="apiKey"
|
||||
class="bg-gray-800 text-white border border-gray-600 focus:border-blue-500 focus:ring focus:ring-blue-500 focus:ring-opacity-50 rounded-md shadow-sm p-2 appearance-none"
|
||||
placeholder="OpenAI API Key"
|
||||
x-model.lazy="key"
|
||||
/>
|
||||
<button @click="component = 'menu'" type="submit" title="Save API key">
|
||||
<i class="fa-solid fa-arrow-right"></i>
|
||||
</button>
|
||||
</form>
|
||||
<form x-show="component === 'system_prompt'" id="system_prompt">
|
||||
<textarea
|
||||
type="text"
|
||||
id="systemPrompt"
|
||||
name="systemPrompt"
|
||||
class="bg-gray-800 text-white border border-gray-600 focus:border-blue-500 focus:ring focus:ring-blue-500 focus:ring-opacity-50 rounded-md shadow-sm p-2 appearance-none"
|
||||
placeholder="System prompt"
|
||||
x-model.lazy="system_prompt"
|
||||
></textarea>
|
||||
<button @click="component = 'menu'" type="submit" title="Save Prompt">
|
||||
<i class="fa-solid fa-arrow-right"></i>
|
||||
</button>
|
||||
</form>
|
||||
|
||||
<select x-data="{ link : '' }" x-model="link" x-init="$watch('link', value => window.location = link)"
|
||||
class="bg-gray-800 text-white border border-gray-600 focus:border-blue-500 focus:ring focus:ring-blue-500 focus:ring-opacity-50 rounded-md shadow-sm p-2 appearance-none"
|
||||
>
|
||||
<!-- Options -->
|
||||
<option value="" disabled class="text-gray-400" >Select a model</option>
|
||||
{{ $model:=.Model}}
|
||||
{{ range .ModelsConfig }}
|
||||
{{ if eq .Name $model }}
|
||||
<option value="/chat/{{.Name}}" selected class="bg-gray-700 text-white">{{.Name}}</option>
|
||||
{{ else }}
|
||||
<option value="/chat/{{.Name}}" class="bg-gray-700 text-white">{{.Name}}</option>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</select>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="chat-messages p-4" id="chat" x-data="{history: $store.chat.history}">
|
||||
<p id="usage" x-show="history.length === 0">
|
||||
Start chatting with the AI by typing a prompt in the input field below and pressing Enter.
|
||||
For models that support images, you can upload an image by clicking the paperclip <i class="fa-solid fa-paperclip"></i> icon.
|
||||
</p>
|
||||
<div id="messages">
|
||||
<template x-for="message in history">
|
||||
<div class="message flex items-start space-x-2 my-2" >
|
||||
<!--<img :src="message.role === 'user' ? '/path/to/user-icon.png' : '/path/to/bot-icon.png'" alt="" class="h-6 w-6">-->
|
||||
<i class="fa-solid h-8 w-8" :class="message.role === 'user' ? 'fa-user' : 'fa-robot'" ></i>
|
||||
<div class="flex flex-col flex-1">
|
||||
<span class="text-xs font-semibold text-gray-600" x-text="message.role === 'user' ? 'User' : 'Assistant ({{.Model}})'"></span>
|
||||
<template x-if="message.role === 'user'">
|
||||
<div class="p-2 flex-1 rounded" :class="message.role" x-html="message.html"></div>
|
||||
</template>
|
||||
<template x-if="message.role === 'assistant'">
|
||||
<div class="p-2 flex-1 rounded" :class="message.role" x-html="message.html"></div>
|
||||
</template>
|
||||
<template x-if="message.image">
|
||||
<img :src="message.image" alt="Image" class="rounded-lg mt-2 h-36 w-36">
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="p-4 border-t border-gray-700" x-data="{ inputValue: '', shiftPressed: false, fileName: '' }">
|
||||
<div id="loader" class="my-2 loader" style="display: none;"></div>
|
||||
<input id="chat-model" type="hidden" value="{{.Model}}">
|
||||
<input id="input_image" type="file" style="display: none;" @change="fileName = $event.target.files[0].name">
|
||||
<form id="prompt" action="/chat/{{.Model}}" method="get" @submit.prevent="submitPrompt">
|
||||
<div class="relative w-full">
|
||||
<textarea
|
||||
id="input"
|
||||
name="input"
|
||||
x-model="inputValue"
|
||||
placeholder="Send a message..."
|
||||
class="p-2 pl-2 border rounded w-full bg-gray-600 text-white placeholder-gray-300"
|
||||
required
|
||||
@keydown.shift="shiftPressed = true"
|
||||
@keyup.shift="shiftPressed = false"
|
||||
@keydown.enter="if (!shiftPressed) { submitPrompt($event); }"
|
||||
style="padding-right: 4rem;"
|
||||
></textarea>
|
||||
<span x-text="fileName" id="fileName" class="absolute right-16 top-5 text-gray-300 text-sm mr-2"></span>
|
||||
<button type="button" onclick="document.getElementById('input_image').click()" class="fa-solid fa-paperclip text-gray-300 ml-2 absolute right-10 top-3 text-lg p-2">
|
||||
</button>
|
||||
<button type=submit><i class="fa-solid fa-circle-up text-gray-300 absolute right-2 top-3 text-lg p-2"></i></button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
<script>
|
||||
document.addEventListener("alpine:init", () => {
|
||||
Alpine.store("chat", {
|
||||
history: [],
|
||||
languages: [undefined],
|
||||
clear() {
|
||||
this.history.length = 0;
|
||||
},
|
||||
add(role, content, image) {
|
||||
const N = this.history.length - 1;
|
||||
if (this.history.length && this.history[N].role === role) {
|
||||
this.history[N].content += content;
|
||||
str = this.history[N].content;
|
||||
this.history[N].html = DOMPurify.sanitize(
|
||||
marked.parse(this.history[N].content),
|
||||
);
|
||||
} else {
|
||||
c = ""
|
||||
// split content newlines in content
|
||||
const lines = content.split("\n");
|
||||
// for each line, do DOMPurify.sanitize(marked.parse(line)) and add it to c
|
||||
lines.forEach((line) => {
|
||||
c += DOMPurify.sanitize(marked.parse(line));
|
||||
});
|
||||
|
||||
this.history.push({
|
||||
role: role,
|
||||
content: content,
|
||||
html: c,
|
||||
image: image,
|
||||
});
|
||||
}
|
||||
|
||||
const parser = new DOMParser();
|
||||
const html = parser.parseFromString(
|
||||
this.history[this.history.length - 1].html,
|
||||
"text/html",
|
||||
);
|
||||
const code = html.querySelectorAll("pre code");
|
||||
if (!code.length) return;
|
||||
code.forEach((el) => {
|
||||
const language = el.className.split("language-")[1];
|
||||
if (this.languages.includes(language)) return;
|
||||
const script = document.createElement("script");
|
||||
script.src = `https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/languages/${language}.min.js`;
|
||||
document.head.appendChild(script);
|
||||
this.languages.push(language);
|
||||
});
|
||||
},
|
||||
messages() {
|
||||
return this.history.map((message) => {
|
||||
return {
|
||||
role: message.role,
|
||||
content: message.content,
|
||||
image: message.image,
|
||||
};
|
||||
});
|
||||
},
|
||||
});
|
||||
});
|
||||
</script>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -10,23 +10,54 @@
|
||||
<div class="container mx-auto px-4 flex-grow">
|
||||
<div class="header text-center py-12">
|
||||
<h1 class="text-5xl font-bold text-gray-100">Welcome to <i>your</i> LocalAI instance!</h1>
|
||||
<div class="mt-6">
|
||||
<!-- Logo can be uncommented and updated with a valid URL -->
|
||||
</div>
|
||||
<p class="mt-4 text-lg">The FOSS alternative to OpenAI, Claude, ...</p>
|
||||
<a href="https://localai.io" target="_blank" class="mt-4 inline-block bg-blue-500 text-white py-2 px-4 rounded-lg shadow transition duration-300 ease-in-out hover:bg-blue-700 hover:shadow-lg">
|
||||
<i class="fas fa-book-reader pr-2"></i>Documentation
|
||||
</a>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<div class="models mt-12">
|
||||
<div class="models mt-4">
|
||||
|
||||
{{template "views/partials/inprogress" .}}
|
||||
|
||||
{{ if eq (len .ModelsConfig) 0 }}
|
||||
<h2 class="text-center text-3xl font-semibold text-gray-100"> <i class="text-yellow-200 ml-2 fa-solid fa-triangle-exclamation animate-pulse"></i> Ouch! seems you don't have any models installed!</h2>
|
||||
<p class="text-center mt-4 text-xl">..install something from the <a class="text-gray-400 hover:text-white ml-1 px-3 py-2 rounded" href="/browse">🖼️ Gallery</a> or check the <a href="https://localai.io/basics/getting_started/" class="text-gray-400 hover:text-white ml-1 px-3 py-2 rounded"> <i class="fa-solid fa-book"></i> Getting started documentation </a></p>
|
||||
{{ else }}
|
||||
<h2 class="text-center text-3xl font-semibold text-gray-100">Installed models</h2>
|
||||
<p class="text-center mt-4 text-xl">We have {{len .ModelsConfig}} pre-loaded models available.</p>
|
||||
<ul class="mt-8 space-y-4">
|
||||
<table class="table-auto mt-4 w-full text-left text-gray-200">
|
||||
<thead class="text-xs text-gray-400 uppercase bg-gray-700">
|
||||
<tr>
|
||||
<th class="px-4 py-2"></th>
|
||||
<th class="px-4 py-2">Model Name</th>
|
||||
<th class="px-4 py-2">Backend</th>
|
||||
<th class="px-4 py-2 float-right">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{$galleryConfig:=.GalleryConfig}}
|
||||
{{$noicon:="https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"}}
|
||||
{{ range .ModelsConfig }}
|
||||
<li class="bg-gray-800 border border-gray-700 p-4 rounded-lg">
|
||||
<div class="flex justify-between items-center">
|
||||
<p class="font-bold text-white flex items-center"><i class="fas fa-brain pr-2"></i>{{.Name}}</p>
|
||||
{{ $cfg:= index $galleryConfig .Name}}
|
||||
<tr class="bg-gray-800 border-b border-gray-700">
|
||||
<td class="px-4 py-3">
|
||||
{{ with $cfg }}
|
||||
<img {{ if $cfg.Icon }}
|
||||
src="{{$cfg.Icon}}"
|
||||
{{ else }}
|
||||
src="{{$noicon}}"
|
||||
{{ end }}
|
||||
class="rounded-t-lg max-h-24 max-w-24 object-cover mt-3"
|
||||
>
|
||||
{{ else}}
|
||||
<img src="{{$noicon}}" class="rounded-t-lg max-h-24 max-w-24 object-cover mt-3">
|
||||
{{ end }}
|
||||
</td>
|
||||
<td class="px-4 py-3 font-bold">
|
||||
<p class="font-bold text-white flex items-center"><i class="fas fa-brain pr-2"></i><a href="/browse?term={{.Name}}">{{.Name}}</a></p>
|
||||
</td>
|
||||
<td class="px-4 py-3 font-bold">
|
||||
{{ if .Backend }}
|
||||
<!-- Badge for Backend -->
|
||||
<span class="inline-block bg-blue-500 text-white py-1 px-3 rounded-full text-xs">
|
||||
@@ -37,11 +68,20 @@
|
||||
auto
|
||||
</span>
|
||||
{{ end }}
|
||||
</div>
|
||||
<!-- Additional details can go here -->
|
||||
</li>
|
||||
</td>
|
||||
|
||||
<td class="px-4 py-3">
|
||||
<button
|
||||
class="float-right inline-block rounded bg-red-800 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-red-accent-300 hover:shadow-red-2 focus:bg-red-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-red-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
|
||||
data-twe-ripple-color="light" data-twe-ripple-init="" hx-confirm="Are you sure you wish to delete the model?" hx-post="/browse/delete/model/{{.Name}}" hx-swap="outerHTML"><i class="fa-solid fa-cancel pr-2"></i>Delete</button>
|
||||
</td>
|
||||
{{ end }}
|
||||
</ul>
|
||||
</tbody>
|
||||
</table>
|
||||
{{ end }}
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
@@ -13,10 +13,61 @@
|
||||
🖼️ Available models from <i>{{ len .Repositories }}</i> repositories <a href="https://localai.io/models/" target="_blank" >
|
||||
<i class="fas fa-circle-info pr-2"></i>
|
||||
</a></h2>
|
||||
|
||||
<div class="text-center font-semibold text-gray-100">
|
||||
<h2>Filter by type:</h2>
|
||||
<button hx-post="/browse/search/models"
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
hx-target="#search-results"
|
||||
hx-vals='{"search": "tts"}'
|
||||
hx-indicator=".htmx-indicator" >TTS</button>
|
||||
<button hx-post="/browse/search/models"
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
hx-target="#search-results"
|
||||
hx-vals='{"search": "stablediffusion"}'
|
||||
hx-indicator=".htmx-indicator" >Image generation</button>
|
||||
<button hx-post="/browse/search/models" \
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
hx-target="#search-results"
|
||||
hx-vals='{"search": "llm"}'
|
||||
hx-indicator=".htmx-indicator" >Text generation</button>
|
||||
<button hx-post="/browse/search/models"
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
hx-target="#search-results"
|
||||
hx-vals='{"search": "multimodal"}'
|
||||
hx-indicator=".htmx-indicator" >Multimodal</button>
|
||||
<button hx-post="/browse/search/models"
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
hx-target="#search-results"
|
||||
hx-vals='{"search": "embedding"}'
|
||||
hx-indicator=".htmx-indicator" >Embeddings</button>
|
||||
<button hx-post="/browse/search/models"
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
hx-target="#search-results"
|
||||
hx-vals='{"search": "rerank"}'
|
||||
hx-indicator=".htmx-indicator" >Rerankers</button>
|
||||
<button
|
||||
hx-post="/browse/search/models"
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
hx-target="#search-results"
|
||||
hx-vals='{"search": "whisper"}'
|
||||
hx-indicator=".htmx-indicator" >Audio transcription</button>
|
||||
</div>
|
||||
|
||||
<div class="text-center text-xs font-semibold text-gray-100">
|
||||
Filter by tags:
|
||||
{{ range .AllTags }}
|
||||
<button hx-post="/browse/search/models" class="text-blue-500" hx-target="#search-results"
|
||||
hx-vals='{"search": "{{.}}"}'
|
||||
hx-indicator=".htmx-indicator" >{{.}}</button>
|
||||
{{ end }}
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<span class="htmx-indicator loader"></span>
|
||||
<input class="form-control appearance-none block w-full px-3 py-2 text-base font-normal text-gray-300 pb-2 mb-5 bg-gray-800 bg-clip-padding border border-solid border-gray-600 rounded transition ease-in-out m-0 focus:text-gray-300 focus:bg-gray-900 focus:border-blue-500 focus:outline-none" type="search"
|
||||
{{template "views/partials/inprogress" .}}
|
||||
|
||||
<input class="form-control appearance-none block w-full mt-5 px-3 py-2 text-base font-normal text-gray-300 pb-2 mb-5 bg-gray-800 bg-clip-padding border border-solid border-gray-600 rounded transition ease-in-out m-0 focus:text-gray-300 focus:bg-gray-900 focus:border-blue-500 focus:outline-none" type="search"
|
||||
name="search" placeholder="Begin Typing To Search models..."
|
||||
hx-post="/browse/search/models"
|
||||
hx-trigger="input changed delay:500ms, search"
|
||||
|
||||
@@ -2,6 +2,28 @@
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>{{.Title}}</title>
|
||||
<link
|
||||
rel="stylesheet"
|
||||
href="https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/styles/default.min.css"
|
||||
/>
|
||||
<script
|
||||
defer
|
||||
src="https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/highlight.min.js"
|
||||
></script>
|
||||
<script
|
||||
defer
|
||||
src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"
|
||||
></script>
|
||||
<script
|
||||
defer
|
||||
src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"
|
||||
></script>
|
||||
<script
|
||||
defer
|
||||
src="https://cdn.jsdelivr.net/npm/dompurify@3.0.6/dist/purify.min.js"
|
||||
></script>
|
||||
|
||||
<link href="/static/general.css" rel="stylesheet" />
|
||||
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700&family=Roboto:wght@400;500&display=swap" rel="stylesheet">
|
||||
<link
|
||||
href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700,900&display=swap"
|
||||
@@ -27,52 +49,4 @@
|
||||
</script>
|
||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.1.1/css/all.min.css">
|
||||
<script src="https://unpkg.com/htmx.org@1.9.12" integrity="sha384-ujb1lZYygJmzgSwoxRggbCHcjc0rB2XoQrxeTUQyRjrOnlCoYta87iKBWq3EsdM2" crossorigin="anonymous"></script>
|
||||
<style>
|
||||
body {
|
||||
font-family: 'Inter', sans-serif;
|
||||
}
|
||||
/* Loader (https://cssloaders.github.io/) */
|
||||
.loader {
|
||||
width: 12px;
|
||||
height: 12px;
|
||||
border-radius: 50%;
|
||||
display: block;
|
||||
margin:15px auto;
|
||||
position: relative;
|
||||
color: #FFF;
|
||||
box-sizing: border-box;
|
||||
animation: animloader 2s linear infinite;
|
||||
}
|
||||
|
||||
@keyframes animloader {
|
||||
0% { box-shadow: 14px 0 0 -2px, 38px 0 0 -2px, -14px 0 0 -2px, -38px 0 0 -2px; }
|
||||
25% { box-shadow: 14px 0 0 -2px, 38px 0 0 -2px, -14px 0 0 -2px, -38px 0 0 2px; }
|
||||
50% { box-shadow: 14px 0 0 -2px, 38px 0 0 -2px, -14px 0 0 2px, -38px 0 0 -2px; }
|
||||
75% { box-shadow: 14px 0 0 2px, 38px 0 0 -2px, -14px 0 0 -2px, -38px 0 0 -2px; }
|
||||
100% { box-shadow: 14px 0 0 -2px, 38px 0 0 2px, -14px 0 0 -2px, -38px 0 0 -2px; }
|
||||
}
|
||||
.progress {
|
||||
height: 20px;
|
||||
margin-bottom: 20px;
|
||||
overflow: hidden;
|
||||
background-color: #f5f5f5;
|
||||
border-radius: 4px;
|
||||
box-shadow: inset 0 1px 2px rgba(0,0,0,.1);
|
||||
}
|
||||
.progress-bar {
|
||||
float: left;
|
||||
width: 0%;
|
||||
height: 100%;
|
||||
font-size: 12px;
|
||||
line-height: 20px;
|
||||
color: #fff;
|
||||
text-align: center;
|
||||
background-color: #337ab7;
|
||||
-webkit-box-shadow: inset 0 -1px 0 rgba(0,0,0,.15);
|
||||
box-shadow: inset 0 -1px 0 rgba(0,0,0,.15);
|
||||
-webkit-transition: width .6s ease;
|
||||
-o-transition: width .6s ease;
|
||||
transition: width .6s ease;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
32
core/http/views/partials/inprogress.html
Normal file
@@ -0,0 +1,32 @@
|
||||
<!-- Show in progress operations-->
|
||||
{{ if .ProcessingModels }}
|
||||
<h2
|
||||
class="mt-4 mb-4 text-center text-3xl font-semibold text-gray-100">Operations in progress</h2>
|
||||
{{end}}
|
||||
{{$taskType:=.TaskTypes}}
|
||||
|
||||
{{ range $key,$value:=.ProcessingModels }}
|
||||
{{ $op := index $taskType $key}}
|
||||
{{$parts := split "@" $key}}
|
||||
{{$modelName:=$parts._1}}
|
||||
{{$repository:=$parts._0}}
|
||||
{{ if not (contains "@" $key)}}
|
||||
{{$repository = ""}}
|
||||
{{$modelName = $key}}
|
||||
{{ end }}
|
||||
|
||||
<div class="flex items-center justify-between bg-slate-600 p-2 mb-2 rounded-md">
|
||||
<div class="flex items center">
|
||||
<span class="text-gray-300"><a href="/browse?term={{$parts._1}}"
|
||||
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
|
||||
>{{$modelName}}</a> {{if $repository}} (from the '{{$repository}}' repository) {{end}}</span>
|
||||
</div>
|
||||
<div hx-get="/browse/job/{{$value}}" hx-swap="outerHTML" hx-target="this" hx-trigger="done">
|
||||
<h3 role="status" id="pblabel" >{{$op}}
|
||||
<div hx-get="/browse/job/progress/{{$value}}" hx-trigger="every 600ms"
|
||||
hx-target="this"
|
||||
hx-swap="innerHTML" ></div></h3>
|
||||
</div>
|
||||
</div>
|
||||
{{ end }}
|
||||
<!-- END Show in progress operations-->
|
||||
@@ -6,12 +6,42 @@
|
||||
<a href="/" class="text-white text-xl font-bold"><img src="https://github.com/go-skynet/LocalAI/assets/2420543/0966aa2a-166e-4f99-a3e5-6c915fc997dd" alt="LocalAI Logo" class="h-10 mr-3 border-2 border-gray-300 shadow rounded"></a>
|
||||
<a href="/" class="text-white text-xl font-bold">LocalAI</a>
|
||||
</div>
|
||||
<div>
|
||||
<!-- Menu button for small screens -->
|
||||
<div class="lg:hidden">
|
||||
<button id="menu-toggle" class="text-gray-400 hover:text-white focus:outline-none">
|
||||
<i class="fas fa-bars fa-lg"></i>
|
||||
</button>
|
||||
</div>
|
||||
<!-- Navigation links -->
|
||||
<div class="hidden lg:flex lg:items-center lg:justify-end lg:flex-1 lg:w-0">
|
||||
<a href="/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fas fa-home pr-2"></i>Home</a>
|
||||
<a href="https://localai.io" class="text-gray-400 hover:text-white px-3 py-2 rounded" target="_blank" ><i class="fas fa-book-reader pr-2"></i> Documentation</a>
|
||||
<a href="/browse/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fas fa-brain pr-2"></i> Models</a>
|
||||
<a href="/chat/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fa-solid fa-comments pr-2"></i> Chat</a>
|
||||
<a href="/text2image/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fas fa-image pr-2"></i> Generate images</a>
|
||||
<a href="/tts/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fa-solid fa-music pr-2"></i> TTS </a>
|
||||
<a href="/swagger/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fas fa-code pr-2"></i> API</a>
|
||||
</div>
|
||||
</div>
|
||||
<!-- Collapsible menu for small screens -->
|
||||
<div class="hidden lg:hidden" id="mobile-menu">
|
||||
<div class="pt-4 pb-3 border-t border-gray-700">
|
||||
<a href="/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-home pr-2"></i>Home</a>
|
||||
<a href="https://localai.io" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1" target="_blank" ><i class="fas fa-book-reader pr-2"></i> Documentation</a>
|
||||
<a href="/browse/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-brain pr-2"></i> Models</a>
|
||||
<a href="/chat/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fa-solid fa-comments pr-2"></i> Chat</a>
|
||||
<a href="/text2image/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-image pr-2"></i> Generate images</a>
|
||||
<a href="/tts/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fa-solid fa-music pr-2"></i> TTS </a>
|
||||
<a href="/swagger/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-code pr-2"></i> API</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
</nav>
|
||||
|
||||
<script>
|
||||
// JavaScript to toggle the mobile menu
|
||||
document.getElementById('menu-toggle').addEventListener('click', function () {
|
||||
var mobileMenu = document.getElementById('mobile-menu');
|
||||
mobileMenu.classList.toggle('hidden');
|
||||
});
|
||||
</script>
|
||||
|
||||
89
core/http/views/text2image.html
Normal file
@@ -0,0 +1,89 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
{{template "views/partials/head" .}}
|
||||
<script defer src="/static/image.js"></script>
|
||||
|
||||
<body class="bg-gray-900 text-gray-200">
|
||||
<div class="flex flex-col min-h-screen">
|
||||
|
||||
{{template "views/partials/navbar" .}}
|
||||
<div class="container mx-auto px-4 flex-grow " x-data="{ component: 'menu' }">
|
||||
|
||||
|
||||
<div class="mt-12">
|
||||
<div class="flex items-center justify-center text-center pb-2">
|
||||
<span class="text-3xl font-semibold text-gray-100">
|
||||
🖼️ Text to Image
|
||||
<a href="https://localai.io/features/image-generation" target="_blank" >
|
||||
<i class="fas fa-circle-info pr-2"></i>
|
||||
</a>
|
||||
</span>
|
||||
|
||||
</div>
|
||||
|
||||
<div class="text-center font-semibold text-gray-100">
|
||||
<div class="flex items-center justify-between">
|
||||
|
||||
<div x-show="component === 'menu'" id="menu">
|
||||
<button @click="component = 'key'" title="Update API key"
|
||||
class="m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
|
||||
>Set API Key🔑</button>
|
||||
</div>
|
||||
<form x-show="component === 'key'" id="key">
|
||||
<input
|
||||
type="password"
|
||||
id="apiKey"
|
||||
name="apiKey"
|
||||
placeholder="OpenAI API Key"
|
||||
x-model.lazy="key"
|
||||
/>
|
||||
<button @click="component = 'menu'" type="submit" title="Save API key">
|
||||
🔒
|
||||
</button>
|
||||
</form>
|
||||
|
||||
<select x-data="{ link : '' }" x-model="link" x-init="$watch('link', value => window.location = link)"
|
||||
class="bg-gray-800 text-white border border-gray-600 focus:border-blue-500 focus:ring focus:ring-blue-500 focus:ring-opacity-50 rounded-md shadow-sm p-2 appearance-none"
|
||||
>
|
||||
<!-- Options -->
|
||||
<option value="" disabled class="text-gray-400" >Select a model</option>
|
||||
{{ $model:=.Model}}
|
||||
{{ range .ModelsConfig }}
|
||||
{{ if eq .Name $model }}
|
||||
<option value="/text2image/{{.Name}}" selected class="bg-gray-700 text-white">{{.Name}}</option>
|
||||
{{ else }}
|
||||
<option value="/text2image/{{.Name}}" class="bg-gray-700 text-white">{{.Name}}</option>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</select>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="mt-12">
|
||||
<input id="image-model" type="hidden" value="{{.Model}}">
|
||||
<form id="genimage" action="/text2image/{{.Model}}" method="get">
|
||||
<input
|
||||
type="text"
|
||||
id="input"
|
||||
name="input"
|
||||
placeholder="Prompt…"
|
||||
autocomplete="off"
|
||||
class="p-2 border rounded w-full bg-gray-600 text-white placeholder-gray-300"
|
||||
required
|
||||
/>
|
||||
</form>
|
||||
<div class="container max-w-screen-lg mx-auto mt-4 pb-10 flex justify-center">
|
||||
<div id="loader" class="my-2 loader" ></div>
|
||||
</div>
|
||||
<div class="container max-w-screen-lg mx-auto mt-4 pb-10 flex justify-center">
|
||||
<div id="result" class="mx-auto"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{template "views/partials/footer" .}}
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
86
core/http/views/tts.html
Normal file
@@ -0,0 +1,86 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
{{template "views/partials/head" .}}
|
||||
<script defer src="/static/tts.js"></script>
|
||||
|
||||
<body class="bg-gray-900 text-gray-200">
|
||||
<div class="flex flex-col min-h-screen">
|
||||
|
||||
{{template "views/partials/navbar" .}}
|
||||
<div class="container mx-auto px-4 flex-grow " x-data="{ component: 'menu' }">
|
||||
<div class="mt-12">
|
||||
<div class="flex items-center justify-center text-center pb-2">
|
||||
<span class="text-3xl font-semibold text-gray-100">
|
||||
<i class="fa-solid fa-music"></i> Text to speech/audio
|
||||
<a href="https://localai.io/features/text-to-audio/" target="_blank" >
|
||||
<i class="fas fa-circle-info pr-2"></i>
|
||||
</a>
|
||||
</span>
|
||||
|
||||
</div>
|
||||
<div class="text-center font-semibold text-gray-100">
|
||||
<div class="flex items-center justify-between">
|
||||
|
||||
<div x-show="component === 'menu'" id="menu">
|
||||
<button @click="component = 'key'" title="Update API key"
|
||||
class="m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
|
||||
>Set API Key🔑</button>
|
||||
</div>
|
||||
<form x-show="component === 'key'" id="key">
|
||||
<input
|
||||
type="password"
|
||||
id="apiKey"
|
||||
name="apiKey"
|
||||
placeholder="OpenAI API Key"
|
||||
x-model.lazy="key"
|
||||
/>
|
||||
<button @click="component = 'menu'" type="submit" title="Save API key">
|
||||
🔒
|
||||
</button>
|
||||
</form>
|
||||
|
||||
<select x-data="{ link : '' }" x-model="link" x-init="$watch('link', value => window.location = link)"
|
||||
class="bg-gray-800 text-white border border-gray-600 focus:border-blue-500 focus:ring focus:ring-blue-500 focus:ring-opacity-50 rounded-md shadow-sm p-2 appearance-none"
|
||||
>
|
||||
<!-- Options -->
|
||||
<option value="" disabled class="text-gray-400" >Select a model</option>
|
||||
{{ $model:=.Model}}
|
||||
{{ range .ModelsConfig }}
|
||||
{{ if eq .Name $model }}
|
||||
<option value="/tts/{{.Name}}" selected class="bg-gray-700 text-white">{{.Name}}</option>
|
||||
{{ else }}
|
||||
<option value="/tts/{{.Name}}" class="bg-gray-700 text-white">{{.Name}}</option>
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
</select>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="mt-12">
|
||||
<input id="tts-model" type="hidden" value="{{.Model}}">
|
||||
<form id="tts" action="/tts/{{.Model}}" method="get">
|
||||
<input
|
||||
type="text"
|
||||
id="input"
|
||||
name="input"
|
||||
placeholder="Prompt…"
|
||||
autocomplete="off"
|
||||
class="p-2 border rounded w-full bg-gray-600 text-white placeholder-gray-300"
|
||||
required
|
||||
/>
|
||||
</form>
|
||||
<div class="container max-w-screen-lg mx-auto mt-4 pb-10 flex justify-center">
|
||||
<div id="loader" class="my-2 loader" ></div>
|
||||
</div>
|
||||
<div class="container max-w-screen-lg mx-auto mt-4 pb-10 flex justify-center">
|
||||
<div id="result" class="mx-auto"></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{{template "views/partials/footer" .}}
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -10,7 +10,7 @@ type Segment struct {
|
||||
Tokens []int `json:"tokens"`
|
||||
}
|
||||
|
||||
type Result struct {
|
||||
type TranscriptionResult struct {
|
||||
Segments []Segment `json:"segments"`
|
||||
Text string `json:"text"`
|
||||
}
|
||||
@@ -15,22 +15,22 @@ import (
|
||||
gopsutil "github.com/shirou/gopsutil/v3/process"
|
||||
)
|
||||
|
||||
type BackendMonitor struct {
|
||||
configLoader *config.BackendConfigLoader
|
||||
modelLoader *model.ModelLoader
|
||||
options *config.ApplicationConfig // Taking options in case we need to inspect ExternalGRPCBackends, though that's out of scope for now, hence the name.
|
||||
type BackendMonitorService struct {
|
||||
backendConfigLoader *config.BackendConfigLoader
|
||||
modelLoader *model.ModelLoader
|
||||
options *config.ApplicationConfig // Taking options in case we need to inspect ExternalGRPCBackends, though that's out of scope for now, hence the name.
|
||||
}
|
||||
|
||||
func NewBackendMonitor(configLoader *config.BackendConfigLoader, modelLoader *model.ModelLoader, appConfig *config.ApplicationConfig) BackendMonitor {
|
||||
return BackendMonitor{
|
||||
configLoader: configLoader,
|
||||
modelLoader: modelLoader,
|
||||
options: appConfig,
|
||||
func NewBackendMonitorService(modelLoader *model.ModelLoader, configLoader *config.BackendConfigLoader, appConfig *config.ApplicationConfig) *BackendMonitorService {
|
||||
return &BackendMonitorService{
|
||||
modelLoader: modelLoader,
|
||||
backendConfigLoader: configLoader,
|
||||
options: appConfig,
|
||||
}
|
||||
}
|
||||
|
||||
func (bm BackendMonitor) getModelLoaderIDFromModelName(modelName string) (string, error) {
|
||||
config, exists := bm.configLoader.GetBackendConfig(modelName)
|
||||
func (bms BackendMonitorService) getModelLoaderIDFromModelName(modelName string) (string, error) {
|
||||
config, exists := bms.backendConfigLoader.GetBackendConfig(modelName)
|
||||
var backendId string
|
||||
if exists {
|
||||
backendId = config.Model
|
||||
@@ -46,8 +46,8 @@ func (bm BackendMonitor) getModelLoaderIDFromModelName(modelName string) (string
|
||||
return backendId, nil
|
||||
}
|
||||
|
||||
func (bm *BackendMonitor) SampleLocalBackendProcess(model string) (*schema.BackendMonitorResponse, error) {
|
||||
config, exists := bm.configLoader.GetBackendConfig(model)
|
||||
func (bms *BackendMonitorService) SampleLocalBackendProcess(model string) (*schema.BackendMonitorResponse, error) {
|
||||
config, exists := bms.backendConfigLoader.GetBackendConfig(model)
|
||||
var backend string
|
||||
if exists {
|
||||
backend = config.Model
|
||||
@@ -60,7 +60,7 @@ func (bm *BackendMonitor) SampleLocalBackendProcess(model string) (*schema.Backe
|
||||
backend = fmt.Sprintf("%s.bin", backend)
|
||||
}
|
||||
|
||||
pid, err := bm.modelLoader.GetGRPCPID(backend)
|
||||
pid, err := bms.modelLoader.GetGRPCPID(backend)
|
||||
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("model", model).Msg("failed to find GRPC pid")
|
||||
@@ -101,12 +101,12 @@ func (bm *BackendMonitor) SampleLocalBackendProcess(model string) (*schema.Backe
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (bm BackendMonitor) CheckAndSample(modelName string) (*proto.StatusResponse, error) {
|
||||
backendId, err := bm.getModelLoaderIDFromModelName(modelName)
|
||||
func (bms BackendMonitorService) CheckAndSample(modelName string) (*proto.StatusResponse, error) {
|
||||
backendId, err := bms.getModelLoaderIDFromModelName(modelName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
modelAddr := bm.modelLoader.CheckIsLoaded(backendId)
|
||||
modelAddr := bms.modelLoader.CheckIsLoaded(backendId)
|
||||
if modelAddr == "" {
|
||||
return nil, fmt.Errorf("backend %s is not currently loaded", backendId)
|
||||
}
|
||||
@@ -114,7 +114,7 @@ func (bm BackendMonitor) CheckAndSample(modelName string) (*proto.StatusResponse
|
||||
status, rpcErr := modelAddr.GRPC(false, nil).Status(context.TODO())
|
||||
if rpcErr != nil {
|
||||
log.Warn().Msgf("backend %s experienced an error retrieving status info: %s", backendId, rpcErr.Error())
|
||||
val, slbErr := bm.SampleLocalBackendProcess(backendId)
|
||||
val, slbErr := bms.SampleLocalBackendProcess(backendId)
|
||||
if slbErr != nil {
|
||||
return nil, fmt.Errorf("backend %s experienced an error retrieving status info via rpc: %s, then failed local node process sample: %s", backendId, rpcErr.Error(), slbErr.Error())
|
||||
}
|
||||
@@ -131,10 +131,10 @@ func (bm BackendMonitor) CheckAndSample(modelName string) (*proto.StatusResponse
|
||||
return status, nil
|
||||
}
|
||||
|
||||
func (bm BackendMonitor) ShutdownModel(modelName string) error {
|
||||
backendId, err := bm.getModelLoaderIDFromModelName(modelName)
|
||||
func (bms BackendMonitorService) ShutdownModel(modelName string) error {
|
||||
backendId, err := bms.getModelLoaderIDFromModelName(modelName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bm.modelLoader.ShutdownModel(backendId)
|
||||
return bms.modelLoader.ShutdownModel(backendId)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
@@ -84,18 +85,47 @@ func (g *GalleryService) Start(c context.Context, cl *config.BackendConfigLoader
|
||||
}
|
||||
|
||||
var err error
|
||||
// if the request contains a gallery name, we apply the gallery from the gallery list
|
||||
if op.GalleryName != "" {
|
||||
if strings.Contains(op.GalleryName, "@") {
|
||||
err = gallery.InstallModelFromGallery(op.Galleries, op.GalleryName, g.modelPath, op.Req, progressCallback)
|
||||
} else {
|
||||
err = gallery.InstallModelFromGalleryByName(op.Galleries, op.GalleryName, g.modelPath, op.Req, progressCallback)
|
||||
|
||||
// delete a model
|
||||
if op.Delete {
|
||||
modelConfig := &config.BackendConfig{}
|
||||
// Galleryname is the name of the model in this case
|
||||
dat, err := os.ReadFile(filepath.Join(g.modelPath, op.GalleryModelName+".yaml"))
|
||||
if err != nil {
|
||||
updateError(err)
|
||||
continue
|
||||
}
|
||||
} else if op.ConfigURL != "" {
|
||||
startup.PreloadModelsConfigurations(op.ConfigURL, g.modelPath, op.ConfigURL)
|
||||
err = cl.Preload(g.modelPath)
|
||||
err = yaml.Unmarshal(dat, modelConfig)
|
||||
if err != nil {
|
||||
updateError(err)
|
||||
continue
|
||||
}
|
||||
|
||||
files := []string{}
|
||||
// Remove the model from the config
|
||||
if modelConfig.Model != "" {
|
||||
files = append(files, modelConfig.ModelFileName())
|
||||
}
|
||||
|
||||
if modelConfig.MMProj != "" {
|
||||
files = append(files, modelConfig.MMProjFileName())
|
||||
}
|
||||
|
||||
err = gallery.DeleteModelFromSystem(g.modelPath, op.GalleryModelName, files)
|
||||
} else {
|
||||
err = prepareModel(g.modelPath, op.Req, cl, progressCallback)
|
||||
// if the request contains a gallery name, we apply the gallery from the gallery list
|
||||
if op.GalleryModelName != "" {
|
||||
if strings.Contains(op.GalleryModelName, "@") {
|
||||
err = gallery.InstallModelFromGallery(op.Galleries, op.GalleryModelName, g.modelPath, op.Req, progressCallback)
|
||||
} else {
|
||||
err = gallery.InstallModelFromGalleryByName(op.Galleries, op.GalleryModelName, g.modelPath, op.Req, progressCallback)
|
||||
}
|
||||
} else if op.ConfigURL != "" {
|
||||
startup.PreloadModelsConfigurations(op.ConfigURL, g.modelPath, op.ConfigURL)
|
||||
err = cl.Preload(g.modelPath)
|
||||
} else {
|
||||
err = prepareModel(g.modelPath, op.Req, cl, progressCallback)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -116,7 +146,13 @@ func (g *GalleryService) Start(c context.Context, cl *config.BackendConfigLoader
|
||||
continue
|
||||
}
|
||||
|
||||
g.UpdateStatus(op.Id, &gallery.GalleryOpStatus{Processed: true, Message: "completed", Progress: 100})
|
||||
g.UpdateStatus(op.Id,
|
||||
&gallery.GalleryOpStatus{
|
||||
Deletion: op.Delete,
|
||||
Processed: true,
|
||||
GalleryModelName: op.GalleryModelName,
|
||||
Message: "completed",
|
||||
Progress: 100})
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
72
core/services/list_models.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
type ListModelsService struct {
|
||||
bcl *config.BackendConfigLoader
|
||||
ml *model.ModelLoader
|
||||
appConfig *config.ApplicationConfig
|
||||
}
|
||||
|
||||
func NewListModelsService(ml *model.ModelLoader, bcl *config.BackendConfigLoader, appConfig *config.ApplicationConfig) *ListModelsService {
|
||||
return &ListModelsService{
|
||||
bcl: bcl,
|
||||
ml: ml,
|
||||
appConfig: appConfig,
|
||||
}
|
||||
}
|
||||
|
||||
func (lms *ListModelsService) ListModels(filter string, excludeConfigured bool) ([]schema.OpenAIModel, error) {
|
||||
|
||||
models, err := lms.ml.ListModels()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mm map[string]interface{} = map[string]interface{}{}
|
||||
|
||||
dataModels := []schema.OpenAIModel{}
|
||||
|
||||
var filterFn func(name string) bool
|
||||
|
||||
// If filter is not specified, do not filter the list by model name
|
||||
if filter == "" {
|
||||
filterFn = func(_ string) bool { return true }
|
||||
} else {
|
||||
// If filter _IS_ specified, we compile it to a regex which is used to create the filterFn
|
||||
rxp, err := regexp.Compile(filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
filterFn = func(name string) bool {
|
||||
return rxp.MatchString(name)
|
||||
}
|
||||
}
|
||||
|
||||
// Start with the known configurations
|
||||
for _, c := range lms.bcl.GetAllBackendConfigs() {
|
||||
if excludeConfigured {
|
||||
mm[c.Model] = nil
|
||||
}
|
||||
|
||||
if filterFn(c.Name) {
|
||||
dataModels = append(dataModels, schema.OpenAIModel{ID: c.Name, Object: "model"})
|
||||
}
|
||||
}
|
||||
|
||||
// Then iterate through the loose files:
|
||||
for _, m := range models {
|
||||
// And only adds them if they shouldn't be skipped.
|
||||
if _, exists := mm[m]; !exists && filterFn(m) {
|
||||
dataModels = append(dataModels, schema.OpenAIModel{ID: m, Object: "model"})
|
||||
}
|
||||
}
|
||||
|
||||
return dataModels, nil
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
@@ -20,7 +21,6 @@ type configFileHandler struct {
|
||||
|
||||
watcher *fsnotify.Watcher
|
||||
|
||||
configDir string
|
||||
appConfig *config.ApplicationConfig
|
||||
}
|
||||
|
||||
@@ -29,11 +29,16 @@ type configFileHandler struct {
|
||||
func newConfigFileHandler(appConfig *config.ApplicationConfig) configFileHandler {
|
||||
c := configFileHandler{
|
||||
handlers: make(map[string]fileHandler),
|
||||
configDir: appConfig.DynamicConfigsDir,
|
||||
appConfig: appConfig,
|
||||
}
|
||||
c.Register("api_keys.json", readApiKeysJson(*appConfig), true)
|
||||
c.Register("external_backends.json", readExternalBackendsJson(*appConfig), true)
|
||||
err := c.Register("api_keys.json", readApiKeysJson(*appConfig), true)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("file", "api_keys.json").Msg("unable to register config file handler")
|
||||
}
|
||||
err = c.Register("external_backends.json", readExternalBackendsJson(*appConfig), true)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("file", "external_backends.json").Msg("unable to register config file handler")
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
@@ -44,15 +49,17 @@ func (c *configFileHandler) Register(filename string, handler fileHandler, runNo
|
||||
}
|
||||
c.handlers[filename] = handler
|
||||
if runNow {
|
||||
c.callHandler(path.Join(c.appConfig.DynamicConfigsDir, filename), handler)
|
||||
c.callHandler(filename, handler)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *configFileHandler) callHandler(filename string, handler fileHandler) {
|
||||
fileContent, err := os.ReadFile(filename)
|
||||
rootedFilePath := filepath.Join(c.appConfig.DynamicConfigsDir, filepath.Clean(filename))
|
||||
log.Trace().Str("filename", rootedFilePath).Msg("reading file for dynamic config update")
|
||||
fileContent, err := os.ReadFile(rootedFilePath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
log.Error().Err(err).Str("filename", filename).Msg("could not read file")
|
||||
log.Error().Err(err).Str("filename", rootedFilePath).Msg("could not read file")
|
||||
}
|
||||
|
||||
if err = handler(fileContent, c.appConfig); err != nil {
|
||||
@@ -64,7 +71,8 @@ func (c *configFileHandler) Watch() error {
|
||||
configWatcher, err := fsnotify.NewWatcher()
|
||||
c.watcher = configWatcher
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Str("configdir", c.configDir).Msg("wnable to create a watcher for configuration directory")
|
||||
log.Fatal().Err(err).Str("configdir", c.appConfig.DynamicConfigsDir).Msg("unable to create a watcher for configuration directory")
|
||||
|
||||
}
|
||||
|
||||
if c.appConfig.DynamicConfigsDirPollInterval > 0 {
|
||||
@@ -95,7 +103,7 @@ func (c *configFileHandler) Watch() error {
|
||||
continue
|
||||
}
|
||||
|
||||
c.callHandler(event.Name, handler)
|
||||
c.callHandler(filepath.Base(event.Name), handler)
|
||||
}
|
||||
case err, ok := <-c.watcher.Errors:
|
||||
log.Error().Err(err).Msg("config watcher error received")
|
||||
@@ -116,13 +124,14 @@ func (c *configFileHandler) Watch() error {
|
||||
}
|
||||
|
||||
// TODO: When we institute graceful shutdown, this should be called
|
||||
func (c *configFileHandler) Stop() {
|
||||
c.watcher.Close()
|
||||
func (c *configFileHandler) Stop() error {
|
||||
return c.watcher.Close()
|
||||
}
|
||||
|
||||
func readApiKeysJson(startupAppConfig config.ApplicationConfig) fileHandler {
|
||||
handler := func(fileContent []byte, appConfig *config.ApplicationConfig) error {
|
||||
log.Debug().Msg("processing api_keys.json")
|
||||
log.Debug().Msg("processing api keys runtime update")
|
||||
log.Trace().Int("numKeys", len(startupAppConfig.ApiKeys)).Msg("api keys provided at startup")
|
||||
|
||||
if len(fileContent) > 0 {
|
||||
// Parse JSON content from the file
|
||||
@@ -132,11 +141,14 @@ func readApiKeysJson(startupAppConfig config.ApplicationConfig) fileHandler {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Trace().Int("numKeys", len(fileKeys)).Msg("discovered API keys from api keys dynamic config dile")
|
||||
|
||||
appConfig.ApiKeys = append(startupAppConfig.ApiKeys, fileKeys...)
|
||||
} else {
|
||||
log.Trace().Msg("no API keys discovered from dynamic config file")
|
||||
appConfig.ApiKeys = startupAppConfig.ApiKeys
|
||||
}
|
||||
log.Debug().Msg("api keys loaded from api_keys.json")
|
||||
log.Trace().Int("numKeys", len(appConfig.ApiKeys)).Msg("total api keys after processing")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/internal"
|
||||
"github.com/go-skynet/LocalAI/pkg/assets"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
pkgStartup "github.com/go-skynet/LocalAI/pkg/startup"
|
||||
"github.com/go-skynet/LocalAI/pkg/xsysinfo"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -18,29 +20,40 @@ func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.Mode
|
||||
|
||||
log.Info().Msgf("Starting LocalAI using %d threads, with models path: %s", options.Threads, options.ModelPath)
|
||||
log.Info().Msgf("LocalAI version: %s", internal.PrintableVersion())
|
||||
caps, err := xsysinfo.CPUCapabilities()
|
||||
if err == nil {
|
||||
log.Debug().Msgf("CPU capabilities: %v", caps)
|
||||
}
|
||||
gpus, err := xsysinfo.GPUs()
|
||||
if err == nil {
|
||||
log.Debug().Msgf("GPU count: %d", len(gpus))
|
||||
for _, gpu := range gpus {
|
||||
log.Debug().Msgf("GPU: %s", gpu.String())
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure directories exists
|
||||
if options.ModelPath == "" {
|
||||
return nil, nil, nil, fmt.Errorf("options.ModelPath cannot be empty")
|
||||
}
|
||||
err := os.MkdirAll(options.ModelPath, 0755)
|
||||
err = os.MkdirAll(options.ModelPath, 0750)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("unable to create ModelPath: %q", err)
|
||||
}
|
||||
if options.ImageDir != "" {
|
||||
err := os.MkdirAll(options.ImageDir, 0755)
|
||||
err := os.MkdirAll(options.ImageDir, 0750)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("unable to create ImageDir: %q", err)
|
||||
}
|
||||
}
|
||||
if options.AudioDir != "" {
|
||||
err := os.MkdirAll(options.AudioDir, 0755)
|
||||
err := os.MkdirAll(options.AudioDir, 0750)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("unable to create AudioDir: %q", err)
|
||||
}
|
||||
}
|
||||
if options.UploadDir != "" {
|
||||
err := os.MkdirAll(options.UploadDir, 0755)
|
||||
err := os.MkdirAll(options.UploadDir, 0750)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("unable to create UploadDir: %q", err)
|
||||
}
|
||||
@@ -100,7 +113,10 @@ func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.Mode
|
||||
go func() {
|
||||
<-options.Context.Done()
|
||||
log.Debug().Msgf("Context canceled, shutting down")
|
||||
ml.StopAllGRPC()
|
||||
err := ml.StopAllGRPC()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("error while stopping all grpc backends")
|
||||
}
|
||||
}()
|
||||
|
||||
if options.WatchDog {
|
||||
@@ -122,8 +138,41 @@ func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.Mode
|
||||
// Watch the configuration directory
|
||||
// If the directory does not exist, we don't watch it
|
||||
configHandler := newConfigFileHandler(options)
|
||||
configHandler.Watch()
|
||||
err = configHandler.Watch()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("error establishing configuration directory watcher")
|
||||
}
|
||||
|
||||
log.Info().Msg("core/startup process completed!")
|
||||
return cl, ml, options, nil
|
||||
}
|
||||
|
||||
// In Lieu of a proper DI framework, this function wires up the Application manually.
|
||||
// This is in core/startup rather than core/state.go to keep package references clean!
|
||||
func createApplication(appConfig *config.ApplicationConfig) *core.Application {
|
||||
app := &core.Application{
|
||||
ApplicationConfig: appConfig,
|
||||
BackendConfigLoader: config.NewBackendConfigLoader(),
|
||||
ModelLoader: model.NewModelLoader(appConfig.ModelPath),
|
||||
}
|
||||
|
||||
var err error
|
||||
|
||||
// app.EmbeddingsBackendService = backend.NewEmbeddingsBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig)
|
||||
// app.ImageGenerationBackendService = backend.NewImageGenerationBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig)
|
||||
// app.LLMBackendService = backend.NewLLMBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig)
|
||||
// app.TranscriptionBackendService = backend.NewTranscriptionBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig)
|
||||
// app.TextToSpeechBackendService = backend.NewTextToSpeechBackendService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig)
|
||||
|
||||
app.BackendMonitorService = services.NewBackendMonitorService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig)
|
||||
app.GalleryService = services.NewGalleryService(app.ApplicationConfig.ModelPath)
|
||||
app.ListModelsService = services.NewListModelsService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig)
|
||||
// app.OpenAIService = services.NewOpenAIService(app.ModelLoader, app.BackendConfigLoader, app.ApplicationConfig, app.LLMBackendService)
|
||||
|
||||
app.LocalAIMetricsService, err = services.NewLocalAIMetricsService()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("encountered an error initializing metrics service, startup will continue but metrics will not be tracked.")
|
||||
}
|
||||
|
||||
return app
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
weight: 20
|
||||
title: "Advanced"
|
||||
description: "Advanced usage"
|
||||
icon: science
|
||||
icon: settings
|
||||
lead: ""
|
||||
date: 2020-10-06T08:49:15+00:00
|
||||
lastmod: 2020-10-06T08:49:15+00:00
|
||||
|
||||
@@ -498,4 +498,28 @@ When using the `-core` container image it is possible to prepare the python back
|
||||
|
||||
```bash
|
||||
docker run --env EXTRA_BACKENDS="backend/python/diffusers" quay.io/go-skynet/local-ai:master-ffmpeg-core
|
||||
```
|
||||
```
|
||||
|
||||
### Concurrent requests
|
||||
|
||||
LocalAI supports parallel requests for backends that support it. For instance, vLLM and llama.cpp support parallel requests, so LocalAI can serve multiple requests in parallel.

To enable parallel requests, pass `--parallel-requests` or set the `PARALLEL_REQUEST` environment variable to true.

The environment variables that tweak parallelism are the following:
|
||||
|
||||
```
|
||||
### Python backends GRPC max workers
|
||||
### Default number of workers for GRPC Python backends.
|
||||
### This actually controls whether a backend can process multiple requests or not.
|
||||
# PYTHON_GRPC_MAX_WORKERS=1
|
||||
|
||||
### Define the number of parallel LLAMA.cpp workers (Defaults to 1)
|
||||
# LLAMACPP_PARALLEL=1
|
||||
|
||||
### Enable to run parallel requests
|
||||
# LOCALAI_PARALLEL_REQUESTS=true
|
||||
```
|
||||
|
||||
Note that for llama.cpp you need to set `LLAMACPP_PARALLEL` to the number of parallel processes your GPU/CPU can handle. For Python-based backends (like vLLM) you can set `PYTHON_GRPC_MAX_WORKERS` to the number of parallel requests.
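For instance, a start command that enables parallel requests might look like the following sketch (the worker counts are illustrative; size them to your hardware, and swap the image tag for the one you actually run):

```bash
# Illustrative values: enable parallel requests and size the per-backend workers.
docker run \
  --env LOCALAI_PARALLEL_REQUESTS=true \
  --env LLAMACPP_PARALLEL=4 \
  --env PYTHON_GRPC_MAX_WORKERS=4 \
  quay.io/go-skynet/local-ai:master-ffmpeg-core
```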
|
||||
|
||||
|
||||
@@ -7,15 +7,10 @@ weight = 18
|
||||
url = '/models'
|
||||
+++
|
||||
|
||||
<h1 align="center">
|
||||
<br>
|
||||
<img height="300" src="https://github.com/go-skynet/model-gallery/assets/2420543/7a6a8183-6d0a-4dc4-8e1d-f2672fab354e"> <br>
|
||||
<br>
|
||||
</h1>
|
||||
The model gallery is a curated collection of model configurations for [LocalAI](https://github.com/go-skynet/LocalAI) that enables one-click install of models directly from the LocalAI Web interface.
|
||||
|
||||
The model gallery is a (experimental!) collection of models configurations for [LocalAI](https://github.com/go-skynet/LocalAI).
|
||||
To ease model installation, LocalAI provides a way to preload models on start and to download and install them at runtime. You can install models manually by copying them into the `models` directory, or use the API or the Web interface to configure, download and verify the model assets for you.
|
||||
|
||||
LocalAI to ease out installations of models provide a way to preload models on start and downloading and installing them in runtime. You can install models manually by copying them over the `models` directory, or use the API to configure, download and verify the model assets for you. As the UI is still a work in progress, you will find here the documentation about the API Endpoints.
|
||||
|
||||
{{% alert note %}}
|
||||
The models in this gallery are not directly maintained by LocalAI. If you find a model that is not working, please open an issue on the model gallery repository.
|
||||
@@ -25,58 +20,55 @@ The models in this gallery are not directly maintained by LocalAI. If you find a
|
||||
GPT and text generation models might have a license which is not permissive for commercial use or might be questionable or without any license at all. Please check the model license before using it. The official gallery contains only open licensed models.
|
||||
{{% /alert %}}
|
||||
|
||||

|
||||
|
||||
## Useful Links and resources
|
||||
|
||||
- [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) - here you can find a list of the best-performing models on the Open LLM benchmark. Keep in mind that models compatible with LocalAI must be quantized in the `gguf` format.
|
||||
|
||||
## How it works
|
||||
|
||||
## Model repositories
|
||||
Navigate to the "Models" section of the WebUI from the navbar at the top. There you can find a list of models that can be installed, and you can install them by clicking the "Install" button.
|
||||
|
||||
## Add other galleries
|
||||
|
||||
You can add other galleries by setting the `GALLERIES` environment variable. The `GALLERIES` environment variable is a list of JSON objects, where each object has a `name` and a `url` field. The `name` field is the name of the gallery, and the `url` field is the URL of the gallery's index file, for example:
|
||||
|
||||
```json
|
||||
GALLERIES=[{"name":"<GALLERY_NAME>", "url":"<GALLERY_URL"}]
|
||||
```
|
||||
|
||||
The models in the gallery will be automatically indexed and available for installation.
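As a sketch, assuming a hypothetical additional gallery named `mygallery` whose index lives at `https://example.com/index.yaml`, it can be listed alongside the default `localai` gallery:

```bash
# Hypothetical: the default localai gallery plus one custom gallery.
GALLERIES='[{"name":"localai", "url":"github:mudler/localai/gallery/index.yaml"}, {"name":"mygallery", "url":"https://example.com/index.yaml"}]' \
  local-ai
```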
|
||||
|
||||
## API Reference
|
||||
|
||||
### Model repositories
|
||||
|
||||
You can install a model at runtime, while the API is already up and running, or before starting the API by preloading the models.

To install a model at runtime, use the `/models/apply` LocalAI API endpoint.
|
||||
|
||||
To enable the `model-gallery` repository you need to start `local-ai` with the `GALLERIES` environment variable:
|
||||
By default LocalAI is configured with the `localai` repository.
|
||||
|
||||
To use additional repositories you need to start `local-ai` with the `GALLERIES` environment variable:
|
||||
|
||||
```
|
||||
GALLERIES=[{"name":"<GALLERY_NAME>", "url":"<GALLERY_URL"}]
|
||||
```
|
||||
|
||||
For example, to enable the `model-gallery` repository, start `local-ai` with:
|
||||
For example, to enable the default `localai` repository, you can start `local-ai` with:
|
||||
|
||||
```
|
||||
GALLERIES=[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}]
|
||||
GALLERIES=[{"name":"localai", "url":"github:mudler/localai/gallery/index.yaml"}]
|
||||
```
|
||||
|
||||
where `github:go-skynet/model-gallery/index.yaml` will be expanded automatically to `https://raw.githubusercontent.com/go-skynet/model-gallery/main/index.yaml`.
|
||||
where `github:mudler/localai/gallery/index.yaml` will be expanded automatically to `https://raw.githubusercontent.com/mudler/LocalAI/main/index.yaml`.
|
||||
|
||||
Note: the URLs are expanded automatically for `github` and `huggingface`; plain `https://` and `http://` prefixes work as well.
|
||||
|
||||
{{% alert note %}}
|
||||
|
||||
As this feature is experimental, you need to run `local-ai` with a list of `GALLERIES`. Currently there are two galleries:
|
||||
|
||||
- An official one, containing only definitions and models with a clear LICENSE to avoid any dmca infringment. As I'm not sure what's the best action to do in this case, I'm not going to include any model that is not clearly licensed in this repository which is offically linked to LocalAI.
|
||||
- A "community" one that contains an index of `huggingface` models that are compatible with the `ggml` format and lives in the `localai-huggingface-zoo` repository.
|
||||
|
||||
To enable the two repositories, start `LocalAI` with the `GALLERIES` environment variable:
|
||||
|
||||
```bash
|
||||
GALLERIES=[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}, {"url": "github:go-skynet/model-gallery/huggingface.yaml","name":"huggingface"}]
|
||||
```
|
||||
|
||||
If running with `docker-compose`, simply edit the `.env` file and uncomment the `GALLERIES` variable, and add the one you want to use.
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
{{% alert note %}}
|
||||
You might not find all the models in this gallery: the gallery is updated automatically by CI. However, you can find most models on Hugging Face (https://huggingface.co/); generally a model becomes available there about `~24h` after upload.

Under no circumstances are LocalAI or its developers responsible for the models in this gallery: CI only indexes them and provides a convenient way to install them with an automatic configuration and a consistent API. Don't install models from authors you don't trust, and check the appropriate license for your use case. Models are automatically indexed and hosted on Hugging Face (https://huggingface.co/). For any issue with a model, please open an issue on the model gallery repository if it's a LocalAI misconfiguration; otherwise refer to the Hugging Face repository. If you think a model should not be listed, please reach out to us and we will remove it from the gallery.
|
||||
{{% /alert %}}
|
||||
|
||||
{{% alert note %}}
|
||||
|
||||
There is no documentation yet on how to build a gallery or a repository - but you can find an example in the [model-gallery](https://github.com/go-skynet/model-gallery) repository.
|
||||
|
||||
There is no documentation yet on how to build your own gallery. However, you can find the source of the default gallery in the [LocalAI repository](https://github.com/mudler/LocalAI/tree/master/gallery).
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
@@ -110,34 +102,16 @@ To install a model from the gallery repository, you can pass the model name in t
|
||||
```bash
|
||||
LOCALAI=http://localhost:8080
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"id": "model-gallery@bert-embeddings"
|
||||
"id": "localai@bert-embeddings"
|
||||
}'
|
||||
```
|
||||
|
||||
where:
|
||||
- `model-gallery` is the repository. It is optional and can be omitted. If the repository is omitted LocalAI will search the model by name in all the repositories. In the case the same model name is present in both galleries the first match wins.
|
||||
- `localai` is the repository. It is optional and can be omitted. If the repository is omitted, LocalAI searches for the model by name in all the repositories; if the same model name is present in more than one gallery, the first match wins (see the example after this list).
|
||||
- `bert-embeddings` is the model name in the gallery
|
||||
(read its [config here](https://github.com/go-skynet/model-gallery/blob/main/bert-embeddings.yaml)).
|
||||
(read its [config here](https://github.com/mudler/LocalAI/tree/master/gallery/blob/main/bert-embeddings.yaml)).
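As a sketch of the omitted-repository form (assuming `bert-embeddings` exists in one of the configured galleries), the same endpoint can be called with the bare model name:

```bash
# Hypothetical: install by model name only; LocalAI searches all configured galleries.
LOCALAI=http://localhost:8080
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
     "id": "bert-embeddings"
   }'
```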
|
||||
|
||||
{{% alert note %}}
|
||||
If the `huggingface` model gallery is enabled (it's enabled by default),
|
||||
and the model has an entry in the model gallery's associated YAML config
|
||||
(for `huggingface`, see [`model-gallery/huggingface.yaml`](https://github.com/go-skynet/model-gallery/blob/main/huggingface.yaml)),
|
||||
you can install models by specifying directly the model's `id`.
|
||||
For example, to install wizardlm superhot:
|
||||
|
||||
```bash
|
||||
LOCALAI=http://localhost:8080
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"id": "huggingface@TheBloke/WizardLM-13B-V1-0-Uncensored-SuperHOT-8K-GGML/wizardlm-13b-v1.0-superhot-8k.ggmlv3.q4_K_M.bin"
|
||||
}'
|
||||
```
|
||||
|
||||
Note that the `id` can be used similarly when pre-loading models at start.
|
||||
{{% /alert %}}
|
||||
|
||||
|
||||
### How to install a model not part of a gallery
|
||||
|
||||
If you don't want to set any gallery repository, you can still install models by loading a model configuration file.
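As a rough sketch, a minimal model configuration file looks like the following (the name and model file are illustrative; the model file is assumed to already be in your models directory):

```yaml
# minimal illustrative model configuration
name: my-model                # the name you will use in API requests
parameters:
  model: phi-2.Q2_K.gguf      # a model file already present in the models directory
```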
|
||||
|
||||
@@ -201,13 +175,13 @@ Note: `url` or `id` must be specified. `url` is used to a url to a model gallery
|
||||
For example:
|
||||
|
||||
```bash
|
||||
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/stablediffusion.yaml"}]
|
||||
PRELOAD_MODELS=[{"url": "github:mudler/LocalAI/gallery/stablediffusion.yaml@master"}]
|
||||
```
|
||||
|
||||
or as arg:
|
||||
|
||||
```bash
|
||||
local-ai --preload-models '[{"url": "github:mudler/LocalAI/gallery/stablediffusion.yaml@master"}]'
|
||||
```
|
||||
|
||||
or in a YAML file:
|
||||
@@ -218,14 +192,14 @@ local-ai --preload-models-config "/path/to/yaml"
|
||||
|
||||
YAML:
|
||||
```yaml
|
||||
- url: github:mudler/LocalAI/gallery/stablediffusion.yaml@master
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
{{% alert note %}}
|
||||
|
||||
You can already find some openly licensed models in the [LocalAI gallery](https://github.com/mudler/LocalAI/tree/master/gallery).
|
||||
|
||||
If you don't find the model in the gallery, you can try to use the "base" model and provide a URL to LocalAI:
|
||||
|
||||
@@ -233,7 +207,7 @@ If you don't find the model in the gallery you can try to use the "base" model a
|
||||
|
||||
```
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"url": "github:go-skynet/model-gallery/base.yaml",
|
||||
"url": "github:mudler/LocalAI/gallery/base.yaml@master",
|
||||
"name": "model-name",
|
||||
"files": [
|
||||
{
|
||||
@@ -249,7 +223,7 @@ curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
### Override a model name
|
||||
|
||||
To install a model with a different name, specify a `name` parameter in the request body.
|
||||
|
||||
@@ -266,11 +240,11 @@ For example, to install a model as `gpt-3.5-turbo`:
|
||||
```bash
|
||||
LOCALAI=http://localhost:8080
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"url": "github:go-skynet/model-gallery/gpt4all-j.yaml",
|
||||
"url": "github:mudler/LocalAI/gallery/gpt4all-j.yaml",
|
||||
"name": "gpt-3.5-turbo"
|
||||
}'
|
||||
```
|
||||
### Additional Files
|
||||
|
||||
<details>
|
||||
|
||||
@@ -293,7 +267,7 @@ curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
|
||||
</details>
|
||||
|
||||
### Overriding configuration files
|
||||
|
||||
<details>
|
||||
|
||||
@@ -324,7 +298,7 @@ curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
|
||||
```bash
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"url": "github:go-skynet/model-gallery/bert-embeddings.yaml",
|
||||
"url": "github:mudler/LocalAI/gallery/bert-embeddings.yaml",
|
||||
"name": "text-embedding-ada-002"
|
||||
}'
|
||||
```
|
||||
@@ -348,10 +322,10 @@ URL: https://github.com/EdVince/Stable-Diffusion-NCNN
|
||||
{{< tabs >}}
|
||||
{{% tab name="Prepare the model in runtime" %}}
|
||||
|
||||
While the API is running, you can install the model by using the `/models/apply` endpoint, pointing it to the `stablediffusion` model in the [models-gallery](https://github.com/mudler/LocalAI/tree/master/gallery#image-generation-stable-diffusion):
|
||||
```bash
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"url": "github:go-skynet/model-gallery/stablediffusion.yaml"
|
||||
"url": "github:mudler/LocalAI/gallery/stablediffusion.yaml@master"
|
||||
}'
|
||||
```
|
||||
|
||||
@@ -361,13 +335,13 @@ curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
You can set the `PRELOAD_MODELS` environment variable:
|
||||
|
||||
```bash
|
||||
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/stablediffusion.yaml"}]
|
||||
PRELOAD_MODELS=[{"url": "github:mudler/LocalAI/gallery/stablediffusion.yaml@master"}]
|
||||
```
|
||||
|
||||
or as arg:
|
||||
|
||||
```bash
|
||||
local-ai --preload-models '[{"url": "github:mudler/LocalAI/gallery/stablediffusion.yaml@master"}]'
|
||||
```
|
||||
|
||||
or in a YAML file:
|
||||
@@ -378,7 +352,7 @@ local-ai --preload-models-config "/path/to/yaml"
|
||||
|
||||
YAML:
|
||||
```yaml
|
||||
- url: github:mudler/LocalAI/gallery/stablediffusion.yaml@master
|
||||
```
|
||||
|
||||
{{% /tab %}}
|
||||
@@ -403,7 +377,7 @@ URL: https://github.com/ggerganov/whisper.cpp
|
||||
|
||||
```bash
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"url": "github:go-skynet/model-gallery/whisper-base.yaml",
|
||||
"url": "github:mudler/LocalAI/gallery/whisper-base.yaml@master",
|
||||
"name": "whisper-1"
|
||||
}'
|
||||
```
|
||||
@@ -414,13 +388,13 @@ curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
You can set the `PRELOAD_MODELS` environment variable:
|
||||
|
||||
```bash
|
||||
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/whisper-base.yaml", "name": "whisper-1"}]
|
||||
PRELOAD_MODELS=[{"url": "github:mudler/LocalAI/gallery/whisper-base.yaml@master", "name": "whisper-1"}]
|
||||
```
|
||||
|
||||
or as arg:
|
||||
|
||||
```bash
|
||||
local-ai --preload-models '[{"url": "github:mudler/LocalAI/gallery/whisper-base.yaml@master", "name": "whisper-1"}]'
|
||||
```
|
||||
|
||||
or in a YAML file:
|
||||
@@ -431,37 +405,13 @@ local-ai --preload-models-config "/path/to/yaml"
|
||||
|
||||
YAML:
|
||||
```yaml
|
||||
- url: github:mudler/LocalAI/gallery/whisper-base.yaml@master
|
||||
name: whisper-1
|
||||
```
|
||||
|
||||
{{% /tab %}}
|
||||
{{< /tabs >}}
|
||||
|
||||
### GPTs
|
||||
|
||||
<details>
|
||||
|
||||
```bash
|
||||
LOCALAI=http://localhost:8080
|
||||
curl $LOCALAI/models/apply -H "Content-Type: application/json" -d '{
|
||||
"url": "github:go-skynet/model-gallery/gpt4all-j.yaml",
|
||||
"name": "gpt4all-j"
|
||||
}'
|
||||
```
|
||||
|
||||
To test it:
|
||||
|
||||
```
|
||||
curl $LOCALAI/v1/chat/completions -H "Content-Type: application/json" -d '{
|
||||
"model": "gpt4all-j",
|
||||
"messages": [{"role": "user", "content": "How are you?"}],
|
||||
"temperature": 0.1
|
||||
}'
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
### Note
|
||||
|
||||
LocalAI will create a batch process that downloads the required files from a model definition and automatically reloads itself to include the new model.
|
||||
@@ -495,7 +445,7 @@ Returns an `uuid` and an `url` to follow up the state of the process:
|
||||
{ "uuid":"251475c9-f666-11ed-95e0-9a8a4480ac58", "status":"http://localhost:8080/models/jobs/251475c9-f666-11ed-95e0-9a8a4480ac58"}
|
||||
```
|
||||
|
||||
To see an example collection of curated model definition files, see the [LocalAI repository](https://github.com/mudler/LocalAI/tree/master/gallery).
|
||||
|
||||
#### Get model job state `/models/jobs/<uid>`
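For example, to poll the job started above (a sketch, reusing the `uuid` returned by `/models/apply`):

```bash
curl http://localhost:8080/models/jobs/251475c9-f666-11ed-95e0-9a8a4480ac58
```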
|
||||
|
||||
|
||||
docs/content/docs/features/reranker.md (new file, 57 lines)
@@ -0,0 +1,57 @@
|
||||
|
||||
+++
|
||||
disableToc = false
|
||||
title = " Reranker"
|
||||
weight = 11
|
||||
url = "/features/reranker/"
|
||||
+++
|
||||
|
||||
A **reranking** model, often referred to as a cross-encoder, is a core component in the two-stage retrieval systems used in information retrieval and natural language processing tasks.
|
||||
Given a query and a set of documents, it will output similarity scores.
|
||||
|
||||
We can then use the scores to reorder the documents by relevance in our RAG system, increasing its overall accuracy and filtering out non-relevant results.
|
||||
|
||||

|
||||
|
||||
LocalAI supports reranker models through the `rerankers` backend, which is based on [rerankers](https://github.com/AnswerDotAI/rerankers).
|
||||
|
||||
## Usage
|
||||
|
||||
You can test `rerankers` by using the container images that include Python (this does **NOT** work with `core` images) together with a model config file like the one below, or by installing `cross-encoder` from the gallery in the UI:
|
||||
|
||||
```yaml
|
||||
name: jina-reranker-v1-base-en
|
||||
backend: rerankers
|
||||
parameters:
|
||||
model: cross-encoder
|
||||
|
||||
# optionally:
|
||||
# type: flashrank
|
||||
# diffusers:
|
||||
# pipeline_type: en # to specify the english language
|
||||
```
|
||||
|
||||
and test it with:
|
||||
|
||||
```bash
|
||||
|
||||
curl http://localhost:8080/v1/rerank \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"model": "jina-reranker-v1-base-en",
|
||||
"query": "Organic skincare products for sensitive skin",
|
||||
"documents": [
|
||||
"Eco-friendly kitchenware for modern homes",
|
||||
"Biodegradable cleaning supplies for eco-conscious consumers",
|
||||
"Organic cotton baby clothes for sensitive skin",
|
||||
"Natural organic skincare range for sensitive skin",
|
||||
"Tech gadgets for smart homes: 2024 edition",
|
||||
"Sustainable gardening tools and compost solutions",
|
||||
"Sensitive skin-friendly facial cleansers and toners",
|
||||
"Organic food wraps and storage solutions",
|
||||
"All-natural pet food for dogs with allergies",
|
||||
"Yoga mats made from recycled materials"
|
||||
],
|
||||
"top_n": 3
|
||||
}'
|
||||
```
|
||||
@@ -296,7 +296,7 @@ backend: transformers
|
||||
parameters:
|
||||
model: "facebook/opt-125m"
|
||||
type: AutoModelForCausalLM
|
||||
quantization: bnb_4bit # One of: bnb_8bit, bnb_4bit, xpu_4bit, xpu_8bit (optional)
|
||||
```
|
||||
|
||||
The backend will automatically download the required files in order to run the model.
|
||||
@@ -307,10 +307,42 @@ The backend will automatically download the required files in order to run the m
|
||||
|
||||
| Type | Description |
|
||||
| --- | --- |
|
||||
| `AutoModelForCausalLM` | `AutoModelForCausalLM` is a model that can be used to generate sequences. Use it for NVIDIA CUDA and Intel GPUs with Intel Extensions for PyTorch acceleration |
| `OVModelForCausalLM` | for Intel CPU/GPU/NPU OpenVINO Text Generation models |
| `OVModelForFeatureExtraction` | for Intel CPU/GPU/NPU OpenVINO Embedding acceleration |
| N/A | Defaults to `AutoModel` |
|
||||
|
||||
- `OVModelForCausalLM` requires OpenVINO IR [Text Generation](https://huggingface.co/models?library=openvino&pipeline_tag=text-generation) models from Hugging Face
- `OVModelForFeatureExtraction` works with any Safetensors Transformer [Feature Extraction](https://huggingface.co/models?pipeline_tag=feature-extraction&library=transformers,safetensors) model from Hugging Face (embedding models)
|
||||
|
||||
Please note that streaming is currently not implemented in `AutoModelForCausalLM` for Intel GPU.
AMD GPU support is not implemented.
Although AMD CPU is not officially supported by OpenVINO, there are reports that it works: YMMV.
|
||||
|
||||
##### Embeddings
|
||||
Use `embeddings: true` if the model is an embedding model
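For example, a minimal sketch of an embedding model configuration (the model name is illustrative, and the exact field layout should be checked against the transformers example at the top of this section):

```yaml
# illustrative embedding model configuration for the transformers backend
name: my-embeddings
backend: transformers
embeddings: true
parameters:
  model: sentence-transformers/all-MiniLM-L6-v2
type: OVModelForFeatureExtraction
```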
|
||||
|
||||
##### Inference device selection
|
||||
The transformers backend tries to automatically select the best device for inference; you can override this decision manually with the `main_gpu` parameter.
|
||||
|
||||
| Inference Engine | Applicable Values |
|
||||
| --- | --- |
|
||||
| CUDA | `cuda`, `cuda.X` where X is the GPU device like in `nvidia-smi -L` output |
|
||||
| OpenVINO | Any applicable value from [Inference Modes](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes.html) like `AUTO`,`CPU`,`GPU`,`NPU`,`MULTI`,`HETERO` |
|
||||
|
||||
Example for CUDA:
|
||||
`main_gpu: cuda.0`
|
||||
|
||||
Example for OpenVINO:
|
||||
`main_gpu: AUTO:-CPU`
|
||||
|
||||
This parameter applies to both Text Generation and Feature Extraction (i.e. Embeddings) models.
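For instance, a sketch of pinning an OpenVINO text-generation model to the GPU device (the model name and device value are illustrative):

```yaml
# illustrative config: OpenVINO model forced onto the GPU device
name: my-openvino-model
backend: transformers
parameters:
  model: OpenVINO/Phi-3-mini-128k-instruct-int4-ov
type: OVModelForCausalLM
main_gpu: GPU
```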
|
||||
|
||||
##### Inference Precision
|
||||
The transformers backend automatically selects the fastest applicable inference precision according to the device support.
On CUDA, you can manually enable *bfloat16*, if your hardware supports it, with the following parameter:
|
||||
|
||||
`f16: true`
|
||||
|
||||
##### Quantization
|
||||
|
||||
@@ -318,8 +350,42 @@ The backend will automatically download the required files in order to run the m
|
||||
| --- | --- |
|
||||
| `bnb_8bit` | 8-bit quantization |
|
||||
| `bnb_4bit` | 4-bit quantization |
|
||||
| `xpu_8bit` | 8-bit quantization for Intel XPUs |
|
||||
| `xpu_4bit` | 4-bit quantization for Intel XPUs |
|
||||
|
||||
##### Trust Remote Code
|
||||
Some models, such as Microsoft Phi-3, require external code beyond what is provided by the transformers library.
By default this is disabled for security reasons.
It can be manually enabled with:
|
||||
`trust_remote_code: true`
|
||||
|
||||
##### Maximum Context Size
|
||||
The maximum context size (in tokens) can be specified with the parameter: `context_size`. Do not use values higher than what your model supports.
|
||||
|
||||
Usage example:
|
||||
`context_size: 8192`
|
||||
|
||||
##### Auto Prompt Template
|
||||
Usually the chat template is defined by the model author in the `tokenizer_config.json` file.
To enable it, use the `use_tokenizer_template: true` parameter in the `template` section.
|
||||
|
||||
Usage example:
|
||||
```
|
||||
template:
|
||||
use_tokenizer_template: true
|
||||
```
|
||||
|
||||
##### Custom Stop Words
|
||||
Stop words are usually defined in the `tokenizer_config.json` file.
They can be overridden with the `stopwords` parameter when needed, as for the Llama-3-Instruct model.
|
||||
|
||||
Usage example:
|
||||
```
|
||||
stopwords:
|
||||
- "<|eot_id|>"
|
||||
- "<|end_of_text|>"
|
||||
```
|
||||
|
||||
#### Usage
|
||||
|
||||
Use the `completions` endpoint by specifying the `transformers` model:
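For example, a sketch of such a request (the model name is a placeholder and must match a `transformers` model you have configured):

```bash
curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
  "model": "transformers-model-name",
  "prompt": "A long time ago in a galaxy far, far away",
  "temperature": 0.7
}'
```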
|
||||
|
||||
@@ -163,3 +163,7 @@ curl http://localhost:8080/tts -H "Content-Type: application/json" -d '{
|
||||
"input":"Hello!"
|
||||
}' | aplay
|
||||
```
|
||||
|
||||
## Parler-tts
|
||||
|
||||
`parler-tts` is supported as a text-to-speech backend; it is possible to install and configure the model directly from the gallery. See https://github.com/huggingface/parler-tts for more details on the model.
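As a sketch, once installed you can call it through the same `/tts` endpoint shown above (replace the model name with whatever name you installed it under):

```bash
curl http://localhost:8080/tts -H "Content-Type: application/json" -d '{
  "model": "parler-tts",
  "input": "Hello!"
}' | aplay
```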
|
||||
@@ -144,7 +144,7 @@ Install `xcode` from the Apps Store (needed for metalkit)
|
||||
|
||||
```
|
||||
# install build dependencies
|
||||
brew install abseil cmake go grpc protobuf wget protoc-gen-go protoc-gen-go-grpc
|
||||
|
||||
# clone the repo
|
||||
git clone https://github.com/go-skynet/LocalAI.git
|
||||
@@ -154,11 +154,11 @@ cd LocalAI
|
||||
# build the binary
|
||||
make build
|
||||
|
||||
# Download phi-2 to models/
wget https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q2_K.gguf -O models/phi-2.Q2_K
|
||||
|
||||
# Use a template from the examples
|
||||
cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/phi-2.Q2_K.tmpl
|
||||
|
||||
# Run LocalAI
|
||||
./local-ai --models-path=./models/ --debug=true
|
||||
@@ -167,17 +167,29 @@ cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/
|
||||
curl http://localhost:8080/v1/models
|
||||
|
||||
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
|
||||
"model": "ggml-gpt4all-j",
|
||||
"model": "phi-2.Q2_K",
|
||||
"messages": [{"role": "user", "content": "How are you?"}],
|
||||
"temperature": 0.9
|
||||
}'
|
||||
```
|
||||
|
||||
#### Troubleshooting mac
|
||||
|
||||
- If you encounter errors regarding a missing utility metal, install `Xcode` from the App Store.
|
||||
|
||||
- After the installation of Xcode, if you receive an xcrun error `'xcrun: error: unable to find utility "metal", not a developer tool or in PATH'`, you might have installed the Xcode command line tools before installing Xcode; the former points to an incomplete SDK.
|
||||
|
||||
```
|
||||
# print /Library/Developer/CommandLineTools, if command line tools were installed in advance
|
||||
xcode-select --print-path
|
||||
|
||||
# point to a complete SDK
|
||||
sudo xcode-select --switch /Applications/Xcode.app/Contents/Developer
|
||||
```
|
||||
|
||||
- If completions are slow, ensure that `gpu-layers` in your model yaml matches the number of layers from the model in use (or simply use a high number such as 256).
|
||||
|
||||
- If you get a compile error: `error: only virtual member functions can be marked 'final'`, reinstall all the necessary brew packages, clean the build, and try again.
|
||||
|
||||
```
|
||||
# reinstall build dependencies
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
|
||||
+++
|
||||
disableToc = false
|
||||
title = "Available Container images"
|
||||
weight = 25
|
||||
title = "Run with container images"
|
||||
weight = 6
|
||||
url = '/basics/container/'
|
||||
ico = "rocket_launch"
|
||||
+++
|
||||
|
||||
LocalAI provides a variety of images to support different environments. These images are available on [quay.io](https://quay.io/repository/go-skynet/local-ai?tab=tags) and [Docker Hub](https://hub.docker.com/r/localai/localai).
|
||||
|
||||
All-in-One images come with a pre-configured set of models and backends; standard images do not have any models pre-configured or installed.
|
||||
|
||||
For GPU acceleration support for Nvidia video graphics cards, use the Nvidia/CUDA images; if you don't have a GPU, use the CPU images. If you have AMD or Apple Silicon, see the [build section]({{%relref "docs/getting-started/build" %}}).
|
||||
|
||||
@@ -22,6 +23,62 @@ For GPU Acceleration support for Nvidia video graphic cards, use the Nvidia/CUDA
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
## All-in-one images
|
||||
|
||||
All-In-One images come pre-configured with a set of models and backends to fully leverage almost all the LocalAI featureset. These images are available for both CPU and GPU environments. The AIO images are designed to be easy to use and require no configuration. Models configuration can be found [here](https://github.com/mudler/LocalAI/tree/master/aio), separated by size.
|
||||
|
||||
In the AIO images there are models configured with the names of OpenAI models; however, they are actually backed by open-source models. You can find the mapping in the table below:
|
||||
|
||||
| Category | Model name | Real model (CPU) | Real model (GPU) |
|
||||
| ---- | ---- | ---- | ---- |
|
||||
| Text Generation | `gpt-4` | `phi-2` | `hermes-2-pro-mistral` |
|
||||
| Multimodal Vision | `gpt-4-vision-preview` | `bakllava` | `llava-1.6-mistral` |
|
||||
| Image Generation | `stablediffusion` | `stablediffusion` | `dreamshaper-8` |
|
||||
| Speech to Text | `whisper-1` | `whisper` with `whisper-base` model | <= same |
|
||||
| Text to Speech | `tts-1` | `en-us-amy-low.onnx` from `rhasspy/piper` | <= same |
|
||||
| Embeddings | `text-embedding-ada-002` | `all-MiniLM-L6-v2` in Q4 | `all-MiniLM-L6-v2` |
|
||||
|
||||
### Usage
|
||||
|
||||
Select the image (CPU or GPU) and start the container with Docker:
|
||||
|
||||
```bash
|
||||
# CPU example
|
||||
docker run -p 8080:8080 --name local-ai -ti localai/localai:latest-aio-cpu
|
||||
```
|
||||
|
||||
LocalAI will automatically download all the required models, and the API will be available at [localhost:8080](http://localhost:8080/v1/models).
|
||||
|
||||
### Available images
|
||||
|
||||
| Description | Quay | Docker Hub |
|
||||
| --- | --- |-----------------------------------------------|
|
||||
| Latest images for CPU | `quay.io/go-skynet/local-ai:latest-aio-cpu` | `localai/localai:latest-aio-cpu` |
|
||||
| Versioned image (e.g. for CPU) | `quay.io/go-skynet/local-ai:{{< version >}}-aio-cpu` | `localai/localai:{{< version >}}-aio-cpu` |
|
||||
| Latest images for Nvidia GPU (CUDA11) | `quay.io/go-skynet/local-ai:latest-aio-gpu-nvidia-cuda-11` | `localai/localai:latest-aio-gpu-nvidia-cuda-11` |
|
||||
| Latest images for Nvidia GPU (CUDA12) | `quay.io/go-skynet/local-ai:latest-aio-gpu-nvidia-cuda-12` | `localai/localai:latest-aio-gpu-nvidia-cuda-12` |
|
||||
| Latest images for AMD GPU | `quay.io/go-skynet/local-ai:latest-aio-gpu-hipblas` | `localai/localai:latest-aio-gpu-hipblas` |
|
||||
| Latest images for Intel GPU (sycl f16) | `quay.io/go-skynet/local-ai:latest-aio-gpu-intel-f16` | `localai/localai:latest-aio-gpu-intel-f16` |
|
||||
| Latest images for Intel GPU (sycl f32) | `quay.io/go-skynet/local-ai:latest-aio-gpu-intel-f32` | `localai/localai:latest-aio-gpu-intel-f32` |
|
||||
|
||||
### Available environment variables
|
||||
|
||||
The AIO images inherit the same environment variables as the base images and the environment of LocalAI (which you can inspect by calling `--help`). In addition, they support the following environment variables that are available only from the container image:
|
||||
|
||||
| Variable | Default | Description |
|
||||
| ---------------------| ------- | ----------- |
|
||||
| `PROFILE` | Auto-detected | The size of the model to use. Available: `cpu`, `gpu-8g` |
|
||||
| `MODELS` | Auto-detected | A list of models YAML Configuration file URI/URL (see also [running models]({{%relref "docs/getting-started/run-other-models" %}})) |
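For example, a sketch of forcing the CPU profile and pre-loading an extra model from a configuration URL (the URL is a hypothetical placeholder):

```bash
docker run -p 8080:8080 --name local-ai -ti \
  -e PROFILE=cpu \
  -e MODELS="https://example.com/my-model.yaml" \
  localai/localai:latest-aio-cpu
```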
|
||||
|
||||
|
||||
## Standard container images
|
||||
|
||||
Standard container images do not have pre-installed models.
|
||||
|
||||
Images are available with and without python dependencies. Note that images with python dependencies are bigger (on the order of 17GB).
|
||||
|
||||
Images with `core` in the tag are smaller and do not contain any python dependencies.
|
||||
|
||||
{{< tabs tabTotal="6" >}}
|
||||
{{% tab tabName="Vanilla / CPU Images" %}}
|
||||
|
||||
@@ -100,4 +157,3 @@ For GPU Acceleration support for Nvidia video graphic cards, use the Nvidia/CUDA
|
||||
## See Also
|
||||
|
||||
- [GPU acceleration]({{%relref "docs/features/gpu-acceleration" %}})
|
||||
- [AIO Images]({{%relref "docs/reference/aio-images" %}})
|
||||
docs/content/docs/getting-started/kubernetes.md (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
+++
|
||||
disableToc = false
|
||||
title = "Run with Kubernetes"
|
||||
weight = 6
|
||||
url = '/basics/kubernetes/'
|
||||
ico = "rocket_launch"
|
||||
+++
|
||||
|
||||
For installing LocalAI in Kubernetes, you can use the `go-skynet` helm chart:
|
||||
|
||||
```bash
|
||||
# Install the helm repository
|
||||
helm repo add go-skynet https://go-skynet.github.io/helm-charts/
|
||||
# Update the repositories
|
||||
helm repo update
|
||||
# Get the values
|
||||
helm show values go-skynet/local-ai > values.yaml
|
||||
|
||||
# Edit the values value if needed
|
||||
# vim values.yaml ...
|
||||
|
||||
# Install the helm chart
|
||||
helm install local-ai go-skynet/local-ai -f values.yaml
|
||||
```
|
||||
|
||||
If you prefer to install from a manifest file, you can apply the deployment file and customize it as you like:
|
||||
|
||||
```
|
||||
kubectl apply -f https://raw.githubusercontent.com/mudler/LocalAI/master/examples/kubernetes/deployment.yaml
|
||||
```
|
||||
@@ -131,22 +131,7 @@ Note: If you are on Windows, please make sure the project is on the Linux Filesy
|
||||
|
||||
{{% tab tabName="Kubernetes" %}}
|
||||
|
||||
See the [Kubernetes section]({{%relref "docs/getting-started/kubernetes" %}}).
|
||||
|
||||
{{% /tab %}}
|
||||
{{% tab tabName="From binary" %}}
|
||||
|
||||
@@ -30,7 +30,7 @@ Before you begin, ensure you have a container engine installed if you are not us
|
||||
|
||||
> _Do you already have a model file? Skip to [Run models manually]({{%relref "docs/getting-started/manual" %}}) or [Run other models]({{%relref "docs/getting-started/run-other-models" %}}) to use an already-configured model_.
|
||||
|
||||
LocalAI's All-in-One (AIO) images are pre-configured with a set of models and backends to fully leverage almost all the LocalAI featureset. If you don't need models pre-configured, you can use the standard [images]({{%relref "docs/getting-started/container-images" %}}).
|
||||
|
||||
These images are available for both CPU and GPU environments. The AIO images are designed to be easy to use and require no configuration.
|
||||
|
||||
@@ -91,7 +91,7 @@ services:
|
||||
# capabilities: [gpu]
|
||||
```
|
||||
|
||||
For a list of all the container-images available, see [Container images]({{%relref "docs/getting-started/container-images" %}}). To learn more about All-in-one images instead, see [All-in-one Images]({{%relref "docs/getting-started/container-images" %}}).
|
||||
|
||||
{{% alert icon="💡" %}}
|
||||
|
||||
@@ -114,9 +114,36 @@ docker run -p 8080:8080 --name local-ai -ti -v localai-models:/build/models loca
|
||||
|
||||
{{% /alert %}}
|
||||
|
||||
## From binary
|
||||
|
||||
LocalAI is also available as a standalone binary. Binaries are compiled for Linux and MacOS and automatically uploaded to the GitHub releases. Windows is known to work with WSL.

You can check out the releases at https://github.com/mudler/LocalAI/releases.
|
||||
|
||||
{{< tabs tabTotal="2" >}}
|
||||
{{% tab tabName="Linux" %}}
|
||||
| CPU flagset | Link |
|
||||
| --- | --- |
|
||||
| avx2 | [Download](https://github.com/mudler/LocalAI/releases/download/{{< version >}}/local-ai-avx2-Linux-x86_64) |
|
||||
| avx512 | [Download](https://github.com/mudler/LocalAI/releases/download/{{< version >}}/local-ai-avx512-Linux-x86_64) |
|
||||
| avx | [Download](https://github.com/mudler/LocalAI/releases/download/{{< version >}}/local-ai-avx-Linux-x86_64) |
|
||||
{{% /tab %}}
|
||||
{{% tab tabName="MacOS" %}}
|
||||
| CPU flagset | Link |
|
||||
| --- | --- |
|
||||
| avx2 | [Download](https://github.com/mudler/LocalAI/releases/download/{{< version >}}/local-ai-avx2-Darwin-arm64) |
|
||||
| avx512 | [Download](https://github.com/mudler/LocalAI/releases/download/{{< version >}}/local-ai-avx512-Darwin-arm64) |
|
||||
| avx | [Download](https://github.com/mudler/LocalAI/releases/download/{{< version >}}/local-ai-avx-Darwin-arm64) |
|
||||
|
||||
{{% /tab %}}
|
||||
|
||||
{{< /tabs >}}
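Once downloaded, a minimal way to run it is sketched below (using the avx2 Linux build listed above; adjust the file name for your platform):

```bash
# download the binary, make it executable, and start it with a local models directory
wget https://github.com/mudler/LocalAI/releases/download/{{< version >}}/local-ai-avx2-Linux-x86_64 -O local-ai
chmod +x local-ai
./local-ai --models-path=./models/
```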
|
||||
|
||||
## Try it out
|
||||
|
||||
Connect to LocalAI: by default, the WebUI is accessible at http://localhost:8080. You can also use 3rd-party projects to interact with LocalAI as you would use OpenAI (see also [Integrations]({{%relref "docs/integrations" %}})).

You can also test the API endpoints using `curl`; a few examples are given below.
|
||||
|
||||
### Text Generation
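For example, a minimal chat-completions request (a sketch using the `gpt-4` model name that the AIO images pre-configure; use whatever model name you have installed):

```bash
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "gpt-4",
  "messages": [{"role": "user", "content": "How are you?"}],
  "temperature": 0.1
}'
```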
|
||||
|
||||
@@ -300,6 +327,6 @@ Explore further resources and community contributions:
|
||||
- [Build LocalAI and the container image]({{%relref "docs/getting-started/build" %}})
|
||||
- [Run models manually]({{%relref "docs/getting-started/manual" %}})
|
||||
- [Run other models]({{%relref "docs/getting-started/run-other-models" %}})
|
||||
- [Container images]({{%relref "docs/getting-started/container-images" %}})
- [All-in-one Images]({{%relref "docs/getting-started/container-images" %}})
|
||||
- [Examples](https://github.com/mudler/LocalAI/tree/master/examples#examples)
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
disableToc = false
|
||||
title = "Integrations"
|
||||
weight = 19
|
||||
icon = "rocket_launch"
|
||||
icon = "sync"
|
||||
|
||||
+++
|
||||
|
||||
|
||||
@@ -99,8 +99,9 @@ Note that this started just as a fun weekend project by [mudler](https://github.
|
||||
- 🧠 [Embeddings generation for vector databases](https://localai.io/features/embeddings/)
|
||||
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
|
||||
- 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
|
||||
- 🥽 [Vision API](https://localai.io/features/gpt-vision/)
|
||||
- 💾 [Stores](https://localai.io/stores)
|
||||
- 🆕 [Reranker](https://localai.io/features/reranker/)
|
||||
|
||||
## Contribute and help
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
weight: 23
|
||||
title: "References"
|
||||
description: "Reference"
|
||||
icon: menu_book
|
||||
lead: ""
|
||||
date: 2020-10-06T08:49:15+00:00
|
||||
lastmod: 2020-10-06T08:49:15+00:00
|
||||
|
||||
@@ -45,10 +45,11 @@ LocalAI will attempt to automatically load models which are not explicitly confi
|
||||
| [tinydream](https://github.com/symisc/tiny-dream#tiny-dreaman-embedded-header-only-stable-diffusion-inference-c-librarypixlabiotiny-dream) | stablediffusion | no | Image | no | no | N/A |
|
||||
| `coqui` | Coqui | no | Audio generation and Voice cloning | no | no | CPU/CUDA |
|
||||
| `petals` | Various GPTs and quantization formats | yes | GPT | no | no | CPU/CUDA |
|
||||
| `transformers` | Various GPTs and quantization formats | yes | GPT, embeddings | yes | yes**** | CPU/CUDA/XPU |
|
||||
|
||||
Note: any backend name listed above can be used in the `backend` field of the model configuration file (See [the advanced section]({{%relref "docs/advanced" %}})).
|
||||
|
||||
- \* 7b ONLY
|
||||
- ** doesn't seem to be accurate
|
||||
- *** 7b and 40b with the `ggccv` format, for instance: https://huggingface.co/TheBloke/WizardLM-Uncensored-Falcon-40B-GGML
- **** Only for CUDA and OpenVINO CPU/XPU acceleration.
|
||||