Compare commits


60 Commits

Author SHA1 Message Date
LocalAI [bot]
f69de3be0d models(gallery): ⬆️ update checksum (#2278)
⬆️ Checksum updates in gallery/index.yaml

Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-09 12:21:24 +00:00
Ettore Di Giacinto
650ae620c5 ci: get latest git version
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 11:33:16 +02:00
Ettore Di Giacinto
6a209cbef6 ci: get file name correctly in checksum_checker.sh
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 10:57:23 +02:00
Ettore Di Giacinto
9786bb826d ci: try to fix checksum_checker.sh
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 09:34:07 +02:00
Ettore Di Giacinto
9b4c6f348a Update checksum_checker.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 00:57:22 +02:00
Ettore Di Giacinto
cb6ddb21ec Update checksum_checker.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 00:55:48 +02:00
Ettore Di Giacinto
0baacca605 Update checksum_checker.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 00:54:35 +02:00
Ettore Di Giacinto
222d714ec7 Update checksum_checker.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 00:51:57 +02:00
Ettore Di Giacinto
fd2d89d37b Update checksum_checker.sh
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 00:43:16 +02:00
Ettore Di Giacinto
6440b608dc Update checksum_checker.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 00:42:48 +02:00
Ettore Di Giacinto
1937118eab Update checksum_checker.yaml
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-09 00:34:56 +02:00
Ettore Di Giacinto
bc272d1e4b ci: add checksum checker pipeline (#2274)
Signed-off-by: mudler <mudler@localai.io>
2024-05-09 00:31:27 +02:00
LocalAI [bot]
d651f390cd ⬆️ Update ggerganov/whisper.cpp (#2273)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-08 22:11:10 +00:00
Ettore Di Giacinto
ea777f8716 models(gallery): update SHA for einstein
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-08 23:40:58 +02:00
LocalAI [bot]
eca5200fbd ⬆️ Update ggerganov/llama.cpp (#2272)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-08 21:34:56 +00:00
Ettore Di Giacinto
0809e9e7a0 models(gallery): fix openbiollm typo
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-08 23:19:43 +02:00
LocalAI [bot]
b66baa3db6 ⬆️ Update docs version mudler/LocalAI (#2271)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-08 21:10:30 +00:00
Ettore Di Giacinto
6eb77f0d3a models(gallery): add tiamat (#2269)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-08 19:59:42 +02:00
Ettore Di Giacinto
b20354b3ad models(gallery): add aurora (#2270)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-08 19:55:39 +02:00
Ettore Di Giacinto
d6f76c75e1 models(gallery): add kunocchini (#2268)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-08 19:52:08 +02:00
Ettore Di Giacinto
ed4f412f1c models(gallery): add lumimaid variant (#2267)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-08 19:51:53 +02:00
Ettore Di Giacinto
5bf56e01aa models(gallery): add tess (#2266)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-08 19:51:44 +02:00
Ettore Di Giacinto
5ff5f0b393 fix(ux): fix small glitches (#2265)
also drop duplicates for displaying in-progress model ops

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-08 19:34:33 +02:00
Ettore Di Giacinto
6559ac11b1 feat(ui): prompt for chat, support vision, enhancements (#2259)
* feat(ui): allow to set system prompt for chat

Also make the models in the index clickable, and display them as a table

Fixes #2257

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat(vision): support also png with base64 input

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat(ui): support vision and upload of files

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* display the processed image

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* make trust remote code stand out

Signed-off-by: mudler <mudler@localai.io>

* feat(ui): track in progress job across index/model gallery

Signed-off-by: mudler <mudler@localai.io>

* minor fixups

Signed-off-by: mudler <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: mudler <mudler@localai.io>
2024-05-08 00:42:34 +02:00
Ettore Di Giacinto
02ec546dd6 models(gallery): Add Soliloquy (#2260)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-08 00:14:19 +02:00
LocalAI [bot]
995aa5ed21 ⬆️ Update ggerganov/llama.cpp (#2263)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-07 21:39:12 +00:00
Michael Mior
e28ba4b807 Add missing Homebrew dependencies (#2256)
Signed-off-by: Michael Mior <michael.mior@gmail.com>
Signed-off-by: Michael Mior <mmior@mail.rit.edu>
2024-05-07 16:34:30 +00:00
Daniel
d1e3436de5 Update readme: add ShellOracle to community integrations (#2254)
Signed-off-by: Daniel Copley <djcopley@users.noreply.github.com>
2024-05-07 08:39:58 +02:00
Dave
d3ddc9e4aa UI: flag trust_remote_code to users // favicon support (#2253)
* attempt to indicate trust_remote_code in some way

* bonus: favicon support!

---------

Signed-off-by: Dave Lee <dave@gray101.com>
2024-05-07 08:39:23 +02:00
fakezeta
fea9522982 fix: OpenVINO winograd always disabled (#2252)
Winograd convolutions were always disabled, which caused an error when the inference device was CPU.
This commit implements logic to disable Winograd convolutions only when CPU or NPU devices are declared.
2024-05-07 08:38:58 +02:00
Ettore Di Giacinto
fe055d4b36 feat(webui): ux improvements (#2247)
* ux: change welcome when there are no models installed

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* ux: filter

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* ux: show tags in filter

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* wip

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* make tags clickable

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* allow to delete models from the list

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* ui: display icon of installed models

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* gallery: remove gallery file when removing model

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat(gallery): show a re-install button

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* make filter buttons, rename Gallery field

Signed-off-by: mudler <mudler@localai.io>

* show again buttons at end of operations

Signed-off-by: mudler <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Signed-off-by: mudler <mudler@localai.io>
2024-05-07 01:17:07 +02:00
LocalAI [bot]
581b894789 ⬆️ Update ggerganov/llama.cpp (#2255)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-06 21:28:07 +00:00
Ettore Di Giacinto
477655f6e6 models(gallery): average_norrmie reupload
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-06 19:56:24 +02:00
fakezeta
169d8d21ff gallery: Added some OpenVINO models (#2249)
* Added some OpenVINO models

Added Phi-3 trust_remote_code: true
Added Hermes 2 Pro Llama3
Added Multilingual-E5-base embedding model with OpenVINO acceleration (CPU and XPU)
Added all-MiniLM-L6-v2 with OpenVINO acceleration (CPU and XPU)

* Added Remote Code for phi, fixed error on Yamllint

* update openvino.yaml

I need to go to rest: today is not my day...
2024-05-06 10:52:05 +02:00
LocalAI [bot]
c5475020fe ⬆️ Update ggerganov/llama.cpp (#2251)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-05 21:16:00 +00:00
Dave
b52ff1249f test: check the response URL during image gen in app_test.go (#2248)
test: actually check the response URL from image gen

Signed-off-by: Dave Lee <dave@gray101.com>
2024-05-05 18:46:33 +00:00
Ettore Di Giacinto
c5798500cb feat(single-build): generate single binaries for releases (#2246)
* feat(single-build): generate single binaries for releases

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* drop old targets

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-05 17:20:51 +02:00
Ettore Di Giacinto
67ad3532ec Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-05 15:45:55 +02:00
Ettore Di Giacinto
5cb96fe7df models(gallery): add openbiollm (#2245)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-05 15:19:46 +02:00
Ettore Di Giacinto
810e8e5855 models(gallery): add lumimaid (#2244)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-05 15:19:33 +02:00
Ettore Di Giacinto
f3bcc648e7 models(gallery): add icon for instruct-coder
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-05 12:20:06 +02:00
Ettore Di Giacinto
3096566333 models(gallery): poppy porpoise fix
correct mmproj URL

Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-05 11:56:07 +02:00
Ettore Di Giacinto
f50c6a4e88 models(gallery): update poppy porpoise (#2243)
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-05 11:19:09 +02:00
Ettore Di Giacinto
ab4ee54855 models(gallery): add llama3-instruct-coder (#2242)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-05 11:18:50 +02:00
Ettore Di Giacinto
f2d35062d4 models(gallery): moondream2 fixups
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-05 10:49:04 +02:00
Ettore Di Giacinto
b69ff46c7e feat(startup): show CPU/GPU information with --debug (#2241)
Signed-off-by: mudler <mudler@localai.io>
2024-05-05 09:10:23 +02:00
Ettore Di Giacinto
117c9873e1 fix(webui): display small navbar with smaller screens (#2240)
Signed-off-by: mudler <mudler@localai.io>
2024-05-04 23:38:39 +02:00
LocalAI [bot]
17e94fbcb1 ⬆️ Update ggerganov/llama.cpp (#2239)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-04 21:26:22 +00:00
Ettore Di Giacinto
92f7feb874 models(gallery): add llama3-llava (#2238)
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-04 22:43:11 +02:00
Ettore Di Giacinto
b70e2bffa3 models(gallery): add moondream2 (#2237)
* models(gallery): add moondream2

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* models(gallery): fix typo for TTS models

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* models(gallery): add base config for moondream2 and icon

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* linter fixes

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-04 18:29:04 +02:00
nold
06c43ca285 fix(gallery): hermes-2-pro-llama3 models checksum changed (#2236)
fix(gallery): hermes-2-pro-llama3 models checksum

Signed-off-by: Gerrit Pannek <nold@gnu.one>
2024-05-04 17:59:54 +02:00
Ettore Di Giacinto
530bec9c64 feat(llama.cpp): do not specify backends to autoload and add llama.cpp variants (#2232)
* feat(initializer): do not specify backends to autoload

We can simply try to autoload the backends extracted in the asset dir.
This allows building variants of the same backend (e.g. with different instruction sets),
so a single binary can ship all the variants.

Signed-off-by: mudler <mudler@localai.io>

* refactor(prepare): refactor out llama.cpp prepare steps

Make the prepare steps idempotent so that we can re-build

Signed-off-by: mudler <mudler@localai.io>

* [TEST] feat(build): build noavx version along

Signed-off-by: mudler <mudler@localai.io>

* build: make build parallel

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* build: do not override CMAKE_ARGS

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* build: add fallback variant

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* Fixups

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(huggingface-langchain): fail if no token is set

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(huggingface-langchain): rename

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix: do not autoload local-store

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix: give priority between the listed backends

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: mudler <mudler@localai.io>
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
2024-05-04 17:56:12 +02:00
fakezeta
fa10302dd2 docs: updated Transformer parameters description (#2234)
updated Transformer parameters
2024-05-04 10:45:25 +02:00
Ettore Di Giacinto
54faaa87ea fix(webui): correct documentation URL for text2img (#2233)
Signed-off-by: mudler <mudler@localai.io>
Co-authored-by: Dave <dave@gray101.com>
2024-05-04 00:25:13 +00:00
dependabot[bot]
daba8a85f9 build(deps): bump tqdm from 4.65.0 to 4.66.3 in /examples/langchain/langchainpy-localai-example in the pip group across 1 directory (#2231)
build(deps): bump tqdm

Bumps the pip group with 1 update in the /examples/langchain/langchainpy-localai-example directory: [tqdm](https://github.com/tqdm/tqdm).


Updates `tqdm` from 4.65.0 to 4.66.3
- [Release notes](https://github.com/tqdm/tqdm/releases)
- [Commits](https://github.com/tqdm/tqdm/compare/v4.65.0...v4.66.3)

---
updated-dependencies:
- dependency-name: tqdm
  dependency-type: direct:production
  dependency-group: pip
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-03 23:15:06 +00:00
LocalAI [bot]
ac0f3d6e82 ⬆️ Update ggerganov/whisper.cpp (#2230)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-03 22:16:26 +00:00
LocalAI [bot]
da0b6a89ae ⬆️ Update ggerganov/llama.cpp (#2229)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-03 21:39:28 +00:00
LocalAI [bot]
929a68c06d ⬆️ Update docs version mudler/LocalAI (#2228)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: mudler <2420543+mudler@users.noreply.github.com>
2024-05-03 21:18:11 +00:00
cryptk
a0aa5d01a1 feat: update ROCM and use smaller image (#2196)
* feat: update ROCM and use smaller image

Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com>

* fix: add call to ldconfig to fix AMD's broken library packages

Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com>

---------

Signed-off-by: Chris Jowett <421501+cryptk@users.noreply.github.com>
2024-05-03 18:46:49 +02:00
Ettore Di Giacinto
dc834cc9d2 Update README.md
Signed-off-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2024-05-03 09:42:35 +02:00
49 changed files with 1833 additions and 655 deletions

.github/checksum_checker.sh (new file)

@@ -0,0 +1,111 @@
#!/bin/bash
# This script needs yq and huggingface_hub to be installed.
# To install huggingface_hub, run: pip install huggingface_hub

# Path to the input YAML file
input_yaml=$1

# Download a file and check/update its checksum using Python
function check_and_update_checksum() {
    model_name="$1"
    file_name="$2"
    uri="$3"
    old_checksum="$4"
    idx="$5"

    # Download the file and calculate the new checksum using Python
    new_checksum=$(python3 -c "
import hashlib
from huggingface_hub import hf_hub_download
import requests
import sys
import os

uri = '$uri'
file_name = uri.split('/')[-1]

# Parse the URI and determine the download method
def parse_uri(uri):
    if uri.startswith('huggingface://'):
        repo_id = uri.split('://')[1]
        return 'huggingface', repo_id.rsplit('/', 1)[0]
    elif 'huggingface.co' in uri:
        parts = uri.split('/resolve/')
        if len(parts) > 1:
            repo_path = parts[0].split('https://huggingface.co/')[-1]
            return 'huggingface', repo_path
    return 'direct', uri

def calculate_sha256(file_path):
    sha256_hash = hashlib.sha256()
    with open(file_path, 'rb') as f:
        for byte_block in iter(lambda: f.read(4096), b''):
            sha256_hash.update(byte_block)
    return sha256_hash.hexdigest()

download_type, repo_id_or_url = parse_uri(uri)

# Decide the download method based on the URI type
if download_type == 'huggingface':
    try:
        file_path = hf_hub_download(repo_id=repo_id_or_url, filename=file_name)
    except Exception as e:
        print(f'Error from Hugging Face Hub: {str(e)}', file=sys.stderr)
        sys.exit(2)
else:
    response = requests.get(repo_id_or_url)
    if response.status_code == 200:
        with open(file_name, 'wb') as f:
            f.write(response.content)
        file_path = file_name
    elif response.status_code == 404:
        print(f'File not found: {response.status_code}', file=sys.stderr)
        sys.exit(2)
    else:
        print(f'Error downloading file: {response.status_code}', file=sys.stderr)
        sys.exit(1)

print(calculate_sha256(file_path))
# Clean up the downloaded file
os.remove(file_path)
")
    # Capture the Python exit code immediately, before any other command overwrites $?
    result=$?

    if [[ $result -eq 2 ]]; then
        echo "File not found, deleting entry for $file_name..."
        # yq eval -i "del(.[$idx].files[] | select(.filename == \"$file_name\"))" "$input_yaml"
        return
    elif [[ $result -ne 0 ]]; then
        echo "Error downloading file $file_name. Skipping..."
        return
    elif [[ "$new_checksum" == "" ]]; then
        echo "Error calculating checksum for $file_name. Skipping..."
        return
    fi

    echo "Checksum for $file_name: $new_checksum"

    # Compare and update the YAML file if the checksums do not match
    if [[ "$old_checksum" != "$new_checksum" ]]; then
        echo "Checksum mismatch for $file_name. Updating..."
        yq eval -i "del(.[$idx].files[] | select(.filename == \"$file_name\").sha256)" "$input_yaml"
        yq eval -i "(.[$idx].files[] | select(.filename == \"$file_name\")).sha256 = \"$new_checksum\"" "$input_yaml"
    else
        echo "Checksum match for $file_name. No update needed."
    fi
}

# Read the YAML and process each file
len=$(yq eval '. | length' "$input_yaml")
for ((i=0; i<$len; i++)); do
    name=$(yq eval ".[$i].name" "$input_yaml")
    files_len=$(yq eval ".[$i].files | length" "$input_yaml")
    for ((j=0; j<$files_len; j++)); do
        filename=$(yq eval ".[$i].files[$j].filename" "$input_yaml")
        uri=$(yq eval ".[$i].files[$j].uri" "$input_yaml")
        checksum=$(yq eval ".[$i].files[$j].sha256" "$input_yaml")
        echo "Checking model $name, file $filename. URI = $uri, Checksum = $checksum"
        check_and_update_checksum "$name" "$filename" "$uri" "$checksum" "$i"
    done
done
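A quick way to exercise the checker locally is to point it at a minimal gallery index; the entry below mirrors the fields the script reads (name plus files[].filename/uri/sha256), with illustrative values only:

cat > /tmp/index.yaml <<'EOF'
- name: example-model
  files:
    - filename: model.gguf
      uri: https://example.com/model.gguf
      sha256: 0000000000000000000000000000000000000000000000000000000000000000
EOF
bash .github/checksum_checker.sh /tmp/index.yaml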

.github/workflows/checksum_checker.yaml (new file)

@@ -0,0 +1,47 @@
name: Check if checksums are up-to-date
on:
  schedule:
    - cron: 0 20 * * *
  workflow_dispatch:

jobs:
  checksum_check:
    runs-on: arc-runner-set
    steps:
      - name: Force Install GIT latest
        run: |
          sudo apt-get update \
            && sudo apt-get install -y software-properties-common \
            && sudo apt-get update \
            && sudo add-apt-repository -y ppa:git-core/ppa \
            && sudo apt-get update \
            && sudo apt-get install -y git
      - uses: actions/checkout@v4
      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y pip wget
          sudo pip install --upgrade pip
          pip install huggingface_hub
      - name: 'Setup yq'
        uses: dcarbone/install-yq-action@v1.1.1
        with:
          version: 'v4.43.1'
          download-compressed: true
          force: true
      - name: Checksum checker 🔧
        run: |
          export HF_HOME=/hf_cache
          sudo mkdir /hf_cache
          sudo chmod 777 /hf_cache
          bash .github/checksum_checker.sh gallery/index.yaml
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ secrets.UPDATE_BOT_TOKEN }}
          push-to-fork: ci-forks/LocalAI
          commit-message: ':arrow_up: Checksum updates in gallery/index.yaml'
          title: 'models(gallery): :arrow_up: update checksum'
          branch: "update/checksum"
          body: Updating checksums in gallery/index.yaml
          signoff: true
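Because the workflow also declares a workflow_dispatch trigger, it can be kicked off manually once merged, e.g. with the GitHub CLI:

gh workflow run checksum_checker.yaml --repo mudler/LocalAI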

(image build workflow)

@@ -61,7 +61,7 @@ jobs:
tag-suffix: '-hipblas'
ffmpeg: 'false'
image-type: 'extras'
-base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"

(image build workflow)

@@ -129,7 +129,7 @@ jobs:
ffmpeg: 'true'
image-type: 'extras'
aio: "-aio-gpu-hipblas"
-base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
latest-image: 'latest-gpu-hipblas'
latest-image-aio: 'latest-aio-gpu-hipblas'
@@ -141,7 +141,7 @@ jobs:
tag-suffix: '-hipblas'
ffmpeg: 'false'
image-type: 'extras'
-base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
@@ -218,7 +218,7 @@ jobs:
tag-suffix: '-hipblas-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
-base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"
@@ -228,7 +228,7 @@ jobs:
tag-suffix: '-hipblas-core'
ffmpeg: 'false'
image-type: 'core'
-base-image: "rocm/dev-ubuntu-22.04:6.0-complete"
+base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"

(release build workflow)

@@ -19,12 +19,8 @@ jobs:
strategy:
matrix:
include:
-        - build: 'avx2'
+        - build: ''
          defines: ''
-        - build: 'avx'
-          defines: '-DLLAMA_AVX2=OFF'
-        - build: 'avx512'
-          defines: '-DLLAMA_AVX512=ON'
- build: 'cuda12'
defines: ''
- build: 'cuda11'
@@ -74,7 +70,6 @@ jobs:
- name: Build
id: build
env:
-          CMAKE_ARGS: "${{ matrix.defines }}"
BUILD_ID: "${{ matrix.build }}"
run: |
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
@@ -124,63 +119,7 @@ jobs:
name: stablediffusion
path: release/
-  build-macOS:
-    strategy:
-      matrix:
-        include:
-          - build: 'avx2'
-            defines: ''
-          - build: 'avx'
-            defines: '-DLLAMA_AVX2=OFF'
-          - build: 'avx512'
-            defines: '-DLLAMA_AVX512=ON'
-    runs-on: macOS-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - uses: actions/setup-go@v5
-        with:
-          go-version: '1.21.x'
-          cache: false
-      - name: Dependencies
-        run: |
-          brew install protobuf grpc
-          go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
-          go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
-      - name: Build
-        id: build
-        env:
-          CMAKE_ARGS: "${{ matrix.defines }}"
-          BUILD_ID: "${{ matrix.build }}"
-        run: |
-          export C_INCLUDE_PATH=/usr/local/include
-          export CPLUS_INCLUDE_PATH=/usr/local/include
-          export PATH=$PATH:$GOPATH/bin
-          make dist
-      - uses: actions/upload-artifact@v4
-        with:
-          name: LocalAI-MacOS-${{ matrix.build }}
-          path: release/
-      - name: Release
-        uses: softprops/action-gh-release@v2
-        if: startsWith(github.ref, 'refs/tags/')
-        with:
-          files: |
-            release/*
  build-macOS-arm64:
-    strategy:
-      matrix:
-        include:
-          - build: 'avx2'
-            defines: ''
-          - build: 'avx'
-            defines: '-DLLAMA_AVX2=OFF'
-          - build: 'avx512'
-            defines: '-DLLAMA_AVX512=ON'
    runs-on: macos-14
steps:
- name: Clone
@@ -198,9 +137,6 @@ jobs:
go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
- name: Build
id: build
-        env:
-          CMAKE_ARGS: "${{ matrix.defines }}"
-          BUILD_ID: "${{ matrix.build }}"
run: |
export C_INCLUDE_PATH=/usr/local/include
export CPLUS_INCLUDE_PATH=/usr/local/include
@@ -208,7 +144,7 @@ jobs:
make dist
- uses: actions/upload-artifact@v4
with:
-          name: LocalAI-MacOS-arm64-${{ matrix.build }}
+          name: LocalAI-MacOS-arm64
path: release/
- name: Release
uses: softprops/action-gh-release@v2

Dockerfile

@@ -140,6 +140,18 @@ RUN if [ "${BUILD_TYPE}" = "clblas" ]; then \
    rm -rf /var/lib/apt/lists/* \
    ; fi
+RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
+    apt-get update && \
+    apt-get install -y --no-install-recommends \
+        hipblas-dev \
+        rocblas-dev && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/* && \
+    # I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
+    # to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
+    ldconfig \
+    ; fi
###################################
###################################

Makefile

@@ -5,7 +5,7 @@ BINARY_NAME=local-ai
# llama.cpp versions
GOLLAMA_STABLE_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=6ecf3189e00a1e8e737a78b6d10e1d7006e050a2
+CPPLLAMA_VERSION?=c12452c7aec8a02264afc00196a13caa591a13ac
# gpt4all version
GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
@@ -16,7 +16,7 @@ RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6
# whisper.cpp version
-WHISPER_CPP_VERSION?=8fac6455ffeb0a0950a84e790ddb74f7290d33c4
+WHISPER_CPP_VERSION?=73d13ad19a8c9c4da4f405088a85169b1a171e66
# bert.cpp version
BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d
@@ -152,9 +152,11 @@ ifeq ($(findstring tts,$(GO_TAGS)),tts)
OPTIONAL_GRPC+=backend-assets/grpc/piper
endif
-ALL_GRPC_BACKENDS=backend-assets/grpc/langchain-huggingface
+ALL_GRPC_BACKENDS=backend-assets/grpc/huggingface
 ALL_GRPC_BACKENDS+=backend-assets/grpc/bert-embeddings
 ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp
+ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-noavx
+ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-fallback
ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-ggml
ALL_GRPC_BACKENDS+=backend-assets/grpc/gpt4all
ALL_GRPC_BACKENDS+=backend-assets/grpc/rwkv
@@ -293,6 +295,7 @@ clean: ## Remove build related file
rm -rf backend-assets/*
$(MAKE) -C backend/cpp/grpc clean
$(MAKE) -C backend/cpp/llama clean
+	rm -rf backend/cpp/llama-* || true
$(MAKE) dropreplace
$(MAKE) protogen-clean
rmdir pkg/grpc/proto || true
@@ -311,14 +314,19 @@ build: prepare backend-assets grpcs ## Build the project
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
build-minimal:
-	BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS=backend-assets/grpc/llama-cpp GO_TAGS=none $(MAKE) build
+	BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS="backend-assets/grpc/llama-cpp" GO_TAGS=none $(MAKE) build
build-api:
BUILD_GRPC_FOR_BACKEND_LLAMA=true BUILD_API_ONLY=true GO_TAGS=none $(MAKE) build
dist: build
mkdir -p release
+# if BUILD_ID is empty, then we don't append it to the binary name
+ifeq ($(BUILD_ID),)
+	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(OS)-$(ARCH)
+else
 	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)
+endif
osx-signed: build
codesign --deep --force --sign "$(OSX_SIGNING_IDENTITY)" --entitlements "./Entitlements.plist" "./$(BINARY_NAME)"
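With the conditional above, dist now names the artifact with or without a BUILD_ID; a sketch of the two invocations (output names illustrative for a linux/amd64 host):

make dist                  # -> release/local-ai-linux-amd64
BUILD_ID=cuda12 make dist  # -> release/local-ai-cuda12-linux-amd64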
@@ -616,8 +624,8 @@ backend-assets/grpc/gpt4all: sources/gpt4all sources/gpt4all/gpt4all-bindings/go
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/gpt4all/gpt4all-bindings/golang/ LIBRARY_PATH=$(CURDIR)/sources/gpt4all/gpt4all-bindings/golang/ \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/gpt4all ./backend/go/llm/gpt4all/
-backend-assets/grpc/langchain-huggingface: backend-assets/grpc
-	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/langchain-huggingface ./backend/go/llm/langchain/
+backend-assets/grpc/huggingface: backend-assets/grpc
+	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/huggingface ./backend/go/llm/langchain/
backend/cpp/llama/llama.cpp:
LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama llama.cpp
@@ -629,7 +637,7 @@ ADDED_CMAKE_ARGS=-Dabsl_DIR=${INSTALLED_LIB_CMAKE}/absl \
-Dutf8_range_DIR=${INSTALLED_LIB_CMAKE}/utf8_range \
-DgRPC_DIR=${INSTALLED_LIB_CMAKE}/grpc \
-DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=${INSTALLED_PACKAGES}/include
-backend/cpp/llama/grpc-server:
+build-llama-cpp-grpc-server:
# Conditionally build grpc for the llama backend to use if needed
ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
$(MAKE) -C backend/cpp/grpc build
@@ -638,19 +646,37 @@ ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
LLAMA_VERSION=$(CPPLLAMA_VERSION) \
-	$(MAKE) -C backend/cpp/llama grpc-server
+	$(MAKE) -C backend/cpp/${VARIANT} grpc-server
else
echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
-	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/llama grpc-server
+	LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/${VARIANT} grpc-server
endif
-backend-assets/grpc/llama-cpp: backend-assets/grpc backend/cpp/llama/grpc-server
-	cp -rfv backend/cpp/llama/grpc-server backend-assets/grpc/llama-cpp
+backend-assets/grpc/llama-cpp: backend-assets/grpc
+	$(info ${GREEN}I llama-cpp build info:standard${RESET})
+	cp -rf backend/cpp/llama backend/cpp/llama-default
+	$(MAKE) -C backend/cpp/llama-default purge
+	$(MAKE) VARIANT="llama-default" build-llama-cpp-grpc-server
+	cp -rfv backend/cpp/llama-default/grpc-server backend-assets/grpc/llama-cpp
# TODO: every binary should have its own folder instead, so can have different metal implementations
ifeq ($(BUILD_TYPE),metal)
-	cp backend/cpp/llama/llama.cpp/build/bin/default.metallib backend-assets/grpc/
+	cp backend/cpp/llama-default/llama.cpp/build/bin/default.metallib backend-assets/grpc/
endif
+backend-assets/grpc/llama-cpp-noavx: backend-assets/grpc
+	cp -rf backend/cpp/llama backend/cpp/llama-noavx
+	$(MAKE) -C backend/cpp/llama-noavx purge
+	$(info ${GREEN}I llama-cpp build info:noavx${RESET})
+	CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_AVX512=OFF -DLLAMA_AVX2=OFF" $(MAKE) VARIANT="llama-noavx" build-llama-cpp-grpc-server
+	cp -rfv backend/cpp/llama-noavx/grpc-server backend-assets/grpc/llama-cpp-noavx
+
+backend-assets/grpc/llama-cpp-fallback: backend-assets/grpc
+	cp -rf backend/cpp/llama backend/cpp/llama-fallback
+	$(MAKE) -C backend/cpp/llama-fallback purge
+	$(info ${GREEN}I llama-cpp build info:fallback${RESET})
+	CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_F16C=OFF -DLLAMA_AVX512=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF" $(MAKE) VARIANT="llama-fallback" build-llama-cpp-grpc-server
+	cp -rfv backend/cpp/llama-fallback/grpc-server backend-assets/grpc/llama-cpp-fallback
backend-assets/grpc/llama-ggml: sources/go-llama.cpp sources/go-llama.cpp/libbinding.a backend-assets/grpc
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama.cpp LIBRARY_PATH=$(CURDIR)/sources/go-llama.cpp \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama-ggml ./backend/go/llm/llama-ggml/
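Each variant above is built from its own copy of backend/cpp/llama with variant-specific CMAKE_ARGS, so a single variant can also be built in isolation, e.g.:

# build only the maximally-compatible fallback flavour of the llama.cpp backend
make backend-assets/grpc/llama-cpp-fallback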

README.md

@@ -50,6 +50,7 @@
[Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
+- Chat, TTS, and Image generation in the WebUI: https://github.com/mudler/LocalAI/pull/2222
- Reranker API: https://github.com/mudler/LocalAI/pull/2121
- Gallery WebUI: https://github.com/mudler/LocalAI/pull/2104
- llama3: https://github.com/mudler/LocalAI/discussions/2076
@@ -113,6 +114,7 @@ Model galleries
Other:
- Helm chart https://github.com/go-skynet/helm-charts
- VSCode extension https://github.com/badgooooor/localai-vscode-plugin
+- Terminal utility https://github.com/djcopley/ShellOracle
- Local Smart assistant https://github.com/mudler/LocalAGI
- Home Assistant https://github.com/sammcj/homeassistant-localai / https://github.com/drndos/hass-openai-custom-conversation
- Discord bot https://github.com/mudler/LocalAGI/tree/main/examples/discord
@@ -131,7 +133,7 @@ Other:
## :book: 🎥 [Media, Blogs, Social](https://localai.io/basics/news/#media-blogs-social)
-- [Run LocalAI on AWS EKS with Pulumi](https://www.pulumi.com/ai/answers/tiZMDoZzZV6TLxgDXNBnFE/deploying-helm-charts-on-aws-eks)
+- [Run LocalAI on AWS EKS with Pulumi](https://www.pulumi.com/blog/low-code-llm-apps-with-local-ai-flowise-and-pulumi/)
- [Run LocalAI on AWS](https://staleks.hashnode.dev/installing-localai-on-aws-ec2-instance)
- [Create a slackbot for teams and OSS projects that answer to documentation](https://mudler.pm/posts/smart-slackbot-for-teams/)
- [LocalAI meets k8sgpt](https://www.youtube.com/watch?v=PKrDNuJ_dfE)

backend/cpp/llama/Makefile

@@ -43,31 +43,23 @@ llama.cpp:
 llama.cpp/examples/grpc-server: llama.cpp
 	mkdir -p llama.cpp/examples/grpc-server
-	cp -r $(abspath ./)/CMakeLists.txt llama.cpp/examples/grpc-server/
-	cp -r $(abspath ./)/grpc-server.cpp llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/json.hpp llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/utils.hpp llama.cpp/examples/grpc-server/
-	echo "add_subdirectory(grpc-server)" >> llama.cpp/examples/CMakeLists.txt
-	## XXX: In some versions of CMake clip wasn't being built before llama.
-	## This is a hack for now, but it should be fixed in the future.
-	cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
-	cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
-	echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
-	cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
-	cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
+	bash prepare.sh

 rebuild:
-	cp -rfv $(abspath ./)/CMakeLists.txt llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/grpc-server.cpp llama.cpp/examples/grpc-server/
-	cp -rfv $(abspath ./)/json.hpp llama.cpp/examples/grpc-server/
+	bash prepare.sh
 	rm -rf grpc-server
 	$(MAKE) grpc-server

-clean:
-	rm -rf llama.cpp
+purge:
+	rm -rf llama.cpp/build
+	rm -rf llama.cpp/examples/grpc-server
+	rm -rf grpc-server
+
+clean: purge
+	rm -rf llama.cpp
grpc-server: llama.cpp llama.cpp/examples/grpc-server
@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release"

backend/cpp/llama/prepare.sh (new file)

@@ -0,0 +1,20 @@
#!/bin/bash

cp -r CMakeLists.txt llama.cpp/examples/grpc-server/
cp -r grpc-server.cpp llama.cpp/examples/grpc-server/
cp -rfv json.hpp llama.cpp/examples/grpc-server/
cp -rfv utils.hpp llama.cpp/examples/grpc-server/

if grep -q "grpc-server" llama.cpp/examples/CMakeLists.txt; then
    echo "grpc-server already added"
else
    echo "add_subdirectory(grpc-server)" >> llama.cpp/examples/CMakeLists.txt
fi

## XXX: In some versions of CMake clip wasn't being built before llama.
## This is a hack for now, but it should be fixed in the future.
cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
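The grep guard is what makes the script idempotent: re-running it no longer appends a duplicate add_subdirectory line. A quick check from backend/cpp/llama (after llama.cpp has been checked out):

bash prepare.sh && bash prepare.sh
grep -c grpc-server llama.cpp/examples/CMakeLists.txt   # should still print 1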

(langchain/huggingface Go backend)

@@ -4,6 +4,7 @@ package main
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
 import (
 	"fmt"
+	"os"
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
@@ -18,9 +19,14 @@ type LLM struct {
}
func (llm *LLM) Load(opts *pb.ModelOptions) error {
-	llm.langchain, _ = langchain.NewHuggingFace(opts.Model)
+	var err error
+	hfToken := os.Getenv("HUGGINGFACEHUB_API_TOKEN")
+	if hfToken == "" {
+		return fmt.Errorf("no huggingface token provided")
+	}
+	llm.langchain, err = langchain.NewHuggingFace(opts.Model, hfToken)
 	llm.model = opts.Model
-	return nil
+	return err
}
func (llm *LLM) Predict(opts *pb.PredictOptions) (string, error) {
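With this change the backend fails fast when no token is present, so the variable has to be exported before starting the binary (token value is a placeholder):

export HUGGINGFACEHUB_API_TOKEN=hf_xxxxxxxxxxxxxxxx
./local-ai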

(transformers/OpenVINO Python backend)

@@ -150,11 +150,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             devices = Core().available_devices
             if "GPU" in " ".join(devices):
                 device_map="AUTO:GPU"
+            # On a fine-tuned model, inference may lose accuracy and performance on GPU
+            # when Winograd convolutions are selected:
+            # https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.html
+            # Use the plain config when a CPU or NPU device is selected ("-CPU"/"-NPU" mark excluded devices).
+            if ("CPU" in device_map or "NPU" in device_map) and not ("-CPU" in device_map or "-NPU" in device_map):
+                ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT"}
+            else:
+                ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT","GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
             self.model = OVModelForCausalLM.from_pretrained(model_name,
                 compile=True,
                 trust_remote_code=request.TrustRemoteCode,
-                ov_config={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT","GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"},
+                ov_config=ovconfig,
                 device=device_map)
self.OV = True
elif request.Type == "OVModelForFeatureExtraction":
@@ -168,11 +174,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             devices = Core().available_devices
             if "GPU" in " ".join(devices):
                 device_map="AUTO:GPU"
+            # Same logic as above: disable Winograd convolutions unless a CPU or NPU device is selected.
+            # https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.html
+            if ("CPU" in device_map or "NPU" in device_map) and not ("-CPU" in device_map or "-NPU" in device_map):
+                ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT"}
+            else:
+                ovconfig={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT","GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"}
             self.model = OVModelForFeatureExtraction.from_pretrained(model_name,
                 compile=True,
                 trust_remote_code=request.TrustRemoteCode,
-                ov_config={"PERFORMANCE_HINT": "CUMULATIVE_THROUGHPUT", "GPU_DISABLE_WINOGRAD_CONVOLUTION": "YES"},
+                ov_config=ovconfig,
                 export=True,
                 device=device_map)
self.OV = True
@@ -234,8 +246,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
# Pool to get sentence embeddings; i.e. generate one 1024 vector for the entire sentence
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
-            print("Calculated embeddings for: " + request.Embeddings, file=sys.stderr)
-            print("Embeddings:", sentence_embeddings, file=sys.stderr)
+            # print("Calculated embeddings for: " + request.Embeddings, file=sys.stderr)
+            # print("Embeddings:", sentence_embeddings, file=sys.stderr)
return backend_pb2.EmbeddingResult(embeddings=sentence_embeddings[0])
async def _predict(self, request, context, streaming=False):

(backend config loader, Go)

@@ -182,6 +182,12 @@ func (cl *BackendConfigLoader) GetAllBackendConfigs() []BackendConfig {
return res
}
+func (cl *BackendConfigLoader) RemoveBackendConfig(m string) {
+	cl.Lock()
+	defer cl.Unlock()
+	delete(cl.configs, m)
+}
func (cl *BackendConfigLoader) ListBackendConfigs() []string {
cl.Lock()
defer cl.Unlock()

(HTTP app setup, Go)

@@ -20,6 +20,7 @@ import (
 	"github.com/gofiber/contrib/fiberzerolog"
 	"github.com/gofiber/fiber/v2"
 	"github.com/gofiber/fiber/v2/middleware/cors"
+	"github.com/gofiber/fiber/v2/middleware/favicon"
"github.com/gofiber/fiber/v2/middleware/filesystem"
"github.com/gofiber/fiber/v2/middleware/recover"
@@ -182,8 +183,16 @@ func App(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *confi
}
routes.RegisterJINARoutes(app, cl, ml, appConfig, auth)
httpFS := http.FS(embedDirStatic)
+	app.Use(favicon.New(favicon.Config{
+		URL:        "/favicon.ico",
+		FileSystem: httpFS,
+		File:       "static/favicon.ico",
+	}))
 	app.Use("/static", filesystem.New(filesystem.Config{
-		Root:       http.FS(embedDirStatic),
+		Root:       httpFS,
PathPrefix: "static",
Browse: true,
}))
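A quick sanity check of the new favicon middleware, assuming a LocalAI instance on the default port:

curl -I http://localhost:8080/favicon.ico   # expect HTTP 200 with the embedded icon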

app_test.go

@@ -708,10 +708,26 @@ var _ = Describe("API test", func() {
// The response should contain an URL
Expect(err).ToNot(HaveOccurred(), fmt.Sprint(resp))
dat, err := io.ReadAll(resp.Body)
-			Expect(err).ToNot(HaveOccurred(), string(dat))
-			Expect(string(dat)).To(ContainSubstring("http://127.0.0.1:9090/"), string(dat))
-			Expect(string(dat)).To(ContainSubstring(".png"), string(dat))
+			Expect(err).ToNot(HaveOccurred(), "error reading /image/generations response")
imgUrlResp := &schema.OpenAIResponse{}
err = json.Unmarshal(dat, imgUrlResp)
Expect(imgUrlResp.Data).ToNot(Or(BeNil(), BeZero()))
imgUrl := imgUrlResp.Data[0].URL
Expect(imgUrl).To(ContainSubstring("http://127.0.0.1:9090/"), imgUrl)
Expect(imgUrl).To(ContainSubstring(".png"), imgUrl)
imgResp, err := http.Get(imgUrl)
Expect(err).To(BeNil())
Expect(imgResp).ToNot(BeNil())
Expect(imgResp.StatusCode).To(Equal(200))
Expect(imgResp.ContentLength).To(BeNumerically(">", 0))
imgData := make([]byte, 512)
count, err := io.ReadFull(imgResp.Body, imgData)
Expect(err).To(Or(BeNil(), MatchError(io.EOF)))
Expect(count).To(BeNumerically(">", 0))
Expect(count).To(BeNumerically("<=", 512))
Expect(http.DetectContentType(imgData)).To(Equal("image/png"))
})
})
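The final assertion relies on content sniffing rather than the file extension: http.DetectContentType recognizes the fixed 8-byte PNG signature at the start of the payload. The same bytes can be inspected by hand on any generated file (file name illustrative):

head -c 8 generated.png | xxd
# 00000000: 8950 4e47 0d0a 1a0a                      .PNG....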
@@ -787,11 +803,11 @@ var _ = Describe("API test", func() {
})
It("returns errors", func() {
-			backends := len(model.AutoLoadBackends) + 1 // +1 for huggingface
 			_, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "foomodel", Prompt: testPrompt})
 			Expect(err).To(HaveOccurred())
-			Expect(err.Error()).To(ContainSubstring(fmt.Sprintf("error, status code: 500, message: could not load model - all backends returned error: %d errors occurred:", backends)))
+			Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error:"))
})
It("transcribes audio", func() {
if runtime.GOOS != "linux" {
Skip("test supported only on linux")

(WebUI gallery elements, Go)

@@ -2,20 +2,30 @@ package elements
import (
"fmt"
"strings"
"github.com/chasefleming/elem-go"
"github.com/chasefleming/elem-go/attrs"
"github.com/go-skynet/LocalAI/core/services"
"github.com/go-skynet/LocalAI/pkg/gallery"
"github.com/go-skynet/LocalAI/pkg/xsync"
)
const (
-	NoImage = "https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"
+	noImage = "https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"
)
-func DoneProgress(uid, text string) string {
+func DoneProgress(galleryID, text string, showDelete bool) string {
+	var modelName = galleryID
+	// Split by @ and grab the name
+	if strings.Contains(galleryID, "@") {
+		modelName = strings.Split(galleryID, "@")[1]
+	}
 	return elem.Div(
-		attrs.Props{},
+		attrs.Props{
+			"id": "action-div-" + dropBadChars(galleryID),
+		},
elem.H3(
attrs.Props{
"role": "status",
@@ -25,10 +35,11 @@ func DoneProgress(uid, text string) string {
},
elem.Text(text),
),
elem.If(showDelete, deleteButton(galleryID, modelName), reInstallButton(galleryID)),
).Render()
}
-func ErrorProgress(err string) string {
+func ErrorProgress(err, galleryName string) string {
return elem.Div(
attrs.Props{},
elem.H3(
@@ -38,8 +49,9 @@ func ErrorProgress(err string) string {
"tabindex": "-1",
"autofocus": "",
},
-			elem.Text("Error"+err),
+			elem.Text("Error "+err),
),
installButton(galleryName),
).Render()
}
@@ -64,12 +76,13 @@ func StartProgressBar(uid, progress, text string) string {
if progress == "" {
progress = "0"
}
-	return elem.Div(attrs.Props{
-		"hx-trigger": "done",
-		"hx-get":     "/browse/job/" + uid,
-		"hx-swap":    "outerHTML",
-		"hx-target":  "this",
-	},
+	return elem.Div(
+		attrs.Props{
+			"hx-trigger": "done",
+			"hx-get":     "/browse/job/" + uid,
+			"hx-swap":    "outerHTML",
+			"hx-target":  "this",
+		},
elem.H3(
attrs.Props{
"role": "status",
@@ -78,7 +91,6 @@ func StartProgressBar(uid, progress, text string) string {
"autofocus": "",
},
elem.Text(text),
// This is a simple example of how to use the HTMLX library to create a progress bar that updates every 600ms.
elem.Div(attrs.Props{
"hx-get": "/browse/job/progress/" + uid,
"hx-trigger": "every 600ms",
@@ -99,61 +111,132 @@ func cardSpan(text, icon string) elem.Node {
elem.I(attrs.Props{
"class": icon + " pr-2",
}),
-		elem.Text(text),
+		//elem.Text(text),
)
}
-func ListModels(models []*gallery.GalleryModel, installing *xsync.SyncedMap[string, string]) string {
-	//StartProgressBar(uid, "0")
+func searchableElement(text, icon string) elem.Node {
return elem.Form(
attrs.Props{},
elem.Input(
attrs.Props{
"type": "hidden",
"name": "search",
"value": text,
},
),
elem.Span(
attrs.Props{
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2",
},
elem.A(
attrs.Props{
// "name": "search",
// "value": text,
//"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2",
"href": "#!",
"hx-post": "/browse/search/models",
"hx-target": "#search-results",
// TODO: this doesn't work
// "hx-vals": `{ \"search\": \"` + text + `\" }`,
"hx-indicator": ".htmx-indicator",
},
elem.I(attrs.Props{
"class": icon + " pr-2",
}),
elem.Text(text),
),
),
//elem.Text(text),
)
}
func link(text, url string) elem.Node {
return elem.A(
attrs.Props{
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2",
"href": url,
"target": "_blank",
},
elem.I(attrs.Props{
"class": "fas fa-link pr-2",
}),
elem.Text(text),
)
}
func installButton(galleryName string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/install/model/" + galleryName,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-download pr-2",
},
),
elem.Text("Install"),
)
}
func reInstallButton(galleryName string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-primary ml-2 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-target": "#action-div-" + dropBadChars(galleryName),
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/install/model/" + galleryName,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-arrow-rotate-right pr-2",
},
),
elem.Text("Reinstall"),
)
}
func deleteButton(galleryID, modelName string) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"hx-confirm": "Are you sure you wish to delete the model?",
"class": "float-right inline-block rounded bg-red-800 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-red-accent-300 hover:shadow-red-2 focus:bg-red-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-red-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-target": "#action-div-" + dropBadChars(galleryID),
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/delete/model/" + galleryID,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-cancel pr-2",
},
),
elem.Text("Delete"),
)
}
// Javascript/HTMX doesn't like weird IDs
func dropBadChars(s string) string {
return strings.ReplaceAll(s, "@", "__")
}
func ListModels(models []*gallery.GalleryModel, processing *xsync.SyncedMap[string, string], galleryService *services.GalleryService) string {
modelsElements := []elem.Node{}
// span := func(s string) elem.Node {
// return elem.Span(
// attrs.Props{
// "class": "float-right inline-block bg-green-500 text-white py-1 px-3 rounded-full text-xs",
// },
// elem.Text(s),
// )
// }
deleteButton := func(m *gallery.GalleryModel) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-red-800 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-red-accent-300 hover:shadow-red-2 focus:bg-red-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-red-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/delete/model/" + m.Name,
},
elem.I(
attrs.Props{
"class": "fa-solid fa-cancel pr-2",
},
),
elem.Text("Delete"),
)
}
installButton := func(m *gallery.GalleryModel) elem.Node {
return elem.Button(
attrs.Props{
"data-twe-ripple-init": "",
"data-twe-ripple-color": "light",
"class": "float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong",
"hx-swap": "outerHTML",
// post the Model ID as param
"hx-post": "/browse/install/model/" + fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name),
},
elem.I(
attrs.Props{
"class": "fa-solid fa-download pr-2",
},
),
elem.Text("Install"),
)
}
descriptionDiv := func(m *gallery.GalleryModel) elem.Node {
return elem.Div(
attrs.Props{
"class": "p-6 text-surface dark:text-white",
@@ -175,7 +258,18 @@ func ListModels(models []*gallery.GalleryModel, installing *xsync.SyncedMap[stri
actionDiv := func(m *gallery.GalleryModel) elem.Node {
galleryID := fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name)
-	currentlyInstalling := installing.Exists(galleryID)
+	currentlyProcessing := processing.Exists(galleryID)
jobID := ""
isDeletionOp := false
if currentlyProcessing {
status := galleryService.GetStatus(galleryID)
if status != nil && status.Deletion {
isDeletionOp = true
}
jobID = processing.Get(galleryID)
// TODO:
// case not handled, if status == nil : "Waiting"
}
nodes := []elem.Node{
cardSpan("Repository: "+m.Gallery.Name, "fa-brands fa-git-alt"),
@@ -187,25 +281,31 @@ func ListModels(models []*gallery.GalleryModel, installing *xsync.SyncedMap[stri
)
}
tagsNodes := []elem.Node{}
for _, tag := range m.Tags {
-		nodes = append(nodes,
-			cardSpan(tag, "fas fa-tag"),
+		tagsNodes = append(tagsNodes,
+			searchableElement(tag, "fas fa-tag"),
)
}
nodes = append(nodes,
elem.Div(
attrs.Props{
"class": "flex flex-row flex-wrap content-center",
},
tagsNodes...,
),
)
for i, url := range m.URLs {
nodes = append(nodes,
elem.A(
attrs.Props{
"class": "inline-block bg-gray-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2",
"href": url,
"target": "_blank",
},
elem.I(attrs.Props{
"class": "fas fa-link pr-2",
}),
elem.Text("Link #"+fmt.Sprintf("%d", i+1)),
))
link("Link #"+fmt.Sprintf("%d", i+1), url),
)
}
progressMessage := "Installation"
if isDeletionOp {
progressMessage = "Deletion"
}
return elem.Div(
@@ -218,36 +318,41 @@ func ListModels(models []*gallery.GalleryModel, installing *xsync.SyncedMap[stri
},
nodes...,
),
elem.If(
currentlyInstalling,
elem.Node( // If currently installing, show progress bar
elem.Raw(StartProgressBar(installing.Get(galleryID), "0", "Installing")),
), // Otherwise, show install button (if not installed) or display "Installed"
elem.If(m.Installed,
//elem.Node(elem.Div(
// attrs.Props{},
// span("Installed"), deleteButton(m),
// )),
deleteButton(m),
installButton(m),
elem.Div(
attrs.Props{
"id": "action-div-" + dropBadChars(galleryID),
},
elem.If(
currentlyProcessing,
elem.Node( // If currently installing, show progress bar
elem.Raw(StartProgressBar(jobID, "0", progressMessage)),
), // Otherwise, show install button (if not installed) or display "Installed"
elem.If(m.Installed,
elem.Node(elem.Div(
attrs.Props{},
reInstallButton(m.ID()),
deleteButton(m.ID(), m.Name),
)),
installButton(m.ID()),
),
),
),
)
}
for _, m := range models {
elems := []elem.Node{}
if m.Icon == "" {
-			m.Icon = NoImage
+			m.Icon = noImage
}
divProperties := attrs.Props{
"class": "flex justify-center items-center",
}
elems = append(elems,
-		elem.Div(attrs.Props{
-			"class": "flex justify-center items-center",
-		},
+		elem.Div(divProperties,
elem.A(attrs.Props{
"href": "#!",
// "class": "justify-center items-center",
@@ -258,7 +363,23 @@ func ListModels(models []*gallery.GalleryModel, installing *xsync.SyncedMap[stri
"src": m.Icon,
}),
),
),
)
// Special/corner case: if a model sets Trust Remote Code as required, show a warning
// TODO: handle this more generically later
_, trustRemoteCodeExists := m.Overrides["trust_remote_code"]
if trustRemoteCodeExists {
elems = append(elems, elem.Div(
attrs.Props{
"class": "flex justify-center items-center bg-red-500 text-white p-2 rounded-lg mt-2",
},
elem.I(attrs.Props{
"class": "fa-solid fa-circle-exclamation pr-2",
}),
elem.Text("Attention: Trust Remote Code is required for this model"),
))
}
elems = append(elems, descriptionDiv(m), actionDiv(m))
modelsElements = append(modelsElements,
@@ -278,7 +399,6 @@ func ListModels(models []*gallery.GalleryModel, installing *xsync.SyncedMap[stri
wrapper := elem.Div(attrs.Props{
"class": "dark grid grid-cols-1 grid-rows-1 md:grid-cols-3 block rounded-lg shadow-secondary-1 dark:bg-surface-dark",
//"class": "block rounded-lg bg-white shadow-secondary-1 dark:bg-surface-dark",
}, modelsElements...)
return wrapper.Render()

(model gallery endpoint, Go)

@@ -61,11 +61,11 @@ func (mgs *ModelGalleryEndpointService) ApplyModelGalleryEndpoint() func(c *fibe
return err
}
mgs.galleryApplier.C <- gallery.GalleryOp{
-			Req:         input.GalleryModel,
-			Id:          uuid.String(),
-			GalleryName: input.ID,
-			Galleries:   mgs.galleries,
-			ConfigURL:   input.ConfigURL,
+			Req:              input.GalleryModel,
+			Id:               uuid.String(),
+			GalleryModelName: input.ID,
+			Galleries:        mgs.galleries,
+			ConfigURL:        input.ConfigURL,
}
return c.JSON(struct {
ID string `json:"uuid"`
@@ -79,8 +79,8 @@ func (mgs *ModelGalleryEndpointService) DeleteModelGalleryEndpoint() func(c *fib
modelName := c.Params("name")
mgs.galleryApplier.C <- gallery.GalleryOp{
-			Delete:      true,
-			GalleryName: modelName,
+			Delete:           true,
+			GalleryModelName: modelName,
}
uuid, err := uuid.NewUUID()

(welcome endpoint, Go)

@@ -3,22 +3,39 @@ package localai
import (
"github.com/go-skynet/LocalAI/core/config"
"github.com/go-skynet/LocalAI/internal"
"github.com/go-skynet/LocalAI/pkg/gallery"
"github.com/go-skynet/LocalAI/pkg/model"
"github.com/gofiber/fiber/v2"
)
func WelcomeEndpoint(appConfig *config.ApplicationConfig,
-	cl *config.BackendConfigLoader, ml *model.ModelLoader) func(*fiber.Ctx) error {
+	cl *config.BackendConfigLoader, ml *model.ModelLoader, modelStatus func() (map[string]string, map[string]string)) func(*fiber.Ctx) error {
return func(c *fiber.Ctx) error {
models, _ := ml.ListModels()
backendConfigs := cl.GetAllBackendConfigs()
galleryConfigs := map[string]*gallery.Config{}
for _, m := range backendConfigs {
cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name)
if err != nil {
continue
}
galleryConfigs[m.Name] = cfg
}
// Get model statuses to display in the UI the operation in progress
processingModels, taskTypes := modelStatus()
summary := fiber.Map{
"Title": "LocalAI API - " + internal.PrintableVersion(),
"Version": internal.PrintableVersion(),
"Models": models,
"ModelsConfig": backendConfigs,
"GalleryConfig": galleryConfigs,
"ApplicationConfig": appConfig,
"ProcessingModels": processingModels,
"TaskTypes": taskTypes,
}
if string(c.Context().Request.Header.ContentType()) == "application/json" || len(c.Accepts("html")) == 0 {

(OpenAI chat endpoint, Go)

@@ -63,10 +63,14 @@ func getBase64Image(s string) (string, error) {
return encoded, nil
}
-	// if the string instead is prefixed with "data:image/jpeg;base64,", drop it
-	if strings.HasPrefix(s, "data:image/jpeg;base64,") {
-		return strings.ReplaceAll(s, "data:image/jpeg;base64,", ""), nil
+	// if the string instead is prefixed with "data:image/...;base64,", drop it
+	dropPrefix := []string{"data:image/jpeg;base64,", "data:image/png;base64,"}
+	for _, prefix := range dropPrefix {
+		if strings.HasPrefix(s, prefix) {
+			return strings.ReplaceAll(s, prefix, ""), nil
+		}
 	}
return "", fmt.Errorf("not valid string")
}
@@ -181,7 +185,7 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque
input.Messages[i].StringContent = fmt.Sprintf("[img-%d]", index) + input.Messages[i].StringContent
index++
} else {
fmt.Print("Failed encoding image", err)
log.Error().Msgf("Failed encoding image: %s", err)
}
}
}

View File

@@ -3,6 +3,7 @@ package routes
import (
"fmt"
"html/template"
"sort"
"strings"
"github.com/go-skynet/LocalAI/core/config"
@@ -13,6 +14,7 @@ import (
"github.com/go-skynet/LocalAI/pkg/gallery"
"github.com/go-skynet/LocalAI/pkg/model"
"github.com/go-skynet/LocalAI/pkg/xsync"
"github.com/rs/zerolog/log"
"github.com/gofiber/fiber/v2"
"github.com/google/uuid"
@@ -25,20 +27,65 @@ func RegisterUIRoutes(app *fiber.App,
galleryService *services.GalleryService,
auth func(*fiber.Ctx) error) {
app.Get("/", auth, localai.WelcomeEndpoint(appConfig, cl, ml))
// keeps the state of models that are being installed from the UI
var installingModels = xsync.NewSyncedMap[string, string]()
var processingModels = xsync.NewSyncedMap[string, string]()
// modelStatus returns the current status of the models being processed (installation or deletion)
// it is called asynchronously from the UI
modelStatus := func() (map[string]string, map[string]string) {
processingModelsData := processingModels.Map()
taskTypes := map[string]string{}
for k, v := range processingModelsData {
status := galleryService.GetStatus(v)
taskTypes[k] = "Installation"
if status != nil && status.Deletion {
taskTypes[k] = "Deletion"
} else if status == nil {
taskTypes[k] = "Waiting"
}
}
return processingModelsData, taskTypes
}
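To make the two return values concrete: for a single in-flight deletion the maps would look roughly like this (the model ID and UUID are illustrative):

```
// processingModels maps gallery model ID -> job UUID
// taskTypes maps the same ID -> human-readable operation
processing := map[string]string{"localai@moondream2": "1b4e28ba-2fa1-11ed-a261-0242ac120002"}
tasks := map[string]string{"localai@moondream2": "Deletion"}
```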
app.Get("/", auth, localai.WelcomeEndpoint(appConfig, cl, ml, modelStatus))
// Show the Models page (all models)
app.Get("/browse", auth, func(c *fiber.Ctx) error {
term := c.Query("term")
models, _ := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.ModelPath)
// Get all available tags
allTags := map[string]struct{}{}
tags := []string{}
for _, m := range models {
for _, t := range m.Tags {
allTags[t] = struct{}{}
}
}
for t := range allTags {
tags = append(tags, t)
}
sort.Strings(tags)
if term != "" {
models = gallery.GalleryModels(models).Search(term)
}
// Get model statuses
processingModelsData, taskTypes := modelStatus()
summary := fiber.Map{
"Title": "LocalAI - Models",
"Version": internal.PrintableVersion(),
"Models": template.HTML(elements.ListModels(models, installingModels)),
"Repositories": appConfig.Galleries,
"Title": "LocalAI - Models",
"Version": internal.PrintableVersion(),
"Models": template.HTML(elements.ListModels(models, processingModels, galleryService)),
"Repositories": appConfig.Galleries,
"AllTags": tags,
"ProcessingModels": processingModelsData,
"TaskTypes": taskTypes,
// "ApplicationConfig": appConfig,
}
@@ -58,17 +105,7 @@ func RegisterUIRoutes(app *fiber.App,
models, _ := gallery.AvailableGalleryModels(appConfig.Galleries, appConfig.ModelPath)
filteredModels := []*gallery.GalleryModel{}
for _, m := range models {
if strings.Contains(m.Name, form.Search) ||
strings.Contains(m.Description, form.Search) ||
strings.Contains(m.Gallery.Name, form.Search) ||
strings.Contains(strings.Join(m.Tags, ","), form.Search) {
filteredModels = append(filteredModels, m)
}
}
return c.SendString(elements.ListModels(filteredModels, installingModels))
return c.SendString(elements.ListModels(gallery.GalleryModels(models).Search(form.Search), processingModels, galleryService))
})
/*
@@ -81,6 +118,7 @@ func RegisterUIRoutes(app *fiber.App,
// https://htmx.org/examples/progress-bar/
app.Post("/browse/install/model/:id", auth, func(c *fiber.Ctx) error {
galleryID := strings.Clone(c.Params("id")) // note: strings.Clone is required for multiple requests!
log.Debug().Msgf("UI job submitted to install : %+v\n", galleryID)
id, err := uuid.NewUUID()
if err != nil {
@@ -89,12 +127,12 @@ func RegisterUIRoutes(app *fiber.App,
uid := id.String()
installingModels.Set(galleryID, uid)
processingModels.Set(galleryID, uid)
op := gallery.GalleryOp{
Id: uid,
GalleryName: galleryID,
Galleries: appConfig.Galleries,
Id: uid,
GalleryModelName: galleryID,
Galleries: appConfig.Galleries,
}
go func() {
galleryService.C <- op
@@ -107,6 +145,14 @@ func RegisterUIRoutes(app *fiber.App,
// https://htmx.org/examples/progress-bar/
app.Post("/browse/delete/model/:id", auth, func(c *fiber.Ctx) error {
galleryID := strings.Clone(c.Params("id")) // note: strings.Clone is required for multiple requests!
log.Debug().Msgf("UI job submitted to delete : %+v\n", galleryID)
var galleryName = galleryID
if strings.Contains(galleryID, "@") {
// if the galleryID contains a '@' it means the model comes from a gallery,
// but we want to delete it from the local models, which does not need
// a repository ID
galleryName = strings.Split(galleryID, "@")[1]
}
id, err := uuid.NewUUID()
if err != nil {
@@ -115,15 +161,20 @@ func RegisterUIRoutes(app *fiber.App,
uid := id.String()
installingModels.Set(galleryID, uid)
// Track the deletion job by galleryID and galleryName
// The GalleryID contains information about the repository,
// while the GalleryName is ONLY the name of the model
processingModels.Set(galleryName, uid)
processingModels.Set(galleryID, uid)
op := gallery.GalleryOp{
Id: uid,
Delete: true,
GalleryName: galleryID,
Id: uid,
Delete: true,
GalleryModelName: galleryName,
}
go func() {
galleryService.C <- op
cl.RemoveBackendConfig(galleryName)
}()
return c.SendString(elements.StartProgressBar(uid, "0", "Deletion"))
@@ -133,7 +184,7 @@ func RegisterUIRoutes(app *fiber.App,
// If the job is done, we trigger the /browse/job/:uid route
// https://htmx.org/examples/progress-bar/
app.Get("/browse/job/progress/:uid", auth, func(c *fiber.Ctx) error {
jobUID := c.Params("uid")
jobUID := strings.Clone(c.Params("uid")) // note: strings.Clone is required for multiple requests!
status := galleryService.GetStatus(jobUID)
if status == nil {
@@ -146,7 +197,7 @@ func RegisterUIRoutes(app *fiber.App,
return c.SendString(elements.ProgressBar("100"))
}
if status.Error != nil {
return c.SendString(elements.ErrorProgress(status.Error.Error()))
return c.SendString(elements.ErrorProgress(status.Error.Error(), status.GalleryModelName))
}
return c.SendString(elements.ProgressBar(fmt.Sprint(status.Progress)))
@@ -155,21 +206,30 @@ func RegisterUIRoutes(app *fiber.App,
// this route is hit when the job is done, and we display the
// final state (for now just displays "Installation completed")
app.Get("/browse/job/:uid", auth, func(c *fiber.Ctx) error {
jobUID := strings.Clone(c.Params("uid")) // note: strings.Clone is required for multiple requests!
status := galleryService.GetStatus(c.Params("uid"))
status := galleryService.GetStatus(jobUID)
for _, k := range installingModels.Keys() {
if installingModels.Get(k) == c.Params("uid") {
installingModels.Delete(k)
galleryID := ""
for _, k := range processingModels.Keys() {
if processingModels.Get(k) == jobUID {
galleryID = k
processingModels.Delete(k)
}
}
if galleryID == "" {
log.Debug().Msgf("no processing model found for job : %+v\n", jobUID)
}
log.Debug().Msgf("JOB finished : %+v\n", status)
showDelete := true
displayText := "Installation completed"
if status.Deletion {
showDelete = false
displayText = "Deletion completed"
}
return c.SendString(elements.DoneProgress(c.Params("uid"), displayText))
return c.SendString(elements.DoneProgress(galleryID, displayText, showDelete))
})
// Show the Chat page
@@ -191,7 +251,8 @@ func RegisterUIRoutes(app *fiber.App,
backendConfigs := cl.GetAllBackendConfigs()
if len(backendConfigs) == 0 {
return c.SendString("No models available")
// If no model is available redirect to the index which suggests how to install models
return c.Redirect("/")
}
summary := fiber.Map{
@@ -224,7 +285,8 @@ func RegisterUIRoutes(app *fiber.App,
backendConfigs := cl.GetAllBackendConfigs()
if len(backendConfigs) == 0 {
return c.SendString("No models available")
// If no model is available redirect to the index which suggests how to install models
return c.Redirect("/")
}
summary := fiber.Map{
@@ -257,7 +319,8 @@ func RegisterUIRoutes(app *fiber.App,
backendConfigs := cl.GetAllBackendConfigs()
if len(backendConfigs) == 0 {
return c.SendString("No models available")
// If no model is available redirect to the index which suggests how to install models
return c.Redirect("/")
}
summary := fiber.Map{

View File

@@ -26,25 +26,48 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
function submitKey(event) {
event.preventDefault();
localStorage.setItem("key", document.getElementById("apiKey").value);
document.getElementById("apiKey").blur();
}
function submitSystemPrompt(event) {
event.preventDefault();
localStorage.setItem("system_prompt", document.getElementById("systemPrompt").value);
document.getElementById("systemPrompt").blur();
}
var image = "";
function submitPrompt(event) {
event.preventDefault();
const input = document.getElementById("input").value;
Alpine.store("chat").add("user", input);
Alpine.store("chat").add("user", input, image);
document.getElementById("input").value = "";
const key = localStorage.getItem("key");
const systemPrompt = localStorage.getItem("system_prompt");
promptGPT(key, input);
promptGPT(systemPrompt, key, input);
}
function readInputImage() {
if (!this.files || !this.files[0]) return;
const FR = new FileReader();
FR.addEventListener("load", function(evt) {
image = evt.target.result;
});
FR.readAsDataURL(this.files[0]);
}
async function promptGPT(key, input) {
async function promptGPT(systemPrompt, key, input) {
const model = document.getElementById("chat-model").value;
// Set class "loader" to the element with "loader" id
//document.getElementById("loader").classList.add("loader");
@@ -53,6 +76,72 @@ function submitPrompt(event) {
document.getElementById("input").disabled = true;
document.getElementById('messages').scrollIntoView(false)
messages = Alpine.store("chat").messages();
// if systemPrompt isn't empty, push it at the start of messages
if (systemPrompt) {
messages.unshift({
role: "system",
content: systemPrompt
});
}
// loop all messages, and check if there are images. If there are, we need to change the content field
messages.forEach((message) => {
if (message.image) {
// The content field now becomes an array
message.content = [
{
"type": "text",
"text": message.content
}
]
message.content.push(
{
"type": "image_url",
"image_url": {
"url": message.image,
}
}
);
// remove the image field
delete message.image;
}
});
// reset the form and the image
image = "";
document.getElementById("input_image").value = null;
document.getElementById("fileName").innerHTML = "";
// if (image) {
// // take the last element content's and add the image
// last_message = messages[messages.length - 1]
// // The content field now becomes an array
// last_message.content = [
// {
// "type": "text",
// "text": last_message.content
// }
// ]
// last_message.content.push(
// {
// "type": "image_url",
// "image_url": {
// "url": image,
// }
// }
// );
// // and we replace it in the messages array
// messages[messages.length - 1] = last_message
// // reset the form and the image
// image = "";
// document.getElementById("input_image").value = null;
// document.getElementById("fileName").innerHTML = "";
// }
// Source: https://stackoverflow.com/a/75751803/11386095
const response = await fetch("/v1/chat/completions", {
method: "POST",
@@ -62,7 +151,7 @@ function submitPrompt(event) {
},
body: JSON.stringify({
model: model,
messages: Alpine.store("chat").messages(),
messages: messages,
stream: true,
}),
});
@@ -122,12 +211,24 @@ function submitPrompt(event) {
}
document.getElementById("key").addEventListener("submit", submitKey);
document.getElementById("system_prompt").addEventListener("submit", submitSystemPrompt);
document.getElementById("prompt").addEventListener("submit", submitPrompt);
document.getElementById("input").focus();
document.getElementById("input_image").addEventListener("change", readInputImage);
const storeKey = localStorage.getItem("key");
storeKey = localStorage.getItem("key");
if (storeKey) {
document.getElementById("apiKey").value = storeKey;
} else {
document.getElementById("apiKey").value = null;
}
storesystemPrompt = localStorage.getItem("system_prompt");
if (storesystemPrompt) {
document.getElementById("systemPrompt").value = storesystemPrompt;
} else {
document.getElementById("systemPrompt").value = null;
}
marked.setOptions({

View File

Binary file not shown (image added, 15 KiB).

View File

@@ -62,17 +62,34 @@ SOFTWARE.
<button @click="component = 'key'" title="Update API key"
class="m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
>Set API Key🔑</button>
<button @click="component = 'system_prompt'" title="System Prompt"
class="m-2 float-right inline-block rounded bg-primary px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-primary-accent-300 hover:shadow-primary-2 focus:bg-primary-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-primary-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
>Set system prompt</button>
</div>
<form x-show="component === 'key'" id="key">
<input
type="password"
id="apiKey"
name="apiKey"
class="bg-gray-800 text-white border border-gray-600 focus:border-blue-500 focus:ring focus:ring-blue-500 focus:ring-opacity-50 rounded-md shadow-sm p-2 appearance-none"
placeholder="OpenAI API Key"
x-model.lazy="key"
/>
<button @click="component = 'menu'" type="submit" title="Save API key">
🔒
<i class="fa-solid fa-arrow-right"></i>
</button>
</form>
<form x-show="component === 'system_prompt'" id="system_prompt">
<textarea
type="text"
id="systemPrompt"
name="systemPrompt"
class="bg-gray-800 text-white border border-gray-600 focus:border-blue-500 focus:ring focus:ring-blue-500 focus:ring-opacity-50 rounded-md shadow-sm p-2 appearance-none"
placeholder="System prompt"
x-model.lazy="system_prompt"
></textarea>
<button @click="component = 'menu'" type="submit" title="Save Prompt">
<i class="fa-solid fa-arrow-right"></i>
</button>
</form>
@@ -96,7 +113,8 @@ SOFTWARE.
<div class="chat-messages p-4" id="chat" x-data="{history: $store.chat.history}">
<p id="usage" x-show="history.length === 0">
Start chatting with the AI by typing a prompt in the input field below.
Start chatting with the AI by typing a prompt in the input field below and pressing Enter.
For models that support images, you can upload an image by clicking the paperclip <i class="fa-solid fa-paperclip"></i> icon.
</p>
<div id="messages">
<template x-for="message in history">
@@ -111,15 +129,19 @@ SOFTWARE.
<template x-if="message.role === 'assistant'">
<div class="p-2 flex-1 rounded" :class="message.role" x-html="message.html"></div>
</template>
<template x-if="message.image">
<img :src="message.image" alt="Image" class="rounded-lg mt-2 h-36 w-36">
</template>
</div>
</div>
</template>
</div>
</div>
<div class="p-4 border-t border-gray-700" x-data="{ inputValue: '', shiftPressed: false }">
<div class="p-4 border-t border-gray-700" x-data="{ inputValue: '', shiftPressed: false, fileName: '' }">
<div id="loader" class="my-2 loader" style="display: none;"></div>
<input id="chat-model" type="hidden" value="{{.Model}}">
<input id="input_image" type="file" style="display: none;" @change="fileName = $event.target.files[0].name">
<form id="prompt" action="/chat/{{.Model}}" method="get" @submit.prevent="submitPrompt">
<div class="relative w-full">
<textarea
@@ -134,7 +156,10 @@ SOFTWARE.
@keydown.enter="if (!shiftPressed) { submitPrompt($event); }"
style="padding-right: 4rem;"
></textarea>
<button type=submit><i class="fa-solid fa-circle-up text-gray-300 absolute right-2 top-3 text-lg p-2 ml-2"></i></button>
<span x-text="fileName" id="fileName" class="absolute right-16 top-5 text-gray-300 text-sm mr-2"></span>
<button type="button" onclick="document.getElementById('input_image').click()" class="fa-solid fa-paperclip text-gray-300 ml-2 absolute right-10 top-3 text-lg p-2">
</button>
<button type=submit><i class="fa-solid fa-circle-up text-gray-300 absolute right-2 top-3 text-lg p-2"></i></button>
</div>
</form>
</div>
@@ -146,7 +171,7 @@ SOFTWARE.
clear() {
this.history.length = 0;
},
add(role, content) {
add(role, content, image) {
const N = this.history.length - 1;
if (this.history.length && this.history[N].role === role) {
this.history[N].content += content;
@@ -167,6 +192,7 @@ SOFTWARE.
role: role,
content: content,
html: c,
image: image,
});
}
@@ -191,6 +217,7 @@ SOFTWARE.
return {
role: message.role,
content: message.content,
image: message.image,
};
});
},

View File

@@ -10,23 +10,54 @@
<div class="container mx-auto px-4 flex-grow">
<div class="header text-center py-12">
<h1 class="text-5xl font-bold text-gray-100">Welcome to <i>your</i> LocalAI instance!</h1>
<div class="mt-6">
<!-- Logo can be uncommented and updated with a valid URL -->
</div>
<p class="mt-4 text-lg">The FOSS alternative to OpenAI, Claude, ...</p>
<a href="https://localai.io" target="_blank" class="mt-4 inline-block bg-blue-500 text-white py-2 px-4 rounded-lg shadow transition duration-300 ease-in-out hover:bg-blue-700 hover:shadow-lg">
<i class="fas fa-book-reader pr-2"></i>Documentation
</a>
</div>
<div class="models mt-12">
<div class="models mt-4">
{{template "views/partials/inprogress" .}}
{{ if eq (len .ModelsConfig) 0 }}
<h2 class="text-center text-3xl font-semibold text-gray-100"> <i class="text-yellow-200 ml-2 fa-solid fa-triangle-exclamation animate-pulse"></i> Ouch! seems you don't have any models installed!</h2>
<p class="text-center mt-4 text-xl">..install something from the <a class="text-gray-400 hover:text-white ml-1 px-3 py-2 rounded" href="/browse">🖼️ Gallery</a> or check the <a href="https://localai.io/basics/getting_started/" class="text-gray-400 hover:text-white ml-1 px-3 py-2 rounded"> <i class="fa-solid fa-book"></i> Getting started documentation </a></p>
{{ else }}
<h2 class="text-center text-3xl font-semibold text-gray-100">Installed models</h2>
<p class="text-center mt-4 text-xl">We have {{len .ModelsConfig}} pre-loaded models available.</p>
<ul class="mt-8 space-y-4">
<table class="table-auto mt-4 w-full text-left text-gray-200">
<thead class="text-xs text-gray-400 uppercase bg-gray-700">
<tr>
<th class="px-4 py-2"></th>
<th class="px-4 py-2">Model Name</th>
<th class="px-4 py-2">Backend</th>
<th class="px-4 py-2 float-right">Actions</th>
</tr>
</thead>
<tbody>
{{$galleryConfig:=.GalleryConfig}}
{{$noicon:="https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"}}
{{ range .ModelsConfig }}
<li class="bg-gray-800 border border-gray-700 p-4 rounded-lg">
<div class="flex justify-between items-center">
<p class="font-bold text-white flex items-center"><i class="fas fa-brain pr-2"></i>{{.Name}}</p>
{{ $cfg:= index $galleryConfig .Name}}
<tr class="bg-gray-800 border-b border-gray-700">
<td class="px-4 py-3">
{{ with $cfg }}
<img {{ if $cfg.Icon }}
src="{{$cfg.Icon}}"
{{ else }}
src="{{$noicon}}"
{{ end }}
class="rounded-t-lg max-h-24 max-w-24 object-cover mt-3"
>
{{ else}}
<img src="{{$noicon}}" class="rounded-t-lg max-h-24 max-w-24 object-cover mt-3">
{{ end }}
</td>
<td class="px-4 py-3 font-bold">
<p class="font-bold text-white flex items-center"><i class="fas fa-brain pr-2"></i><a href="/browse?term={{.Name}}">{{.Name}}</a></p>
</td>
<td class="px-4 py-3 font-bold">
{{ if .Backend }}
<!-- Badge for Backend -->
<span class="inline-block bg-blue-500 text-white py-1 px-3 rounded-full text-xs">
@@ -37,11 +68,20 @@
auto
</span>
{{ end }}
</div>
<!-- Additional details can go here -->
</li>
</td>
<td class="px-4 py-3">
<button
class="float-right inline-block rounded bg-red-800 px-6 pb-2.5 mb-3 pt-2.5 text-xs font-medium uppercase leading-normal text-white shadow-primary-3 transition duration-150 ease-in-out hover:bg-red-accent-300 hover:shadow-red-2 focus:bg-red-accent-300 focus:shadow-primary-2 focus:outline-none focus:ring-0 active:bg-red-600 active:shadow-primary-2 dark:shadow-black/30 dark:hover:shadow-dark-strong dark:focus:shadow-dark-strong dark:active:shadow-dark-strong"
data-twe-ripple-color="light" data-twe-ripple-init="" hx-confirm="Are you sure you wish to delete the model?" hx-post="/browse/delete/model/{{.Name}}" hx-swap="outerHTML"><i class="fa-solid fa-cancel pr-2"></i>Delete</button>
</td>
{{ end }}
</ul>
</tbody>
</table>
{{ end }}
</div>
</div>

View File

@@ -13,10 +13,61 @@
🖼️ Available models from <i>{{ len .Repositories }}</i> repositories <a href="https://localai.io/models/" target="_blank" >
<i class="fas fa-circle-info pr-2"></i>
</a></h2>
<div class="text-center font-semibold text-gray-100">
<h2>Filter by type:</h2>
<button hx-post="/browse/search/models"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
hx-target="#search-results"
hx-vals='{"search": "tts"}'
hx-indicator=".htmx-indicator" >TTS</button>
<button hx-post="/browse/search/models"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
hx-target="#search-results"
hx-vals='{"search": "stablediffusion"}'
hx-indicator=".htmx-indicator" >Image generation</button>
<button hx-post="/browse/search/models"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
hx-target="#search-results"
hx-vals='{"search": "llm"}'
hx-indicator=".htmx-indicator" >Text generation</button>
<button hx-post="/browse/search/models"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
hx-target="#search-results"
hx-vals='{"search": "multimodal"}'
hx-indicator=".htmx-indicator" >Multimodal</button>
<button hx-post="/browse/search/models"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
hx-target="#search-results"
hx-vals='{"search": "embedding"}'
hx-indicator=".htmx-indicator" >Embeddings</button>
<button hx-post="/browse/search/models"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
hx-target="#search-results"
hx-vals='{"search": "rerank"}'
hx-indicator=".htmx-indicator" >Rerankers</button>
<button
hx-post="/browse/search/models"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
hx-target="#search-results"
hx-vals='{"search": "whisper"}'
hx-indicator=".htmx-indicator" >Audio transcription</button>
</div>
<div class="text-center text-xs font-semibold text-gray-100">
Filter by tags:
{{ range .AllTags }}
<button hx-post="/browse/search/models" class="text-blue-500" hx-target="#search-results"
hx-vals='{"search": "{{.}}"}'
hx-indicator=".htmx-indicator" >{{.}}</button>
{{ end }}
</div>
<span class="htmx-indicator loader"></span>
<input class="form-control appearance-none block w-full px-3 py-2 text-base font-normal text-gray-300 pb-2 mb-5 bg-gray-800 bg-clip-padding border border-solid border-gray-600 rounded transition ease-in-out m-0 focus:text-gray-300 focus:bg-gray-900 focus:border-blue-500 focus:outline-none" type="search"
{{template "views/partials/inprogress" .}}
<input class="form-control appearance-none block w-full mt-5 px-3 py-2 text-base font-normal text-gray-300 pb-2 mb-5 bg-gray-800 bg-clip-padding border border-solid border-gray-600 rounded transition ease-in-out m-0 focus:text-gray-300 focus:bg-gray-900 focus:border-blue-500 focus:outline-none" type="search"
name="search" placeholder="Begin Typing To Search models..."
hx-post="/browse/search/models"
hx-trigger="input changed delay:500ms, search"

View File

@@ -0,0 +1,32 @@
<!-- Show in progress operations-->
{{ if .ProcessingModels }}
<h2
class="mt-4 mb-4 text-center text-3xl font-semibold text-gray-100">Operations in progress</h2>
{{end}}
{{$taskType:=.TaskTypes}}
{{ range $key,$value:=.ProcessingModels }}
{{ $op := index $taskType $key}}
{{$parts := split "@" $key}}
{{$modelName:=$parts._1}}
{{$repository:=$parts._0}}
{{ if not (contains "@" $key)}}
{{$repository = ""}}
{{$modelName = $key}}
{{ end }}
<div class="flex items-center justify-between bg-slate-600 p-2 mb-2 rounded-md">
<div class="flex items center">
<span class="text-gray-300"><a href="/browse?term={{$parts._1}}"
class="text-white-500 inline-block bg-blue-200 rounded-full px-3 py-1 text-sm font-semibold text-gray-700 mr-2 mb-2 hover:bg-gray-300 hover:shadow-gray-2"
>{{$modelName}}</a> {{if $repository}} (from the '{{$repository}}' repository) {{end}}</span>
</div>
<div hx-get="/browse/job/{{$value}}" hx-swap="outerHTML" hx-target="this" hx-trigger="done">
<h3 role="status" id="pblabel" >{{$op}}
<div hx-get="/browse/job/progress/{{$value}}" hx-trigger="every 600ms"
hx-target="this"
hx-swap="innerHTML" ></div></h3>
</div>
</div>
{{ end }}
<!-- END Show in progress operations-->

View File

@@ -6,7 +6,14 @@
<a href="/" class="text-white text-xl font-bold"><img src="https://github.com/go-skynet/LocalAI/assets/2420543/0966aa2a-166e-4f99-a3e5-6c915fc997dd" alt="LocalAI Logo" class="h-10 mr-3 border-2 border-gray-300 shadow rounded"></a>
<a href="/" class="text-white text-xl font-bold">LocalAI</a>
</div>
<div>
<!-- Menu button for small screens -->
<div class="lg:hidden">
<button id="menu-toggle" class="text-gray-400 hover:text-white focus:outline-none">
<i class="fas fa-bars fa-lg"></i>
</button>
</div>
<!-- Navigation links -->
<div class="hidden lg:flex lg:items-center lg:justify-end lg:flex-1 lg:w-0">
<a href="/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fas fa-home pr-2"></i>Home</a>
<a href="https://localai.io" class="text-gray-400 hover:text-white px-3 py-2 rounded" target="_blank" ><i class="fas fa-book-reader pr-2"></i> Documentation</a>
<a href="/browse/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fas fa-brain pr-2"></i> Models</a>
@@ -16,5 +23,25 @@
<a href="/swagger/" class="text-gray-400 hover:text-white px-3 py-2 rounded"><i class="fas fa-code pr-2"></i> API</a>
</div>
</div>
<!-- Collapsible menu for small screens -->
<div class="hidden lg:hidden" id="mobile-menu">
<div class="pt-4 pb-3 border-t border-gray-700">
<a href="/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-home pr-2"></i>Home</a>
<a href="https://localai.io" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1" target="_blank" ><i class="fas fa-book-reader pr-2"></i> Documentation</a>
<a href="/browse/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-brain pr-2"></i> Models</a>
<a href="/chat/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fa-solid fa-comments pr-2"></i> Chat</a>
<a href="/text2image/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-image pr-2"></i> Generate images</a>
<a href="/tts/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fa-solid fa-music pr-2"></i> TTS </a>
<a href="/swagger/" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1"><i class="fas fa-code pr-2"></i> API</a>
</div>
</div>
</div>
</nav>
<script>
// JavaScript to toggle the mobile menu
document.getElementById('menu-toggle').addEventListener('click', function () {
var mobileMenu = document.getElementById('mobile-menu');
mobileMenu.classList.toggle('hidden');
});
</script>

View File

@@ -14,7 +14,7 @@
<div class="flex items-center justify-center text-center pb-2">
<span class="text-3xl font-semibold text-gray-100">
🖼️ Text to Image
<a href="https://localai.io/models/" target="_blank" >
<a href="https://localai.io/features/image-generation" target="_blank" >
<i class="fas fa-circle-info pr-2"></i>
</a>
</span>

View File

@@ -90,7 +90,7 @@ func (g *GalleryService) Start(c context.Context, cl *config.BackendConfigLoader
if op.Delete {
modelConfig := &config.BackendConfig{}
// GalleryModelName is the name of the model in this case
dat, err := os.ReadFile(filepath.Join(g.modelPath, op.GalleryName+".yaml"))
dat, err := os.ReadFile(filepath.Join(g.modelPath, op.GalleryModelName+".yaml"))
if err != nil {
updateError(err)
continue
@@ -111,14 +111,14 @@ func (g *GalleryService) Start(c context.Context, cl *config.BackendConfigLoader
files = append(files, modelConfig.MMProjFileName())
}
err = gallery.DeleteModelFromSystem(g.modelPath, op.GalleryName, files)
err = gallery.DeleteModelFromSystem(g.modelPath, op.GalleryModelName, files)
} else {
// if the request contains a gallery name, we apply the gallery from the gallery list
if op.GalleryName != "" {
if strings.Contains(op.GalleryName, "@") {
err = gallery.InstallModelFromGallery(op.Galleries, op.GalleryName, g.modelPath, op.Req, progressCallback)
if op.GalleryModelName != "" {
if strings.Contains(op.GalleryModelName, "@") {
err = gallery.InstallModelFromGallery(op.Galleries, op.GalleryModelName, g.modelPath, op.Req, progressCallback)
} else {
err = gallery.InstallModelFromGalleryByName(op.Galleries, op.GalleryName, g.modelPath, op.Req, progressCallback)
err = gallery.InstallModelFromGalleryByName(op.Galleries, op.GalleryModelName, g.modelPath, op.Req, progressCallback)
}
} else if op.ConfigURL != "" {
startup.PreloadModelsConfigurations(op.ConfigURL, g.modelPath, op.ConfigURL)
@@ -148,10 +148,11 @@ func (g *GalleryService) Start(c context.Context, cl *config.BackendConfigLoader
g.UpdateStatus(op.Id,
&gallery.GalleryOpStatus{
Deletion: op.Delete,
Processed: true,
Message: "completed",
Progress: 100})
Deletion: op.Delete,
Processed: true,
GalleryModelName: op.GalleryModelName,
Message: "completed",
Progress: 100})
}
}
}()

View File

@@ -11,6 +11,7 @@ import (
"github.com/go-skynet/LocalAI/pkg/assets"
"github.com/go-skynet/LocalAI/pkg/model"
pkgStartup "github.com/go-skynet/LocalAI/pkg/startup"
"github.com/go-skynet/LocalAI/pkg/xsysinfo"
"github.com/rs/zerolog/log"
)
@@ -19,12 +20,23 @@ func Startup(opts ...config.AppOption) (*config.BackendConfigLoader, *model.Mode
log.Info().Msgf("Starting LocalAI using %d threads, with models path: %s", options.Threads, options.ModelPath)
log.Info().Msgf("LocalAI version: %s", internal.PrintableVersion())
caps, err := xsysinfo.CPUCapabilities()
if err == nil {
log.Debug().Msgf("CPU capabilities: %v", caps)
}
gpus, err := xsysinfo.GPUs()
if err == nil {
log.Debug().Msgf("GPU count: %d", len(gpus))
for _, gpu := range gpus {
log.Debug().Msgf("GPU: %s", gpu.String())
}
}
// Make sure directories exist
if options.ModelPath == "" {
return nil, nil, nil, fmt.Errorf("options.ModelPath cannot be empty")
}
err := os.MkdirAll(options.ModelPath, 0750)
err = os.MkdirAll(options.ModelPath, 0750)
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to create ModelPath: %q", err)
}

View File

@@ -296,7 +296,7 @@ backend: transformers
parameters:
model: "facebook/opt-125m"
type: AutoModelForCausalLM
quantization: bnb_4bit # One of: bnb_8bit, bnb_4bit, xpu_4bit (optional)
quantization: bnb_4bit # One of: bnb_8bit, bnb_4bit, xpu_4bit, xpu_8bit (optional)
```
The backend will automatically download the required files in order to run the model.
@@ -307,10 +307,42 @@ The backend will automatically download the required files in order to run the m
| Type | Description |
| --- | --- |
| `AutoModelForCausalLM` | `AutoModelForCausalLM` is a model that can be used to generate sequences. |
| `OVModelForCausalLM` | for OpenVINO models |
| `AutoModelForCausalLM` | `AutoModelForCausalLM` is a model that can be used to generate sequences. Use it for NVIDIA CUDA and Intel GPU with Intel Extensions for PyTorch acceleration |
| `OVModelForCausalLM` | for Intel CPU/GPU/NPU OpenVINO Text Generation models |
| `OVModelForFeatureExtraction` | for Intel CPU/GPU/NPU OpenVINO Embedding acceleration |
| N/A | Defaults to `AutoModel` |
- `OVModelForCausalLM` requires OpenVINO IR [Text Generation](https://huggingface.co/models?library=openvino&pipeline_tag=text-generation) models from Hugging Face
- `OVModelForFeatureExtraction` works with any Safetensors Transformer [Feature Extraction](https://huggingface.co/models?pipeline_tag=feature-extraction&library=transformers,safetensors) model from Hugging Face (embedding models)
Please note that streaming is currently not implemented in `AutoModelForCausalLM` for Intel GPU.
AMD GPU support is not implemented.
Although AMD CPUs are not officially supported by OpenVINO, there are reports that they work: YMMV.
##### Embeddings
Use `embeddings: true` if the model is an embedding model.
##### Inference device selection
The transformers backend tries to automatically select the best device for inference; you can override this decision manually with the `main_gpu` parameter.
| Inference Engine | Applicable Values |
| --- | --- |
| CUDA | `cuda`, `cuda.X` where X is the GPU device like in `nvidia-smi -L` output |
| OpenVINO | Any applicable value from [Inference Modes](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes.html) like `AUTO`,`CPU`,`GPU`,`NPU`,`MULTI`,`HETERO` |
Example for CUDA:
`main_gpu: cuda.0`
Example for OpenVINO:
`main_gpu: AUTO:-CPU`
This parameter applies to both Text Generation and Feature Extraction (i.e. Embeddings) models.
##### Inference Precision
The transformers backend automatically selects the fastest applicable inference precision supported by the device.
On CUDA, you can manually enable *bfloat16*, if your hardware supports it, with the following parameter:
`f16: true`
##### Quantization
@@ -318,8 +350,42 @@ The backend will automatically download the required files in order to run the m
| --- | --- |
| `bnb_8bit` | 8-bit quantization |
| `bnb_4bit` | 4-bit quantization |
| `xpu_8bit` | 8-bit quantization for Intel XPUs |
| `xpu_4bit` | 4-bit quantization for Intel XPUs |
##### Trust Remote Code
Some models, like Microsoft Phi-3, require external code beyond what is provided by the transformers library.
By default this is disabled for security reasons.
It can be manually enabled with:
`trust_remote_code: true`
##### Maximum Context Size
The maximum context size (in tokens) can be specified with the `context_size` parameter. Do not use values higher than what your model supports.
Usage example:
`context_size: 8192`
##### Auto Prompt Template
Usually the chat template is defined by the model author in the `tokenizer_config.json` file.
To enable it use the `use_tokenizer_template: true` parameter in the `template` section.
Usage example:
```
template:
use_tokenizer_template: true
```
##### Custom Stop Words
Stopwords are usually defined in `tokenizer_config.json` file.
They can be overridden with the `stopwords` parameter when needed, as with the Llama3-Instruct model.
Usage example:
```
stopwords:
- "<|eot_id|>"
- "<|end_of_text|>"
```
#### Usage
Use the `completions` endpoint by specifying the `transformers` model:

View File

@@ -144,7 +144,7 @@ Install `xcode` from the Apps Store (needed for metalkit)
```
# install build dependencies
brew install abseil cmake go grpc protobuf wget
brew install abseil cmake go grpc protobuf wget protoc-gen-go protoc-gen-go-grpc
# clone the repo
git clone https://github.com/go-skynet/LocalAI.git

View File

@@ -45,10 +45,11 @@ LocalAI will attempt to automatically load models which are not explicitly confi
| [tinydream](https://github.com/symisc/tiny-dream#tiny-dreaman-embedded-header-only-stable-diffusion-inference-c-librarypixlabiotiny-dream) | stablediffusion | no | Image | no | no | N/A |
| `coqui` | Coqui | no | Audio generation and Voice cloning | no | no | CPU/CUDA |
| `petals` | Various GPTs and quantization formats | yes | GPT | no | no | CPU/CUDA |
| `transformers` | Various GPTs and quantization formats | yes | GPT, embeddings | yes | no | CPU/CUDA |
| `transformers` | Various GPTs and quantization formats | yes | GPT, embeddings | yes | yes**** | CPU/CUDA/XPU |
Note: any backend name listed above can be used in the `backend` field of the model configuration file (See [the advanced section]({{%relref "docs/advanced" %}})).
- \* 7b ONLY
- ** doesn't seem to be accurate
- *** 7b and 40b with the `ggccv` format, for instance: https://huggingface.co/TheBloke/WizardLM-Uncensored-Falcon-40B-GGML
- **** Only for CUDA and OpenVINO CPU/XPU acceleration.

View File

@@ -1,3 +1,3 @@
{
"version": "v2.13.0"
"version": "null"
}

View File

@@ -25,7 +25,7 @@ PyYAML==6.0
requests==2.31.0
SQLAlchemy==2.0.12
tenacity==8.2.2
tqdm==4.65.0
tqdm==4.66.3
typing-inspect==0.8.0
typing_extensions==4.5.0
urllib3==1.26.18

View File

File diff suppressed because it is too large.

19
gallery/moondream.yaml Normal file
View File

@@ -0,0 +1,19 @@
---
name: "moondream2"
config_file: |
backend: llama-cpp
context_size: 2046
roles:
user: "\nQuestion: "
system: "\nSystem: "
assistant: "\nAnswer: "
stopwords:
- "Question:"
- "<|endoftext|>"
f16: true
template:
completion: |
Complete the following sentence: {{.Input}}
chat: "{{.Input}}\nAnswer:\n"

View File

@@ -7,6 +7,3 @@ config_file: |
type: OVModelForCausalLM
template:
use_tokenizer_template: true
stopwords:
- "<|eot_id|>"
- "<|end_of_text|>"

7
go.mod
View File

@@ -67,6 +67,7 @@ require (
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/alecthomas/chroma/v2 v2.8.0 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
@@ -82,6 +83,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
@@ -95,7 +97,10 @@ require (
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/huandu/xstrings v1.3.3 // indirect
github.com/jaypipes/ghw v0.12.0 // indirect
github.com/jaypipes/pcidb v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -103,6 +108,7 @@ require (
github.com/microcosm-cc/bluemonday v1.0.26 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.0 // indirect
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
@@ -139,6 +145,7 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
howett.net/plist v1.0.0 // indirect
)
require (

18
go.sum
View File

@@ -14,6 +14,8 @@ github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2y
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/alecthomas/assert/v2 v2.6.0 h1:o3WJwILtexrEUk3cUVal3oiQY2tfgr/FHWiz/v2n4FU=
github.com/alecthomas/assert/v2 v2.6.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
github.com/alecthomas/chroma/v2 v2.8.0 h1:w9WJUjFFmHHB2e8mRpL9jjy3alYDlU0QLDezj1xE264=
@@ -71,6 +73,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230628193450-85ed71aaec8e h1:KtbU2JR3lJuXFASHG2+sVLucfMPBjWKUUKByX6C81mQ=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230628193450-85ed71aaec8e/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4=
github.com/go-audio/audio v1.0.0/go.mod h1:6uAu0+H2lHkwdGsAY+j2wHPNPpPoeg5AaEFh9FlA+Zs=
github.com/go-audio/riff v1.0.0 h1:d8iCGbDvox9BfLagY94fBynxSPHO80LmZCaOsmKxokA=
@@ -82,6 +86,7 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
@@ -162,6 +167,11 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/jaypipes/ghw v0.12.0 h1:xU2/MDJfWmBhJnujHY9qwXQLs3DBsf0/Xa9vECY0Tho=
github.com/jaypipes/ghw v0.12.0/go.mod h1:jeJGbkRB2lL3/gxYzNYzEDETV1ZJ56OKr+CSeSEym+g=
github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8=
github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -174,6 +184,8 @@ github.com/klauspost/compress v1.11.4/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM=
github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -210,6 +222,8 @@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2Em
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
@@ -430,6 +444,7 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -489,6 +504,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -500,3 +516,5 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo=
gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=

View File

@@ -55,6 +55,9 @@ func InstallModelFromGallery(galleries []Gallery, name string, basePath string,
installName = req.Name
}
// Copy the model configuration from the request schema
config.URLs = append(config.URLs, model.URLs...)
config.Icon = model.Icon
config.Files = append(config.Files, req.AdditionalFiles...)
config.Files = append(config.Files, model.AdditionalFiles...)
@@ -186,6 +189,12 @@ func getGalleryModels(gallery Gallery, basePath string) ([]*GalleryModel, error)
return models, nil
}
func GetLocalModelConfiguration(basePath string, name string) (*Config, error) {
name = strings.ReplaceAll(name, string(os.PathSeparator), "__")
galleryFile := filepath.Join(basePath, galleryFileName(name))
return ReadConfigFile(galleryFile)
}
func DeleteModelFromSystem(basePath string, name string, additionalFiles []string) error {
// os.PathSeparator is not allowed in model names. Replace them with "__" to avoid conflicts with file paths.
name = strings.ReplaceAll(name, string(os.PathSeparator), "__")
@@ -228,5 +237,8 @@ func DeleteModelFromSystem(basePath string, name string, additionalFiles []strin
err = errors.Join(err, fmt.Errorf("failed to remove file %s: %w", configFile, e))
}
// Delete gallery config file
os.Remove(galleryFile)
return err
}

View File

@@ -40,8 +40,10 @@ prompt_templates:
*/
// Config is the model configuration which contains all the model details
// This configuration is read from the gallery endpoint and is used to download and install the model
// It is the internal structure, separated from the request
type Config struct {
Description string `yaml:"description"`
Icon string `yaml:"icon"`
License string `yaml:"license"`
URLs []string `yaml:"urls"`
Name string `yaml:"name"`

View File

@@ -1,10 +1,10 @@
package gallery
type GalleryOp struct {
Id string
GalleryName string
ConfigURL string
Delete bool
Id string
GalleryModelName string
ConfigURL string
Delete bool
Req GalleryModel
Galleries []Gallery
@@ -19,4 +19,5 @@ type GalleryOpStatus struct {
Progress float64 `json:"progress"`
TotalFileSize string `json:"file_size"`
DownloadedFileSize string `json:"downloaded_size"`
GalleryModelName string `json:"gallery_model_name"`
}

View File

@@ -1,5 +1,10 @@
package gallery
import (
"fmt"
"strings"
)
// GalleryModel is the struct used to represent a model in the gallery returned by the endpoint.
// It is used to install the model by resolving the URL and downloading the files.
// The other fields are used to override the configuration of the model.
@@ -22,3 +27,23 @@ type GalleryModel struct {
// Installed is used to indicate if the model is installed or not
Installed bool `json:"installed,omitempty" yaml:"installed,omitempty"`
}
func (m GalleryModel) ID() string {
return fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name)
}
type GalleryModels []*GalleryModel
func (gm GalleryModels) Search(term string) GalleryModels {
var filteredModels GalleryModels
for _, m := range gm {
if strings.Contains(m.Name, term) ||
strings.Contains(m.Description, term) ||
strings.Contains(m.Gallery.Name, term) ||
strings.Contains(strings.Join(m.Tags, ","), term) {
filteredModels = append(filteredModels, m)
}
}
return filteredModels
}
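A short usage sketch for the new helpers; the values are illustrative, and the fields shown are the ones `Search` matches on:

```
models := GalleryModels{
	{Name: "moondream2", Tags: []string{"multimodal"}, Gallery: Gallery{Name: "localai"}},
	{Name: "bert-embeddings", Tags: []string{"embedding"}, Gallery: Gallery{Name: "localai"}},
}

// Search matches against name, description, gallery name, and tags
for _, m := range models.Search("multimodal") {
	fmt.Println(m.ID()) // prints "localai@moondream2"
}
```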

View File

@@ -2,6 +2,7 @@ package langchain
import (
"context"
"fmt"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/huggingface"
@@ -9,11 +10,16 @@ import (
type HuggingFace struct {
modelPath string
token string
}
func NewHuggingFace(repoId string) (*HuggingFace, error) {
func NewHuggingFace(repoId, token string) (*HuggingFace, error) {
if token == "" {
return nil, fmt.Errorf("no huggingface token provided")
}
return &HuggingFace{
modelPath: repoId,
token: token,
}, nil
}
@@ -21,7 +27,7 @@ func (s *HuggingFace) PredictHuggingFace(text string, opts ...PredictOption) (*P
po := NewPredictOptions(opts...)
// Init client
llm, err := huggingface.New()
llm, err := huggingface.New(huggingface.WithToken(s.token))
if err != nil {
return nil, err
}
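With this change the constructor fails fast when no token is supplied. A minimal caller sketch (the environment variable name is an assumption, not something this patch defines):

```
import (
	"os"

	"github.com/go-skynet/LocalAI/pkg/langchain"
)

func newHuggingFaceClient() (*langchain.HuggingFace, error) {
	// HUGGINGFACE_TOKEN is an illustrative name; any non-empty token passes the check
	return langchain.NewHuggingFace("gpt2", os.Getenv("HUGGINGFACE_TOKEN"))
}
```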

View File

@@ -2,27 +2,32 @@ package model
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"time"
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
"github.com/hashicorp/go-multierror"
"github.com/phayes/freeport"
"github.com/rs/zerolog/log"
)
var Aliases map[string]string = map[string]string{
"go-llama": LLamaCPP,
"llama": LLamaCPP,
"embedded-store": LocalStoreBackend,
"go-llama": LLamaCPP,
"llama": LLamaCPP,
"embedded-store": LocalStoreBackend,
"langchain-huggingface": LCHuggingFaceBackend,
}
const (
LlamaGGML = "llama-ggml"
LLamaCPP = "llama-cpp"
LlamaGGML = "llama-ggml"
LLamaCPP = "llama-cpp"
LLamaCPPFallback = "llama-cpp-fallback"
Gpt4AllLlamaBackend = "gpt4all-llama"
Gpt4AllMptBackend = "gpt4all-mpt"
Gpt4AllJBackend = "gpt4all-j"
@@ -34,21 +39,75 @@ const (
StableDiffusionBackend = "stablediffusion"
TinyDreamBackend = "tinydream"
PiperBackend = "piper"
LCHuggingFaceBackend = "langchain-huggingface"
LCHuggingFaceBackend = "huggingface"
LocalStoreBackend = "local-store"
)
var AutoLoadBackends []string = []string{
LLamaCPP,
LlamaGGML,
Gpt4All,
BertEmbeddingsBackend,
RwkvBackend,
WhisperBackend,
StableDiffusionBackend,
TinyDreamBackend,
PiperBackend,
func backendPath(assetDir, backend string) string {
return filepath.Join(assetDir, "backend-assets", "grpc", backend)
}
// backendsInAssetDir returns the list of backends in the asset directory
// that should be loaded
func backendsInAssetDir(assetDir string) ([]string, error) {
// Exclude backends from automatic loading
excludeBackends := []string{LocalStoreBackend}
entry, err := os.ReadDir(backendPath(assetDir, ""))
if err != nil {
return nil, err
}
var backends []string
ENTRY:
for _, e := range entry {
for _, exclude := range excludeBackends {
if e.Name() == exclude {
continue ENTRY
}
}
if !e.IsDir() {
backends = append(backends, e.Name())
}
}
// order backends from the asset directory.
// as we scan for backends, we want to control the order in which they are tried:
// for example, llama.cpp should be tried first, and the huggingface backend should be kept last.
// Build a priority list: entries listed first have higher priority.
priorityList := []string{
// First llama.cpp and llama-ggml
LLamaCPP, LLamaCPPFallback, LlamaGGML, Gpt4All,
}
toTheEnd := []string{
// last has to be huggingface
LCHuggingFaceBackend,
// then bert embeddings
BertEmbeddingsBackend,
}
slices.Reverse(priorityList)
slices.Reverse(toTheEnd)
// order certain backends first
for _, b := range priorityList {
for i, be := range backends {
if be == b {
backends = append([]string{be}, append(backends[:i], backends[i+1:]...)...)
break
}
}
}
// make sure that some others are pushed at the end
for _, b := range toTheEnd {
for i, be := range backends {
if be == b {
backends = append(append(backends[:i], backends[i+1:]...), be)
break
}
}
}
return backends, nil
}
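To illustrate the two reordering passes, assume the asset directory scan returns the names below (alphabetical, as `os.ReadDir` sorts entries); a sketch of the intermediate and final order:

```
// scanned (alphabetical):  bert-embeddings, huggingface, llama-cpp, llama-cpp-fallback, piper
// after priority pass:     llama-cpp, llama-cpp-fallback, bert-embeddings, huggingface, piper
// after to-the-end pass:   llama-cpp, llama-cpp-fallback, piper, bert-embeddings, huggingface
```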
// starts the grpcModelProcess for the backend, and returns a grpc client
@@ -99,7 +158,7 @@ func (ml *ModelLoader) grpcModel(backend string, o *Options) func(string, string
client = ModelAddress(uri)
}
} else {
grpcProcess := filepath.Join(o.assetDir, "backend-assets", "grpc", backend)
grpcProcess := backendPath(o.assetDir, backend)
// Check if the file exists
if _, err := os.Stat(grpcProcess); os.IsNotExist(err) {
return "", fmt.Errorf("grpc process not found: %s. some backends(stablediffusion, tts) require LocalAI compiled with GO_TAGS", grpcProcess)
@@ -243,7 +302,12 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) {
// autoload also external backends
allBackendsToAutoLoad := []string{}
allBackendsToAutoLoad = append(allBackendsToAutoLoad, AutoLoadBackends...)
autoLoadBackends, err := backendsInAssetDir(o.assetDir)
if err != nil {
return nil, err
}
log.Debug().Msgf("Loading from the following backends (in order): %+v", autoLoadBackends)
allBackendsToAutoLoad = append(allBackendsToAutoLoad, autoLoadBackends...)
for _, b := range o.externalBackends {
allBackendsToAutoLoad = append(allBackendsToAutoLoad, b)
}
@@ -271,10 +335,10 @@ func (ml *ModelLoader) GreedyLoader(opts ...Option) (grpc.Backend, error) {
log.Info().Msgf("[%s] Loads OK", b)
return model, nil
} else if modelerr != nil {
err = multierror.Append(err, modelerr)
err = errors.Join(err, modelerr)
log.Info().Msgf("[%s] Fails: %s", b, modelerr.Error())
} else if model == nil {
err = multierror.Append(err, fmt.Errorf("backend returned no usable model"))
err = errors.Join(err, fmt.Errorf("backend returned no usable model"))
log.Info().Msgf("[%s] Fails: %s", b, "backend returned no usable model")
}
}

View File

@@ -15,6 +15,12 @@ func NewSyncedMap[K comparable, V any]() *SyncedMap[K, V] {
}
}
func (m *SyncedMap[K, V]) Map() map[K]V {
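// note: this returns the internal map, not a copy; callers should treat it as read-only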
m.mu.RLock()
defer m.mu.RUnlock()
return m.m
}
func (m *SyncedMap[K, V]) Get(key K) V {
m.mu.RLock()
defer m.mu.RUnlock()

38
pkg/xsysinfo/cpu.go Normal file
View File

@@ -0,0 +1,38 @@
package xsysinfo
import (
"sort"
"github.com/jaypipes/ghw"
"github.com/klauspost/cpuid/v2"
)
func CPUCapabilities() ([]string, error) {
cpu, err := ghw.CPU()
if err != nil {
return nil, err
}
caps := map[string]struct{}{}
for _, proc := range cpu.Processors {
for _, c := range proc.Capabilities {
caps[c] = struct{}{}
}
}
ret := []string{}
for c := range caps {
ret = append(ret, c)
}
// order
sort.Strings(ret)
return ret, nil
}
func HasCPUCaps(ids ...cpuid.FeatureID) bool {
return cpuid.CPU.Supports(ids...)
}
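A usage sketch for the new helpers with a concrete feature probe; `cpuid.AVX2` is one of the `FeatureID` constants exported by klauspost/cpuid/v2:

```
import (
	"github.com/go-skynet/LocalAI/pkg/xsysinfo"
	"github.com/klauspost/cpuid/v2"
	"github.com/rs/zerolog/log"
)

func logCPUInfo() {
	if caps, err := xsysinfo.CPUCapabilities(); err == nil {
		log.Debug().Msgf("CPU capabilities: %v", caps)
	}
	if xsysinfo.HasCPUCaps(cpuid.AVX2) {
		log.Debug().Msg("AVX2 is available")
	}
}
```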

15
pkg/xsysinfo/gpu.go Normal file
View File

@@ -0,0 +1,15 @@
package xsysinfo
import (
"github.com/jaypipes/ghw"
"github.com/jaypipes/ghw/pkg/gpu"
)
func GPUs() ([]*gpu.GraphicsCard, error) {
gpu, err := ghw.GPU()
if err != nil {
return nil, err
}
return gpu.GraphicsCards, nil
}