Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 03:02:38 -05:00)

Compare commits: tests_mac...fix/abseil (161 commits)
Commits in the range (SHA1 only; the author and date columns were not captured by the mirror view):

18f22d7170, 3bc5652b27, 59ef426fbf, 28c6daf916, 133987b1fb, cbb93bd8ec, 7223284323, 8d046de287, 2845baecd5, d5a56f04be,
f120a0c9f9, 401ee553f4, e3c89ac9cd, b59841cf69, cca881ec49, dd95ae130f, 185ab93b0d, bb38f051e6, 2a05c39adf, deb5311373,
bdfebfe0f4, 3a88299cfe, 748e4cb6b1, 7c554be4ea, 6011845ee9, c184f23621, 8cec0304ee, dc51869c61, f881d25630, 683c306f90,
a985d8c239, 17608ea6aa, 9280060e05, cbcb74e159, f5c1518438, 29e4729c22, 68f3943e0f, b59f81abff, 94c5524277, 5b3211e71c,
5c135d0dec, a4c96836ac, ff19b22d72, d96d4883ce, 83576d7f57, 23b926d43e, 9aec1b3a61, 2d65df38d1, 6f5b6711ea, 89c888bf55,
a637ee2278, 1b270759ef, b10441a41c, 97de2b6550, 497a037344, cf0af16695, 62b4030278, c047c19145, b941732f54, e591ff2e74,
bd2f95c130, ad85c5a1e7, 421eb8a727, b7ff441cc0, 83d867ad46, 6acba2bcbe, 6a2a10603c, 356907a5cf, 7ab7a188d0, ff1a5bfc62,
522f185baf, f7b5a4ca7d, 1d30955677, d3307e93d3, 8d9a452e4b, 466eb82845, 7e562d10a3, 7b1e792732, 30b883affe, 20ec4d0342,
a9f8460086, 98b3b2b1ab, e8bc0a789b, 2b6a2c7dde, c8c8238f9d, 3eaf59021c, a8bfb6f9c2, b783c811db, 59af0e77af, 5d83c8d3a2,
8f968d0341, f93fe30350, 784ccf97ba, a0163dafce, f072cb3cd0, e84b31935c, 03b1cf51fd, 9e6dec0bc4, 04b01cd62c, a181dd0ebc,
69206fcd4b, 2c94e15746, 12513ebae0, 4156a4f15f, 491bb4f174, 5866fc8ded, eb4cd78ca6, 40ce71855a, 9c0d0afd09, 0f9aa1ef91,
3ee5ceb9fa, 1bd72a3be5, fbd14118bf, 515d98b978, 789cf6c599, 0bc82d7270, 9a7ad75bff, 9fb3e4040b, 070fd1b9da, dda5b9f260,
8d84dd4f88, f569237a50, e265a618d9, 533343c84f, 260f2e1d94, 964732590d, 70a2bfe82e, ba2d969c44, d3c78cf4d7, 34afd891a6,
d3137775a1, e1772026a1, d0423254dd, db0e52ae9d, 4f030f9cd3, 60fb45eb97, 43f0688a95, 8142bdc48f, 89a11e15e7, 06de542032,
ecbb61cbf4, 7f13e3a783, c926469b9c, c30b57a629, 2f297979a7, 2437a2769d, b58b7cad94, 68148f2a1a, 4897eb0ba2, 1b43966c48,
c5f2f11503, 895443d1b5, 6a0802e8e6, 94cfaad7f4, ac4a94dd44, 58bf8614d9, 3764e50b35, 3f464d2d9e, 5116d561e1, 96a7a3b59f,
112d0ffa45
.github/check_and_update.py (vendored; new file, 79 lines)

@@ -0,0 +1,79 @@
import hashlib
from huggingface_hub import hf_hub_download, get_paths_info
import requests
import sys
import os

uri = sys.argv[1]
file_name = uri.split('/')[-1]

# Function to parse the URI and determine download method
def parse_uri(uri):
    if uri.startswith('huggingface://'):
        repo_id = uri.split('://')[1]
        return 'huggingface', repo_id.rsplit('/', 1)[0]
    elif 'huggingface.co' in uri:
        parts = uri.split('/resolve/')
        if len(parts) > 1:
            repo_path = parts[0].split('https://huggingface.co/')[-1]
            return 'huggingface', repo_path
    return 'direct', uri

def calculate_sha256(file_path):
    sha256_hash = hashlib.sha256()
    with open(file_path, 'rb') as f:
        for byte_block in iter(lambda: f.read(4096), b''):
            sha256_hash.update(byte_block)
    return sha256_hash.hexdigest()

def manual_safety_check_hf(repo_id):
    scanResponse = requests.get('https://huggingface.co/api/models/' + repo_id + "/scan")
    scan = scanResponse.json()
    if scan['hasUnsafeFile']:
        return scan
    return None

download_type, repo_id_or_url = parse_uri(uri)

new_checksum = None

# Decide download method based on URI type
if download_type == 'huggingface':
    # Check if the repo is flagged as dangerous by HF
    hazard = manual_safety_check_hf(repo_id_or_url)
    if hazard is not None:
        print(f'Error: HuggingFace has detected security problems for {repo_id_or_url}: {str(hazard)}', file=sys.stderr)
        sys.exit(5)
    # Use HF API to pull sha
    for file in get_paths_info(repo_id_or_url, [file_name], repo_type='model'):
        try:
            new_checksum = file.lfs.sha256
            break
        except Exception as e:
            print(f'Error from Hugging Face Hub: {str(e)}', file=sys.stderr)
            sys.exit(2)
    if new_checksum is None:
        try:
            file_path = hf_hub_download(repo_id=repo_id_or_url, filename=file_name)
        except Exception as e:
            print(f'Error from Hugging Face Hub: {str(e)}', file=sys.stderr)
            sys.exit(2)
else:
    response = requests.get(repo_id_or_url)
    if response.status_code == 200:
        with open(file_name, 'wb') as f:
            f.write(response.content)
        file_path = file_name
    elif response.status_code == 404:
        print(f'File not found: {response.status_code}', file=sys.stderr)
        sys.exit(2)
    else:
        print(f'Error downloading file: {response.status_code}', file=sys.stderr)
        sys.exit(1)

if new_checksum is None:
    new_checksum = calculate_sha256(file_path)
    print(new_checksum)
    os.remove(file_path)
else:
    print(new_checksum)
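For reference, a minimal sketch of how this helper is meant to be driven: it prints the sha256 on stdout and signals problems through its exit code (5 when Hugging Face flags the repository as unsafe, 2 when the file cannot be found or the Hub errors out, 1 for other download failures). The behaviour mirrors what the checksum checker below does with the result; the URI here is a placeholder, not a value from the diff.

```bash
#!/bin/bash
# Sketch: invoke .github/check_and_update.py and act on its exit code.
uri="huggingface://example-org/example-model/example-model.Q4_K_M.gguf"  # placeholder URI

new_checksum=$(python3 .github/check_and_update.py "$uri")
result=$?

case "$result" in
  0) echo "sha256 for ${uri##*/}: $new_checksum" ;;
  5) echo "unsafe repository flagged by Hugging Face, drop the gallery entry" ;;
  2) echo "file not found (or Hub error), drop the gallery entry" ;;
  *) echo "download error, skip this entry" ;;
esac
```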
.github/checksum_checker.sh (vendored; 79 lines changed)

@@ -14,77 +14,14 @@ function check_and_update_checksum() {
    idx="$5"

    # Download the file and calculate new checksum using Python
    new_checksum=$(python3 ./check_and_update.py $uri)
    result=$?
    [removed here: the ~60-line Python block previously inlined via python3 -c "..." — the same parse_uri / calculate_sha256 / download-and-hash logic now extracted to .github/check_and_update.py shown above]
    if [[ $result -eq 5 ]]; then
        echo "Contaminated entry detected, deleting entry for $model_name..."
        yq eval -i "del([$idx])" "$input_yaml"
        return
    fi

    if [[ "$new_checksum" == "" ]]; then
        echo "Error calculating checksum for $file_name. Skipping..."
@@ -94,7 +31,7 @@ else:
    echo "Checksum for $file_name: $new_checksum"

    # Compare and update the YAML file if checksums do not match
    result=$?

    if [[ $result -eq 2 ]]; then
        echo "File not found, deleting entry for $file_name..."
        # yq eval -i "del(.[$idx].files[] | select(.filename == \"$file_name\"))" "$input_yaml"
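A hypothetical follow-up to the comparison step above: writing the fresh checksum back with yq. The files/filename/sha256 layout is inferred from the commented-out deletion and is an assumption, not something this diff shows.

```bash
# Hypothetical: update the stored checksum for one file of gallery entry $idx.
# Assumes entries look roughly like: - name: ...  files: [- filename: ..., sha256: ...]
yq eval -i \
  "(.[$idx].files[] | select(.filename == \"$file_name\") | .sha256) = \"$new_checksum\"" \
  "$input_yaml"
```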
.github/ci/modelslist.go (vendored; 38 lines changed)

@@ -75,7 +75,7 @@ var modelPageTemplate string = `
<div class="container mx-auto px-4 py-4">
<div class="flex items-center justify-between">
<div class="flex items-center">
<a href="/" class="text-white text-xl font-bold"><img src="https://github.com/go-skynet/LocalAI/assets/2420543/0966aa2a-166e-4f99-a3e5-6c915fc997dd" alt="LocalAI Logo" class="h-10 mr-3 border-2 border-gray-300 shadow rounded"></a>
<a href="/" class="text-white text-xl font-bold"><img src="https://github.com/mudler/LocalAI/assets/2420543/0966aa2a-166e-4f99-a3e5-6c915fc997dd" alt="LocalAI Logo" class="h-10 mr-3 border-2 border-gray-300 shadow rounded"></a>
<a href="/" class="text-white text-xl font-bold">LocalAI</a>
</div>
<!-- Menu button for small screens -->
@@ -92,9 +92,9 @@ var modelPageTemplate string = `
<!-- Collapsible menu for small screens -->
<div class="hidden lg:hidden" id="mobile-menu">
<div class="pt-4 pb-3 border-t border-gray-700">

<a href="https://localai.io" class="block text-gray-400 hover:text-white px-3 py-2 rounded mt-1" target="_blank" ><i class="fas fa-book-reader pr-2"></i> Documentation</a>

</div>
</div>
</div>
@@ -114,17 +114,17 @@ var modelPageTemplate string = `

<h2 class="text-center text-3xl font-semibold text-gray-100">

🖼️ Available {{.AvailableModels}} models</i> repositories <a href="https://localai.io/models/" target="_blank" >
🖼️ Available {{.AvailableModels}} models</i> <a href="https://localai.io/models/" target="_blank" >
<i class="fas fa-circle-info pr-2"></i>
</a></h2>

<h3>
Refer to <a href="https://localai.io/models" target=_blank> Model gallery</a> for more information on how to use the models with LocalAI.
<h3>
Refer to the Model gallery <a href="https://localai.io/models/" target="_blank" ><i class="fas fa-circle-info pr-2"></i></a> for more information on how to use the models with LocalAI.<br>
You can install models with the CLI command <code>local-ai models install <model-name></code>. or by using the WebUI.
</h3>

<input class="form-control appearance-none block w-full mt-5 px-3 py-2 text-base font-normal text-gray-300 pb-2 mb-5 bg-gray-800 bg-clip-padding border border-solid border-gray-600 rounded transition ease-in-out m-0 focus:text-gray-300 focus:bg-gray-900 focus:border-blue-500 focus:outline-none" type="search"
id="searchbox" placeholder="Live search keyword..">
<div class="dark grid grid-cols-1 grid-rows-1 md:grid-cols-3 block rounded-lg shadow-secondary-1 dark:bg-surface-dark">
{{ range $_, $model := .Models }}
@@ -139,10 +139,10 @@ var modelPageTemplate string = `
</div>
<div class="p-6 text-surface dark:text-white">
<h5 class="mb-2 text-xl font-medium leading-tight">{{$model.Name}}</h5>

<p class="mb-4 text-base truncate">{{ $model.Description }}</p>

</div>
<div class="px-6 pt-4 pb-2">

@@ -178,7 +178,7 @@ var modelPageTemplate string = `
{{ $model.Description }}

</p>

<p class="text-base leading-relaxed text-gray-500 dark:text-gray-400">
To install the model with the CLI, run: <br>
<code> local-ai models install {{$model.Name}} </code> <br>
@@ -193,7 +193,7 @@ var modelPageTemplate string = `
<ul>
{{ range $_, $u := $model.URLs }}
<li><a href="{{ $u }}" target=_blank><i class="fa-solid fa-link"></i> {{ $u }}</a></li>
{{ end }}
</ul>
</p>
</div>
@@ -209,7 +209,7 @@ var modelPageTemplate string = `
</div>
</div>
</div>
{{ end }}

</div>
</div>
@@ -221,10 +221,10 @@ var lazyLoadInstance = new LazyLoad({
});

let cards = document.querySelectorAll('.box')

function liveSearch() {
let search_query = document.getElementById("searchbox").value;

//Use innerText if all contents are visible
//Use textContent for including hidden elements
for (var i = 0; i < cards.length; i++) {
@@ -238,8 +238,8 @@ function liveSearch() {
}

//A little delay
let typingTimer;
let typeInterval = 500;
let searchInput = document.getElementById('searchbox');

searchInput.addEventListener('keyup', () => {
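The template text above points users at the gallery CLI flow; for context, the commands it refers to look like this (the model name is just the example used elsewhere in this diff):

```bash
# Install a model from the gallery browsable at https://models.localai.io, then start it.
local-ai models install hermes-2-theta-llama-3-8b
local-ai run hermes-2-theta-llama-3-8b
```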
.github/release.yml (vendored; 3 lines changed)

@@ -13,6 +13,9 @@ changelog:
labels:
- bug
- regression
- title: "🖧 P2P area"
labels:
- area/p2p
- title: Exciting New Features 🎉
labels:
- Semver-Minor
.github/workflows/image-pr.yml (vendored; 17 lines changed)

@@ -32,7 +32,7 @@ jobs:
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
matrix:
include:
- build-type: ''
@@ -46,7 +46,7 @@ jobs:
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "1"
cuda-minor-version: "4"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-ffmpeg'
@@ -119,7 +119,7 @@ jobs:
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "1"
cuda-minor-version: "4"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-ffmpeg-core'
@@ -127,4 +127,13 @@ jobs:
image-type: 'core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-vulkan-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
.github/workflows/image.yml (vendored; 33 lines changed)

@@ -39,7 +39,7 @@ jobs:
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
max-parallel: ${{ github.event_name != 'pull_request' && 6 || 10 }}
matrix:
include:
# Extra images
@@ -75,7 +75,7 @@ jobs:
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "1"
cuda-minor-version: "4"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12'
@@ -100,7 +100,7 @@ jobs:
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "1"
cuda-minor-version: "4"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cublas-cuda12-ffmpeg'
@@ -232,7 +232,7 @@ jobs:
grpc-base-image: "ubuntu:22.04"
runs-on: 'arc-runner-set'
makeflags: "--jobs=3 --output-sync=target"

core-image-build:
uses: ./.github/workflows/image_build.yml
with:
@@ -257,6 +257,7 @@ jobs:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
matrix:
include:
- build-type: ''
@@ -266,7 +267,7 @@ jobs:
ffmpeg: 'true'
image-type: 'core'
base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
runs-on: 'arc-runner-set'
aio: "-aio-cpu"
latest-image: 'latest-cpu'
latest-image-aio: 'latest-aio-cpu'
@@ -280,18 +281,18 @@ jobs:
ffmpeg: ''
image-type: 'core'
base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
runs-on: 'arc-runner-set'
makeflags: "--jobs=4 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "1"
cuda-minor-version: "4"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-core'
ffmpeg: ''
image-type: 'core'
base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
runs-on: 'arc-runner-set'
makeflags: "--jobs=4 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "11"
@@ -301,17 +302,27 @@ jobs:
tag-suffix: '-cublas-cuda11-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'ubuntu-latest'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "1"
cuda-minor-version: "4"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'ubuntu-latest'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-vulkan-ffmpeg-core'
latest-image: 'latest-vulkan-ffmpeg-core'
ffmpeg: 'true'
image-type: 'core'
runs-on: 'arc-runner-set'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
.github/workflows/image_build.yml (vendored; 6 lines changed)

@@ -19,11 +19,11 @@ on:
type: string
cuda-major-version:
description: 'CUDA major version'
default: "11"
default: "12"
type: string
cuda-minor-version:
description: 'CUDA minor version'
default: "7"
default: "4"
type: string
platforms:
description: 'Platforms'
@@ -324,7 +324,7 @@ jobs:
docker pull quay.io/go-skynet/local-ai:${{ steps.meta_aio.outputs.version }}
docker tag quay.io/go-skynet/local-ai:${{ steps.meta_aio.outputs.version }} quay.io/go-skynet/local-ai:${{ inputs.latest-image-aio }}
docker push quay.io/go-skynet/local-ai:${{ inputs.latest-image-aio }}

- name: job summary
run: |
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY
.github/workflows/notify-models.yaml (vendored; new file, 170 lines)

@@ -0,0 +1,170 @@
name: Notifications for new models
on:
  pull_request:
    types:
      - closed

jobs:
  notify-discord:
    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
    env:
      MODEL_NAME: hermes-2-theta-llama-3-8b
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0 # needed to checkout all branches for this Action to work
      - name: Start LocalAI
        run: |
          echo "Starting LocalAI..."
          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
          until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
      # Check the PR diff using the current branch and the base branch of the PR
      - uses: GrantBirki/git-diff-action@v2.7.0
        id: git-diff-action
        with:
          json_diff_file_output: diff.json
          raw_diff_file_output: diff.txt
          file_output_only: "true"
      - name: Summarize
        env:
          DIFF: ${{ steps.git-diff-action.outputs.raw-diff-path }}
        id: summarize
        run: |
          input="$(cat $DIFF)"

          # Define the LocalAI API endpoint
          API_URL="http://localhost:8080/chat/completions"

          # Create a JSON payload using jq to handle special characters
          json_payload=$(jq -n --arg input "$input" '{
            model: "'$MODEL_NAME'",
            messages: [
              {
                role: "system",
                content: "You are LocalAI-bot. Write a discord message to notify everyone about the new model from the git diff. Make it informal. An example can include: the URL of the model, the name, and a brief description of the model if exists. Also add an hint on how to install it in LocalAI and that can be browsed over https://models.localai.io. For example: local-ai run model_name_here"
              },
              {
                role: "user",
                content: $input
              }
            ]
          }')

          # Send the request to LocalAI
          response=$(curl -s -X POST $API_URL \
            -H "Content-Type: application/json" \
            -d "$json_payload")

          # Extract the summary from the response
          summary="$(echo $response | jq -r '.choices[0].message.content')"

          # Print the summary
          # -H "Authorization: Bearer $API_KEY" \
          echo "Summary:"
          echo "$summary"
          echo "payload sent"
          echo "$json_payload"
          {
            echo 'message<<EOF'
            echo "$summary"
            echo EOF
          } >> "$GITHUB_OUTPUT"
          docker logs --tail 10 local-ai
      - name: Discord notification
        env:
          DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_URL }}
          DISCORD_USERNAME: "LocalAI-Bot"
          DISCORD_AVATAR: "https://avatars.githubusercontent.com/u/139863280?v=4"
        uses: Ilshidur/action-discord@master
        with:
          args: ${{ steps.summarize.outputs.message }}
      - name: Setup tmate session if fails
        if: ${{ failure() }}
        uses: mxschmitt/action-tmate@v3.18
        with:
          detached: true
          connect-timeout-seconds: 180
          limit-access-to-actor: true
  notify-twitter:
    if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
    env:
      MODEL_NAME: hermes-2-theta-llama-3-8b
    runs-on: ubuntu-latest
    steps:
      # [same checkout / Start LocalAI / git-diff-action / Summarize steps as in notify-discord above,
      #  except that the system prompt reads: "You are LocalAI-bot. Write a twitter message to notify
      #  everyone about the new model from the git diff. Make it informal and really short. An example
      #  can include: the name, and a brief description of the model if exists. Also add an hint on how
      #  to install it in LocalAI. For example: local-ai run model_name_here"]
      - uses: Eomm/why-don-t-you-tweet@v2
        with:
          tweet-message: ${{ steps.summarize.outputs.message }}
        env:
          # Get your tokens from https://developer.twitter.com/apps
          TWITTER_CONSUMER_API_KEY: ${{ secrets.TWITTER_APP_KEY }}
          TWITTER_CONSUMER_API_SECRET: ${{ secrets.TWITTER_APP_SECRET }}
          TWITTER_ACCESS_TOKEN: ${{ secrets.TWITTER_ACCESS_TOKEN }}
          TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
      - name: Setup tmate session if fails
        if: ${{ failure() }}
        uses: mxschmitt/action-tmate@v3.18
        with:
          detached: true
          connect-timeout-seconds: 180
          limit-access-to-actor: true
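For local debugging, the summarize step boils down to the sequence below; this is only a sketch, reusing the same image, model and endpoint the workflow uses, with HEAD~1..HEAD standing in for the PR diff.

```bash
# Sketch: reproduce the workflow's summarize step outside CI.
MODEL_NAME=hermes-2-theta-llama-3-8b
docker run -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug "$MODEL_NAME"
until [ "$(docker inspect -f '{{.State.Health.Status}}' local-ai)" = "healthy" ]; do sleep 2; done

# Any diff works as input; HEAD~1..HEAD is just a stand-in for the PR diff.
jq -n --arg model "$MODEL_NAME" --arg input "$(git diff HEAD~1)" \
  '{model: $model, messages: [{role: "user", content: $input}]}' \
  | curl -s -X POST http://localhost:8080/chat/completions \
      -H "Content-Type: application/json" -d @- \
  | jq -r '.choices[0].message.content'
```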
.github/workflows/notify-releases.yaml (vendored; new file, 65 lines)

@@ -0,0 +1,65 @@
name: Release notifications
on:
  release:
    types:
      - published

jobs:
  notify-discord:
    runs-on: ubuntu-latest
    env:
      RELEASE_BODY: ${{ github.event.release.body }}
      RELEASE_TITLE: ${{ github.event.release.name }}
      RELEASE_TAG_NAME: ${{ github.event.release.tag_name }}
    steps:
      - name: Start LocalAI
        run: |
          echo "Starting LocalAI..."
          docker run -e -ti -d --name local-ai -p 8080:8080 localai/localai:master-ffmpeg-core run --debug $MODEL_NAME
          until [ "`docker inspect -f {{.State.Health.Status}} local-ai`" == "healthy" ]; do echo "Waiting for container to be ready"; docker logs --tail 10 local-ai; sleep 2; done
      - name: Summarize
        id: summarize
        run: |
          input="$RELEASE_TITLE\b$RELEASE_BODY"

          # Define the LocalAI API endpoint
          API_URL="http://localhost:8080/chat/completions"

          # Create a JSON payload using jq to handle special characters
          json_payload=$(jq -n --arg input "$input" '{
            model: "'$MODEL_NAME'",
            messages: [
              {
                role: "system",
                content: "Write a discord message with a bullet point summary of the release notes."
              },
              {
                role: "user",
                content: $input
              }
            ]
          }')

          # Send the request to LocalAI API
          response=$(curl -s -X POST $API_URL \
            -H "Content-Type: application/json" \
            -d "$json_payload")

          # Extract the summary from the response
          summary=$(echo $response | jq -r '.choices[0].message.content')

          # Print the summary
          # -H "Authorization: Bearer $API_KEY" \
          {
            echo 'message<<EOF'
            echo "$summary"
            echo EOF
          } >> "$GITHUB_OUTPUT"
      - name: Discord notification
        env:
          DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_URL_RELEASE }}
          DISCORD_USERNAME: "LocalAI-Bot"
          DISCORD_AVATAR: "https://avatars.githubusercontent.com/u/139863280?v=4"
        uses: Ilshidur/action-discord@master
        with:
          args: ${{ steps.summarize.outputs.message }}
.github/workflows/release.yaml (vendored; 120 lines changed)

@@ -31,8 +31,8 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg protobuf-compiler ccache
sudo apt-get install -qy binutils-aarch64-linux-gnu gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
sudo apt-get install build-essential ffmpeg protobuf-compiler ccache gawk
sudo apt-get install -qy binutils-aarch64-linux-gnu gcc-aarch64-linux-gnu g++-aarch64-linux-gnu libgmock-dev
- name: Install CUDA Dependencies
run: |
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/cross-linux-aarch64/cuda-keyring_1.1-1_all.deb
@@ -51,7 +51,21 @@ jobs:
if: steps.cache-grpc.outputs.cache-hit != 'true'
run: |
git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --shallow-submodules https://github.com/grpc/grpc && \
git clone --recurse-submodules -b ${{ env.GRPC_VERSION }} --depth 1 --shallow-submodules https://github.com/grpc/grpc
GRPC_DIR=$PWD/grpc/
echo "include(FetchContent)" >> $GRPC_DIR/CMakeLists.txt
echo "FetchContent_Declare(" >> $GRPC_DIR/CMakeLists.txt
echo "  googletest" >> $GRPC_DIR/CMakeLists.txt
echo "  URL https://github.com/google/googletest/archive/03597a01ee50ed33e9dfd640b249b4be3799d395.zip" >> $GRPC_DIR/CMakeLists.txt
echo ")" >> $GRPC_DIR/CMakeLists.txt
echo "FetchContent_MakeAvailable(googletest)" >> $GRPC_DIR/CMakeLists.txt
GRPC_DIR=$PWD/grpc/third_party/abseil-cpp
echo "include(FetchContent)" >> $GRPC_DIR/CMakeLists.txt
echo "FetchContent_Declare(" >> $GRPC_DIR/CMakeLists.txt
echo "  googletest" >> $GRPC_DIR/CMakeLists.txt
echo "  URL https://github.com/google/googletest/archive/03597a01ee50ed33e9dfd640b249b4be3799d395.zip" >> $GRPC_DIR/CMakeLists.txt
echo ")" >> $GRPC_DIR/CMakeLists.txt
echo "FetchContent_MakeAvailable(googletest)" >> $GRPC_DIR/CMakeLists.txt
cd grpc && mkdir -p cmake/build && cd cmake/build && cmake -DgRPC_INSTALL=ON \
-DgRPC_BUILD_TESTS=OFF \
../.. && sudo make --jobs 5 --output-sync=target
@@ -100,7 +114,13 @@ jobs:
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.0
export PATH=$PATH:$GOPATH/bin
export PATH=/usr/local/cuda/bin:$PATH
GO_TAGS=p2p GOOS=linux GOARCH=arm64 CMAKE_ARGS="-DProtobuf_INCLUDE_DIRS=$CROSS_STAGING_PREFIX/include -DProtobuf_DIR=$CROSS_STAGING_PREFIX/lib/cmake/protobuf -DgRPC_DIR=$CROSS_STAGING_PREFIX/lib/cmake/grpc -DCMAKE_TOOLCHAIN_FILE=$CMAKE_CROSS_TOOLCHAIN -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++" make dist-cross-linux-arm64
sudo rm -rf /usr/aarch64-linux-gnu/lib/libstdc++.so.6
sudo cp -rf /usr/aarch64-linux-gnu/lib/libstdc++.so* /usr/aarch64-linux-gnu/lib/libstdc++.so.6
sudo cp /usr/aarch64-linux-gnu/lib/ld-linux-aarch64.so.1 ld.so
BACKEND_LIBS="./grpc/cmake/cross_build/third_party/re2/libre2.a ./grpc/cmake/cross_build/libgrpc.a ./grpc/cmake/cross_build/libgrpc++.a ./grpc/cmake/cross_build/third_party/protobuf/libprotobuf.a /usr/aarch64-linux-gnu/lib/libc.so.6 /usr/aarch64-linux-gnu/lib/libstdc++.so.6 /usr/aarch64-linux-gnu/lib/libgomp.so.1 /usr/aarch64-linux-gnu/lib/libm.so.6 /usr/aarch64-linux-gnu/lib/libgcc_s.so.1 /usr/aarch64-linux-gnu/lib/libdl.so.2 /usr/aarch64-linux-gnu/lib/libpthread.so.0 ./ld.so" \
GOOS=linux \
GOARCH=arm64 \
CMAKE_ARGS="-DProtobuf_INCLUDE_DIRS=$CROSS_STAGING_PREFIX/include -DProtobuf_DIR=$CROSS_STAGING_PREFIX/lib/cmake/protobuf -DgRPC_DIR=$CROSS_STAGING_PREFIX/lib/cmake/grpc -DCMAKE_TOOLCHAIN_FILE=$CMAKE_CROSS_TOOLCHAIN -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++" make dist-cross-linux-arm64
- uses: actions/upload-artifact@v4
with:
name: LocalAI-linux-arm64
@@ -111,7 +131,13 @@ jobs:
with:
files: |
release/*

- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.18
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
build-linux:
runs-on: arc-runner-set
steps:
@@ -134,7 +160,7 @@ jobs:
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install -y wget curl build-essential ffmpeg protobuf-compiler ccache cmake
sudo apt-get install -y wget curl build-essential ffmpeg protobuf-compiler ccache gawk cmake libgmock-dev
- name: Intel Dependencies
run: |
wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | sudo tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null
@@ -154,15 +180,15 @@ jobs:
ROCM_VERSION: "6.1"
AMDGPU_VERSION: "6.1"
run: |
set -ex

sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates curl libnuma-dev gnupg

curl -sL https://repo.radeon.com/rocm/rocm.gpg.key | sudo apt-key add -

printf "deb [arch=amd64] https://repo.radeon.com/rocm/apt/$ROCM_VERSION/ jammy main" | sudo tee /etc/apt/sources.list.d/rocm.list

printf "deb [arch=amd64] https://repo.radeon.com/amdgpu/$AMDGPU_VERSION/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/amdgpu.list
printf 'Package: *\nPin: release o=repo.radeon.com\nPin-Priority: 600' | sudo tee /etc/apt/preferences.d/rocm-pin-600
sudo apt-get update
@@ -170,10 +196,10 @@ jobs:
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
hipblas-dev rocm-dev \
rocblas-dev

sudo apt-get clean
sudo rm -rf /var/lib/apt/lists/*
sudo ldconfig
- name: Cache grpc
id: cache-grpc
uses: actions/cache@v4
@@ -190,6 +216,7 @@ jobs:
- name: Install gRPC
run: |
cd grpc && cd cmake/build && sudo make --jobs 5 --output-sync=target install
# BACKEND_LIBS needed for gpu-workload: /opt/intel/oneapi/*/lib/libiomp5.so /opt/intel/oneapi/*/lib/libmkl_core.so /opt/intel/oneapi/*/lib/libmkl_core.so.2 /opt/intel/oneapi/*/lib/libmkl_intel_ilp64.so /opt/intel/oneapi/*/lib/libmkl_intel_ilp64.so.2 /opt/intel/oneapi/*/lib/libmkl_sycl_blas.so /opt/intel/oneapi/*/lib/libmkl_sycl_blas.so.4 /opt/intel/oneapi/*/lib/libmkl_tbb_thread.so /opt/intel/oneapi/*/lib/libmkl_tbb_thread.so.2 /opt/intel/oneapi/*/lib/libsycl.so /opt/intel/oneapi/*/lib/libsycl.so.7 /opt/intel/oneapi/*/lib/libsycl.so.7.1.0 /opt/rocm-*/lib/libamdhip64.so /opt/rocm-*/lib/libamdhip64.so.5 /opt/rocm-*/lib/libamdhip64.so.6 /opt/rocm-*/lib/libamdhip64.so.6.1.60100 /opt/rocm-*/lib/libhipblas.so /opt/rocm-*/lib/libhipblas.so.2 /opt/rocm-*/lib/libhipblas.so.2.1.60100 /opt/rocm-*/lib/librocblas.so /opt/rocm-*/lib/librocblas.so.4 /opt/rocm-*/lib/librocblas.so.4.1.60100 /usr/lib/x86_64-linux-gnu/libstdc++.so.6 /usr/lib/x86_64-linux-gnu/libOpenCL.so.1 /usr/lib/x86_64-linux-gnu/libOpenCL.so.1.0.0 /usr/lib/x86_64-linux-gnu/libm.so.6 /usr/lib/x86_64-linux-gnu/libgcc_s.so.1 /usr/lib/x86_64-linux-gnu/libc.so.6 /usr/lib/x86_64-linux-gnu/librt.so.1 /usr/local/cuda-*/targets/x86_64-linux/lib/libcublas.so /usr/local/cuda-*/targets/x86_64-linux/lib/libcublasLt.so /usr/local/cuda-*/targets/x86_64-linux/lib/libcudart.so /usr/local/cuda-*/targets/x86_64-linux/lib/stubs/libcuda.so
- name: Build
id: build
run: |
@@ -199,7 +226,10 @@ jobs:
export PATH=/usr/local/cuda/bin:$PATH
export PATH=/opt/rocm/bin:$PATH
source /opt/intel/oneapi/setvars.sh
GO_TAGS=p2p make -j4 dist
GRPC_DIR=$PWD/backend/cpp/llama
sudo cp /lib64/ld-linux-x86-64.so.2 ld.so
BACKEND_LIBS="./ld.so ./sources/go-piper/piper/build/fi/lib/libfmt.a ./sources/go-piper/piper-phonemize/pi/lib/libonnxruntime.so.1.14.1 ./sources/go-piper/piper-phonemize/pi/src/libespeak-ng/libespeak-ng.so /usr/lib/x86_64-linux-gnu/libdl.so.2 /usr/lib/x86_64-linux-gnu/librt.so.1 /usr/lib/x86_64-linux-gnu/libpthread.so.0 ./sources/go-piper/piper-phonemize/pi/lib/libpiper_phonemize.so.1 ./sources/go-piper/piper/build/si/lib/libspdlog.a ./sources/go-piper/espeak/ei/lib/libucd.so" \
make -j4 dist
- uses: actions/upload-artifact@v4
with:
name: LocalAI-linux
@@ -210,7 +240,13 @@ jobs:
with:
files: |
release/*

- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.18
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
build-stablediffusion:
runs-on: ubuntu-latest
steps:
@@ -246,6 +282,48 @@ jobs:
files: |
release/*

build-macOS-x86_64:
runs-on: macos-13
steps:
- name: Clone
uses: actions/checkout@v4
with:
submodules: true
- uses: actions/setup-go@v5
with:
go-version: '1.21.x'
cache: false
- name: Dependencies
run: |
brew install protobuf grpc
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@8ba23be9613c672d40ae261d2a1335d639bdd59b
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.0
- name: Build
id: build
run: |
export C_INCLUDE_PATH=/usr/local/include
export CPLUS_INCLUDE_PATH=/usr/local/include
export PATH=$PATH:$GOPATH/bin

make dist
- uses: actions/upload-artifact@v4
with:
name: LocalAI-MacOS-x86_64
path: release/
- name: Release
uses: softprops/action-gh-release@v2
if: startsWith(github.ref, 'refs/tags/')
with:
files: |
release/*
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.18
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true

build-macOS-arm64:
runs-on: macos-14
steps:
@@ -268,7 +346,8 @@ jobs:
export C_INCLUDE_PATH=/usr/local/include
export CPLUS_INCLUDE_PATH=/usr/local/include
export PATH=$PATH:$GOPATH/bin
GO_TAGS=p2p make dist

make dist
- uses: actions/upload-artifact@v4
with:
name: LocalAI-MacOS-arm64
@@ -279,3 +358,10 @@ jobs:
with:
files: |
release/*
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.18
with:
detached: true
connect-timeout-seconds: 180
limit-access-to-actor: true
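The BACKEND_LIBS lists exported by these jobs are consumed by the Makefile change further down in this diff: when the variable is non-empty, the build target copies the listed libraries into backend-assets/lib before compiling. In plain shell terms the effect is roughly the following sketch, not the literal recipe:

```bash
# Rough equivalent of the Makefile's new BACKEND_LIBS handling.
if [ -n "$BACKEND_LIBS" ]; then
  mkdir -p backend-assets/lib
  cp -f $BACKEND_LIBS backend-assets/lib/   # bundle shared libraries next to the binary
fi
CGO_LDFLAGS="$CGO_LDFLAGS" go build -ldflags "$LD_FLAGS" -tags "$GO_TAGS" -o local-ai ./
```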
.github/workflows/test.yml (vendored; 3 lines changed)

@@ -71,6 +71,7 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install build-essential curl ffmpeg
sudo apt-get install -y libgmock-dev
curl https://repo.anaconda.com/pkgs/misc/gpgkeys/anaconda.asc | gpg --dearmor > conda.gpg && \
sudo install -o root -g root -m 644 conda.gpg /usr/share/keyrings/conda-archive-keyring.gpg && \
gpg --keyring /usr/share/keyrings/conda-archive-keyring.gpg --no-default-keyring --fingerprint 34161F5BF5EB1D4BFBBB8F0A8AEB4F8B29D82806 && \
@@ -220,7 +221,7 @@ jobs:
export CPLUS_INCLUDE_PATH=/usr/local/include
# Used to run the newer GNUMake version from brew that supports --output-sync
export PATH="/opt/homebrew/opt/make/libexec/gnubin:$PATH"
BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DLLAMA_F16C=OFF -DLLAMA_AVX512=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF" make --jobs 4 --output-sync=target test
BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
- name: Setup tmate session if tests fail
if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.18
Dockerfile (26 lines changed)

@@ -33,7 +33,7 @@ RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | ta
ENV PATH $PATH:/root/go/bin:/usr/local/go/bin

# Install grpc compilers
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.1 && \
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af

COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
@@ -98,11 +98,27 @@ RUN pip install --user grpcio-tools
FROM requirements-${IMAGE_TYPE} AS requirements-drivers

ARG BUILD_TYPE
ARG CUDA_MAJOR_VERSION=11
ARG CUDA_MINOR_VERSION=8
ARG CUDA_MAJOR_VERSION=12
ARG CUDA_MINOR_VERSION=4

ENV BUILD_TYPE=${BUILD_TYPE}

# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT

# CuBLAS requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ]; then
@@ -266,6 +282,8 @@ COPY --from=grpc /opt/grpc /usr/local

# Rebuild with defaults backends
WORKDIR /build

## Build the binary
RUN make build

RUN if [ ! -d "/build/sources/go-piper/piper-phonemize/pi/lib/" ]; then \
@@ -292,7 +310,7 @@ ENV REBUILD=false
ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
ENV MAKEFLAGS=${MAKEFLAGS}

ARG CUDA_MAJOR_VERSION=11
ARG CUDA_MAJOR_VERSION=12
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
ENV NVIDIA_VISIBLE_DEVICES=all
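These ARG defaults (now CUDA 12.4) are what the Makefile's docker targets feed in via --build-arg. A sketch of an equivalent manual build; the image tag is a placeholder, not a name used by the project:

```bash
# Sketch: build the image by hand with explicit CUDA build arguments.
docker build \
  --build-arg BUILD_TYPE=cublas \
  --build-arg CUDA_MAJOR_VERSION=12 \
  --build-arg CUDA_MINOR_VERSION=4 \
  --build-arg IMAGE_TYPE=core \
  -t local-ai:cublas-cuda12 .
```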
Makefile (107 lines changed)

@@ -3,9 +3,11 @@ GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet
BINARY_NAME=local-ai

DETECT_LIBS?=true

# llama.cpp versions
GOLLAMA_STABLE_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
CPPLLAMA_VERSION?=172c8256840ffd882ab9992ecedbb587d9b21f15
CPPLLAMA_VERSION?=fd560fe680c72fd0a0af2bc8881add20ad919071

# gpt4all version
GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
@@ -16,7 +18,7 @@ RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6

# whisper.cpp version
WHISPER_CPP_VERSION?=b29b3b29240aac8b71ce8e5a4360c1f1562ad66f
WHISPER_CPP_VERSION?=1c31f9d4a8936aec550e6c4dc9ca5cae3b4f304a

# bert.cpp version
BERT_VERSION?=710044b124545415f555e4260d16b146c725a6e4
@@ -33,9 +35,11 @@ TINYDREAM_VERSION?=c04fa463ace9d9a6464313aa5f9cd0f953b6c057
export BUILD_TYPE?=
export STABLE_BUILD_TYPE?=$(BUILD_TYPE)
export CMAKE_ARGS?=
export BACKEND_LIBS?=

CGO_LDFLAGS?=
CGO_LDFLAGS_WHISPER?=
CGO_LDFLAGS_WHISPER+=-lggml
CUDA_LIBPATH?=/usr/local/cuda/lib64/
GO_TAGS?=
BUILD_ID?=
@@ -49,12 +53,12 @@ RANDOM := $(shell bash -c 'echo $$RANDOM')
VERSION?=$(shell git describe --always --tags || echo "dev" )
# go tool nm ./local-ai | grep Commit
LD_FLAGS?=
override LD_FLAGS += -X "github.com/go-skynet/LocalAI/internal.Version=$(VERSION)"
override LD_FLAGS += -X "github.com/go-skynet/LocalAI/internal.Commit=$(shell git rev-parse HEAD)"
override LD_FLAGS += -X "github.com/mudler/LocalAI/internal.Version=$(VERSION)"
override LD_FLAGS += -X "github.com/mudler/LocalAI/internal.Commit=$(shell git rev-parse HEAD)"

OPTIONAL_TARGETS?=

OS := $(shell uname -s)
export OS := $(shell uname -s)
ARCH := $(shell uname -m)
GREEN := $(shell tput -Txterm setaf 2)
YELLOW := $(shell tput -Txterm setaf 3)
@@ -80,29 +84,42 @@ ifeq ($(OS),Darwin)
BUILD_TYPE=metal
# disable metal if on Darwin and any other value is explicitly passed.
else ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DLLAMA_METAL=OFF
export LLAMA_NO_ACCELERATE=1
CMAKE_ARGS+=-DGGML_METAL=OFF
export GGML_NO_ACCELERATE=1
export GGML_NO_METAL=1
endif

ifeq ($(BUILD_TYPE),metal)
# -lcblas removed: it seems to always be listed as a duplicate flag.
CGO_LDFLAGS += -framework Accelerate
endif
else
CGO_LDFLAGS_WHISPER+=-lgomp
endif

ifeq ($(BUILD_TYPE),openblas)
CGO_LDFLAGS+=-lopenblas
export WHISPER_OPENBLAS=1
export GGML_OPENBLAS=1
endif

ifeq ($(BUILD_TYPE),cublas)
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
export LLAMA_CUBLAS=1
export WHISPER_CUDA=1
export GGML_CUDA=1
CGO_LDFLAGS_WHISPER+=-L$(CUDA_LIBPATH)/stubs/ -lcuda -lcufft
endif

ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=1
endif

ifneq (,$(findstring sycl,$(BUILD_TYPE)))
export GGML_SYCL=1
endif

ifeq ($(BUILD_TYPE),sycl_f16)
export GGML_SYCL_F16=1
endif

ifeq ($(BUILD_TYPE),hipblas)
ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm
@@ -111,27 +128,26 @@ ifeq ($(BUILD_TYPE),hipblas)
export CC=$(ROCM_HOME)/llvm/bin/clang
# llama-ggml has no hipblas support, so override it here.
export STABLE_BUILD_TYPE=
export WHISPER_HIPBLAS=1
export GGML_HIPBLAS=1
GPU_TARGETS ?= gfx900,gfx906,gfx908,gfx940,gfx941,gfx942,gfx90a,gfx1030,gfx1031,gfx1100,gfx1101
AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIPBLAS=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib
endif

ifeq ($(BUILD_TYPE),metal)
CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
export LLAMA_METAL=1
export WHISPER_METAL=1
export GGML_METAL=1
endif

ifeq ($(BUILD_TYPE),clblas)
CGO_LDFLAGS+=-lOpenCL -lclblast
export WHISPER_CLBLAST=1
export GGML_OPENBLAS=1
endif

# glibc-static or glibc-devel-static required
ifeq ($(STATIC),true)
LD_FLAGS=-linkmode external -extldflags -static
LD_FLAGS+=-linkmode external -extldflags -static
endif

ifeq ($(findstring stablediffusion,$(GO_TAGS)),stablediffusion)
@@ -165,6 +181,8 @@ ALL_GRPC_BACKENDS+=backend-assets/grpc/rwkv
ALL_GRPC_BACKENDS+=backend-assets/grpc/whisper
ALL_GRPC_BACKENDS+=backend-assets/grpc/local-store
ALL_GRPC_BACKENDS+=$(OPTIONAL_GRPC)
# Use filter-out to remove the specified backends
ALL_GRPC_BACKENDS := $(filter-out $(SKIP_GRPC_BACKEND),$(ALL_GRPC_BACKENDS))

GRPC_BACKENDS?=$(ALL_GRPC_BACKENDS) $(OPTIONAL_GRPC)
TEST_PATHS?=./api/... ./pkg/... ./core/...
@@ -244,7 +262,7 @@ sources/whisper.cpp:
cd sources/whisper.cpp && git checkout -b build $(WHISPER_CPP_VERSION) && git submodule update --init --recursive --depth 1

sources/whisper.cpp/libwhisper.a: sources/whisper.cpp
cd sources/whisper.cpp && $(MAKE) libwhisper.a
cd sources/whisper.cpp && $(MAKE) libwhisper.a libggml.a

get-sources: sources/go-llama.cpp sources/gpt4all sources/go-piper sources/go-rwkv.cpp sources/whisper.cpp sources/go-bert.cpp sources/go-stable-diffusion sources/go-tiny-dream

@@ -313,6 +331,10 @@ build: prepare backend-assets grpcs ## Build the project
$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
ifneq ($(BACKEND_LIBS),)
$(MAKE) backend-assets/lib
cp -f $(BACKEND_LIBS) backend-assets/lib/
endif
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./

build-minimal:
@@ -321,8 +343,14 @@ build-minimal:
build-api:
BUILD_GRPC_FOR_BACKEND_LLAMA=true BUILD_API_ONLY=true GO_TAGS=none $(MAKE) build

backend-assets/lib:
mkdir -p backend-assets/lib

dist:
STATIC=true $(MAKE) backend-assets/grpc/llama-cpp-avx2
$(MAKE) backend-assets/grpc/llama-cpp-avx2
ifeq ($(DETECT_LIBS),true)
scripts/prepare-libs.sh backend-assets/grpc/llama-cpp-avx2
endif
ifeq ($(OS),Darwin)
$(info ${GREEN}I Skip CUDA/hipblas build on MacOS${RESET})
else
@@ -331,7 +359,11 @@ else
$(MAKE) backend-assets/grpc/llama-cpp-sycl_f16
$(MAKE) backend-assets/grpc/llama-cpp-sycl_f32
endif
$(MAKE) build
GO_TAGS="tts p2p" $(MAKE) build
ifeq ($(DETECT_LIBS),true)
scripts/prepare-libs.sh backend-assets/grpc/piper
endif
GO_TAGS="tts p2p" STATIC=true $(MAKE) build
mkdir -p release
# if BUILD_ID is empty, then we don't append it to the binary name
ifeq ($(BUILD_ID),)
@@ -342,9 +374,9 @@ else
shasum -a 256 release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH) > release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH).sha256
endif

dist-cross-linux-arm64:
CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_NATIVE=off" GRPC_BACKENDS="backend-assets/grpc/llama-cpp-fallback backend-assets/grpc/llama-cpp-grpc backend-assets/util/llama-cpp-rpc-server" \
$(MAKE) build
dist-cross-linux-arm64:
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_NATIVE=off" GRPC_BACKENDS="backend-assets/grpc/llama-cpp-fallback backend-assets/grpc/llama-cpp-grpc backend-assets/util/llama-cpp-rpc-server" \
STATIC=true $(MAKE) build
mkdir -p release
# if BUILD_ID is empty, then we don't append it to the binary name
ifeq ($(BUILD_ID),)
@@ -393,7 +425,7 @@ prepare-e2e:
mkdir -p $(TEST_DIR)
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
docker build --build-arg GRPC_BACKENDS="$(GRPC_BACKENDS)" --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=11 --build-arg CUDA_MINOR_VERSION=7 --build-arg FFMPEG=true -t localai-tests .
docker build --build-arg GRPC_BACKENDS="$(GRPC_BACKENDS)" --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=4 --build-arg FFMPEG=true -t localai-tests .

run-e2e-image:
ls -liah $(abspath ./tests/e2e-fixtures)
@@ -700,21 +732,21 @@ backend-assets/grpc/llama-cpp-avx2: backend-assets/grpc
cp -rf backend/cpp/llama backend/cpp/llama-avx2
$(MAKE) -C backend/cpp/llama-avx2 purge
$(info ${GREEN}I llama-cpp build info:avx2${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on" $(MAKE) VARIANT="llama-avx2" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-avx2" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-avx2/grpc-server backend-assets/grpc/llama-cpp-avx2

backend-assets/grpc/llama-cpp-avx: backend-assets/grpc
cp -rf backend/cpp/llama backend/cpp/llama-avx
$(MAKE) -C backend/cpp/llama-avx purge
$(info ${GREEN}I llama-cpp build info:avx${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off" $(MAKE) VARIANT="llama-avx" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-avx" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-avx/grpc-server backend-assets/grpc/llama-cpp-avx

backend-assets/grpc/llama-cpp-fallback: backend-assets/grpc
cp -rf backend/cpp/llama backend/cpp/llama-fallback
$(MAKE) -C backend/cpp/llama-fallback purge
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off" $(MAKE) VARIANT="llama-fallback" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-fallback" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-fallback/grpc-server backend-assets/grpc/llama-cpp-fallback
# TODO: every binary should have its own folder instead, so can have different metal implementations
ifeq ($(BUILD_TYPE),metal)
@@ -725,7 +757,7 @@ backend-assets/grpc/llama-cpp-cuda: backend-assets/grpc
cp -rf backend/cpp/llama backend/cpp/llama-cuda
$(MAKE) -C backend/cpp/llama-cuda purge
$(info ${GREEN}I llama-cpp build info:cuda${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_CUDA=ON" $(MAKE) VARIANT="llama-cuda" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_CUDA=ON" $(MAKE) VARIANT="llama-cuda" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-cuda/grpc-server backend-assets/grpc/llama-cpp-cuda

backend-assets/grpc/llama-cpp-hipblas: backend-assets/grpc
@@ -753,7 +785,7 @@ backend-assets/grpc/llama-cpp-grpc: backend-assets/grpc
cp -rf backend/cpp/llama backend/cpp/llama-grpc
$(MAKE) -C backend/cpp/llama-grpc purge
$(info ${GREEN}I llama-cpp build info:grpc${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_RPC=ON -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off" $(MAKE) VARIANT="llama-grpc" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-grpc" build-llama-cpp-grpc-server
cp -rfv backend/cpp/llama-grpc/grpc-server backend-assets/grpc/llama-cpp-grpc

backend-assets/util/llama-cpp-rpc-server: backend-assets/grpc/llama-cpp-grpc
@@ -781,7 +813,7 @@ backend-assets/grpc/tinydream: sources/go-tiny-dream sources/go-tiny-dream/libti
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/tinydream ./backend/go/image/tinydream

backend-assets/grpc/whisper: sources/whisper.cpp sources/whisper.cpp/libwhisper.a backend-assets/grpc
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH=$(CURDIR)/sources/whisper.cpp LIBRARY_PATH=$(CURDIR)/sources/whisper.cpp \
CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH="$(CURDIR)/sources/whisper.cpp/include:$(CURDIR)/sources/whisper.cpp/ggml/include" LIBRARY_PATH=$(CURDIR)/sources/whisper.cpp \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/whisper ./backend/go/transcribe/

backend-assets/grpc/local-store: backend-assets/grpc
@@ -803,6 +835,17 @@ docker:
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
-t $(DOCKER_IMAGE) .

docker-cuda11:
	docker build \
		--build-arg CUDA_MAJOR_VERSION=11 \
		--build-arg CUDA_MINOR_VERSION=8 \
		--build-arg BASE_IMAGE=$(BASE_IMAGE) \
		--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
		--build-arg GO_TAGS="$(GO_TAGS)" \
		--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
		--build-arg BUILD_TYPE=$(BUILD_TYPE) \
		-t $(DOCKER_IMAGE)-cuda11 .

docker-aio:
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
docker build \
@@ -839,7 +882,7 @@ gen-assets:
$(GOCMD) run core/dependencies_manager/manager.go embedded/webui_static.yaml core/http/static/assets

## Documentation
docs/layouts/_default:
mkdir -p docs/layouts/_default

docs/static/gallery.html: docs/layouts/_default
@@ -854,4 +897,4 @@ docs-clean:

.PHONY: docs
docs: docs/static/gallery.html
cd docs && hugo serve
10
README.md
@@ -48,6 +48,13 @@


Run the installer script:

```bash
curl https://localai.io/install.sh | sh
```

Or run with docker:
```bash
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
# Alternative images:
@@ -65,6 +72,8 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu

[Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)

- 🔥🔥 🆕 P2P Dashboard, LocalAI Federated mode and AI Swarms: https://github.com/mudler/LocalAI/pull/2723
- 🆕 You can browse now the model gallery without LocalAI! Check out https://models.localai.io
- 🔥🔥 Decentralized llama.cpp: https://github.com/mudler/LocalAI/pull/2343 (peer2peer llama.cpp!) 👉 Docs https://localai.io/features/distribute/
- 🔥🔥 Openvoice: https://github.com/mudler/LocalAI/pull/2334
- 🆕 Function calls without grammars and mixed mode: https://github.com/mudler/LocalAI/pull/2328
@@ -96,6 +105,7 @@ If you want to help and contribute, issues up for grabs: https://github.com/mudl
- 🥽 [Vision API](https://localai.io/features/gpt-vision/)
- 📈 [Reranker API](https://localai.io/features/reranker/)
- 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
- 🌍 Integrated WebUI!

## 💻 Usage

@@ -230,6 +230,7 @@ message TranscriptRequest {
string dst = 2;
string language = 3;
uint32 threads = 4;
bool translate = 5;
}

message TranscriptResult {

@@ -22,6 +22,15 @@ set(TARGET grpc-server)
set(_PROTOBUF_LIBPROTOBUF libprotobuf)
set(_REFLECTION grpc++_reflection)

include(FetchContent)
FetchContent_Declare(
googletest
URL https://github.com/google/googletest/archive/03597a01ee50ed33e9dfd640b249b4be3799d395.zip
)
# For Windows: Prevent overriding the parent project's compiler/linker settings
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
FetchContent_MakeAvailable(googletest)

if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
# Set correct Homebrew install folder for Apple Silicon and Intel Macs
if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64")

@@ -4,34 +4,44 @@ LLAMA_VERSION?=
CMAKE_ARGS?=
BUILD_TYPE?=
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server

# If build type is cublas, then we set -DLLAMA_CUBLAS=ON to CMAKE_ARGS automatically
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF

# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DLLAMA_CUBLAS=ON
# If build type is openblas then we set -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
CMAKE_ARGS+=-DGGML_CUDA=ON
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DLLAMA_CLBLAST=ON -DCLBlast_DIR=/some/path
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DLLAMA_CLBLAST=ON -DCLBlast_DIR=/some/path
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DLLAMA_HIPBLAS=ON
# If it's OSX, DO NOT embed the metal library - -DLLAMA_METAL_EMBED_LIBRARY=ON requires further investigation
CMAKE_ARGS+=-DGGML_HIPBLAS=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(OS),darwin)
else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DLLAMA_METAL=OFF
CMAKE_ARGS+=-DGGML_METAL=OFF
else
CMAKE_ARGS+=-DGGML_METAL=ON
# Until this is tested properly, we disable embedded metal file
# as we already embed it as part of the LocalAI assets
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=OFF
TARGET+=--target ggml-metal
endif
endif

ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
CMAKE_ARGS+=-DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON
endif

ifeq ($(BUILD_TYPE),sycl_f32)
CMAKE_ARGS+=-DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
CMAKE_ARGS+=-DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
endif

llama.cpp:
@@ -61,9 +71,9 @@ clean: purge
grpc-server: llama.cpp llama.cpp/examples/grpc-server
@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && $(MAKE)"
+bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)"
else
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && $(MAKE)
+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
endif
cp llama.cpp/build/bin/grpc-server .

@@ -886,6 +886,8 @@ struct llama_server_context
{"task_id", slot->task_id},
});

LOG_TEE("sampling: \n%s\n", llama_sampling_print(slot->sparams).c_str());

return true;
}

@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -3,9 +3,9 @@ package main
|
||||
// This is a wrapper to statisfy the GRPC service interface
|
||||
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/go-skynet/LocalAI/pkg/stablediffusion"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/stablediffusion"
|
||||
)
|
||||
|
||||
type Image struct {
|
||||
|
||||
@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -3,9 +3,9 @@ package main
|
||||
// This is a wrapper to statisfy the GRPC service interface
|
||||
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/go-skynet/LocalAI/pkg/tinydream"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/tinydream"
|
||||
)
|
||||
|
||||
type Image struct {
|
||||
|
||||
@@ -5,8 +5,8 @@ package main
|
||||
import (
|
||||
bert "github.com/go-skynet/go-bert.cpp"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
)
|
||||
|
||||
type Embeddings struct {
|
||||
|
||||
@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,8 +5,8 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
|
||||
)
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -6,9 +6,9 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/go-skynet/LocalAI/pkg/langchain"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/langchain"
|
||||
)
|
||||
|
||||
type LLM struct {
|
||||
|
||||
@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,9 +5,9 @@ package main
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/go-skynet/go-llama.cpp"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
)
|
||||
|
||||
type LLM struct {
|
||||
|
||||
@@ -3,7 +3,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -6,9 +6,9 @@ import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/go-skynet/go-llama.cpp"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
)
|
||||
|
||||
type LLM struct {
|
||||
|
||||
@@ -7,7 +7,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/donomii/go-rwkv.cpp"
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
)
|
||||
|
||||
const tokenizerSuffix = ".tokenizer.json"
|
||||
@@ -31,7 +31,7 @@ func (llm *LLM) Load(opts *pb.ModelOptions) error {
|
||||
model := rwkv.LoadFiles(opts.ModelFile, tokenizerPath, uint32(opts.GetThreads()))
|
||||
|
||||
if model == nil {
|
||||
return fmt.Errorf("could not load model")
|
||||
return fmt.Errorf("rwkv could not load model")
|
||||
}
|
||||
llm.rwkv = model
|
||||
return nil
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"math"
|
||||
"slices"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -8,7 +8,7 @@ import (

"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
"github.com/go-audio/wav"
"github.com/go-skynet/LocalAI/core/schema"
"github.com/mudler/LocalAI/core/schema"
)

func ffmpegCommand(args []string) (string, error) {
@@ -29,7 +29,7 @@ func audioToWav(src, dst string) error {
return nil
}

func Transcript(model whisper.Model, audiopath, language string, threads uint) (schema.TranscriptionResult, error) {
func Transcript(model whisper.Model, audiopath, language string, translate bool, threads uint) (schema.TranscriptionResult, error) {
res := schema.TranscriptionResult{}

dir, err := os.MkdirTemp("", "whisper")
@@ -75,6 +75,10 @@ func Transcript(model whisper.Model, audiopath, language string, threads uint) (
context.SetLanguage("auto")
}

if translate {
context.SetTranslate(true)
}

if err := context.Process(data, nil, nil); err != nil {
return res, err
}

@@ -4,9 +4,9 @@ package main
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
"github.com/go-skynet/LocalAI/core/schema"
"github.com/go-skynet/LocalAI/pkg/grpc/base"
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/core/schema"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

type Whisper struct {
@@ -22,5 +22,5 @@ func (sd *Whisper) Load(opts *pb.ModelOptions) error {
}

func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (schema.TranscriptionResult, error) {
return Transcript(sd.whisper, opts.Dst, opts.Language, uint(opts.Threads))
return Transcript(sd.whisper, opts.Dst, opts.Language, opts.Translate, uint(opts.Threads))
}

@@ -5,7 +5,7 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
|
||||
grpc "github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/base"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
piper "github.com/mudler/go-piper"
|
||||
)
|
||||
|
||||
|
||||
@@ -148,13 +148,13 @@ function startBackend() {
ensureVenv

if [ ! -z ${BACKEND_FILE} ]; then
python ${BACKEND_FILE} $@
exec python ${BACKEND_FILE} $@
elif [ -e "${MY_DIR}/server.py" ]; then
python ${MY_DIR}/server.py $@
exec python ${MY_DIR}/server.py $@
elif [ -e "${MY_DIR}/backend.py" ]; then
python ${MY_DIR}/backend.py $@
exec python ${MY_DIR}/backend.py $@
elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then
python ${MY_DIR}/${BACKEND_NAME}.py $@
exec python ${MY_DIR}/${BACKEND_NAME}.py $@
fi
}

@@ -210,4 +210,4 @@ function checkTargets() {
echo false
}

init
init

@@ -17,7 +17,7 @@ import backend_pb2_grpc

import grpc

from diffusers import StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, EulerAncestralDiscreteScheduler
from diffusers import StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, EulerAncestralDiscreteScheduler
from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline
from diffusers.pipelines.stable_diffusion import safety_checker
from diffusers.utils import load_image,export_to_video
@@ -225,6 +225,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
torch_dtype=torchType,
use_safetensors=True,
variant=variant)
elif request.PipelineType == "StableDiffusion3Pipeline":
if fromSingleFile:
self.pipe = StableDiffusion3Pipeline.from_single_file(modelFile,
torch_dtype=torchType,
use_safetensors=True)
else:
self.pipe = StableDiffusion3Pipeline.from_pretrained(
request.Model,
torch_dtype=torchType,
use_safetensors=True,
variant=variant)

if CLIPSKIP and request.CLIPSkip != 0:
self.clip_skip = request.CLIPSkip

@@ -5,6 +5,7 @@ grpcio==1.64.0
opencv-python
pillow
protobuf
sentencepiece
torch
transformers
certifi
certifi

@@ -1,9 +1,9 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
// The purpose of this structure is to hold pointers to all initialized services, to make plumbing easy
|
||||
@@ -28,7 +28,6 @@ type Application struct {
|
||||
// LocalAI System Services
|
||||
BackendMonitorService *services.BackendMonitorService
|
||||
GalleryService *services.GalleryService
|
||||
ListModelsService *services.ListModelsService
|
||||
LocalAIMetricsService *services.LocalAIMetricsService
|
||||
// OpenAIService *services.OpenAIService
|
||||
}
|
||||
|
||||
@@ -3,10 +3,10 @@ package backend
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/grpc"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (func() ([]float32, error), error) {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negative_prompt, src, dst string, loader *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (func() error, error) {
|
||||
|
||||
@@ -9,14 +9,14 @@ import (
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/pkg/grpc"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
)
|
||||
|
||||
type LLMResponse struct {
|
||||
@@ -57,7 +57,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
if _, err := os.Stat(modelFile); os.IsNotExist(err) {
|
||||
utils.ResetDownloadTimers()
|
||||
// if we failed to load the model, we try to download it
|
||||
err := gallery.InstallModelFromGallery(o.Galleries, modelFile, loader.ModelPath, gallery.GalleryModel{}, utils.DisplayDownloadFunction)
|
||||
err := gallery.InstallModelFromGallery(o.Galleries, modelFile, loader.ModelPath, gallery.GalleryModel{}, utils.DisplayDownloadFunction, o.EnforcePredownloadScans)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -5,9 +5,9 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
pb "github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -142,12 +142,14 @@ func gRPCPredictOpts(c config.BackendConfig, modelPath string) *pb.PredictOption
|
||||
MirostatTAU: float32(*c.LLMConfig.MirostatTAU),
|
||||
Debug: *c.Debug,
|
||||
StopPrompts: c.StopWords,
|
||||
Repeat: int32(c.RepeatPenalty),
|
||||
Repeat: int32(c.RepeatLastN),
|
||||
FrequencyPenalty: float32(c.FrequencyPenalty),
|
||||
PresencePenalty: float32(c.PresencePenalty),
|
||||
Penalty: float32(c.RepeatPenalty),
|
||||
NKeep: int32(c.Keep),
|
||||
Batch: int32(c.Batch),
|
||||
IgnoreEOS: c.IgnoreEOS,
|
||||
Seed: getSeed(c),
|
||||
FrequencyPenalty: float32(c.FrequencyPenalty),
|
||||
MLock: *c.MMlock,
|
||||
MMap: *c.MMap,
|
||||
MainGPU: c.MainGPU,
|
||||
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func Rerank(backend, modelFile string, request *proto.RerankRequest, loader *model.ModelLoader, appConfig *config.ApplicationConfig, backendConfig config.BackendConfig) (*proto.RerankResult, error) {
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/grpc"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func StoreBackend(sl *model.ModelLoader, appConfig *config.ApplicationConfig, storeName string) (grpc.Backend, error) {
|
||||
|
||||
@@ -4,14 +4,14 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func ModelTranscription(audio, language string, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.TranscriptionResult, error) {
|
||||
func ModelTranscription(audio, language string, translate bool, ml *model.ModelLoader, backendConfig config.BackendConfig, appConfig *config.ApplicationConfig) (*schema.TranscriptionResult, error) {
|
||||
|
||||
opts := modelOpts(backendConfig, appConfig, []model.Option{
|
||||
model.WithBackendString(model.WhisperBackend),
|
||||
@@ -31,8 +31,9 @@ func ModelTranscription(audio, language string, ml *model.ModelLoader, backendCo
|
||||
}
|
||||
|
||||
return whisperModel.AudioTranscription(context.Background(), &proto.TranscriptRequest{
|
||||
Dst: audio,
|
||||
Language: language,
|
||||
Threads: uint32(*backendConfig.Threads),
|
||||
Dst: audio,
|
||||
Language: language,
|
||||
Translate: translate,
|
||||
Threads: uint32(*backendConfig.Threads),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -6,11 +6,11 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
)
|
||||
|
||||
func generateUniqueFileName(dir, baseName, ext string) string {
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
"github.com/go-skynet/LocalAI/core/cli/worker"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/cli/worker"
|
||||
)
|
||||
|
||||
var CLI struct {
|
||||
cliContext.Context `embed:""`
|
||||
|
||||
Run RunCMD `cmd:"" help:"Run LocalAI, this the default command if no other command is specified. Run 'local-ai run --help' for more information" default:"withargs"`
|
||||
Federated FederatedCLI `cmd:"" help:"Run LocalAI in federated mode"`
|
||||
Models ModelsCMD `cmd:"" help:"Manage LocalAI models and definitions"`
|
||||
TTS TTSCMD `cmd:"" help:"Convert text to speech"`
|
||||
Transcript TranscriptCMD `cmd:"" help:"Convert audio to text"`
|
||||
|
||||
130
core/cli/federated.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"math/rand/v2"
|
||||
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/edgevpn/pkg/node"
|
||||
"github.com/mudler/edgevpn/pkg/protocol"
|
||||
"github.com/mudler/edgevpn/pkg/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type FederatedCLI struct {
|
||||
Address string `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"`
|
||||
Peer2PeerToken string `env:"LOCALAI_P2P_TOKEN,P2P_TOKEN,TOKEN" name:"p2ptoken" help:"Token for P2P mode (optional)" group:"p2p"`
|
||||
}
|
||||
|
||||
func (f *FederatedCLI) Run(ctx *cliContext.Context) error {
|
||||
|
||||
n, err := p2p.NewNode(f.Peer2PeerToken)
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating a new node: %w", err)
|
||||
}
|
||||
err = n.Start(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("creating a new node: %w", err)
|
||||
}
|
||||
|
||||
if err := p2p.ServiceDiscoverer(context.Background(), n, f.Peer2PeerToken, p2p.FederatedID, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return Proxy(context.Background(), n, f.Address, p2p.FederatedID)
|
||||
}
|
||||
|
||||
func Proxy(ctx context.Context, node *node.Node, listenAddr, service string) error {
|
||||
|
||||
log.Info().Msgf("Allocating service '%s' on: %s", service, listenAddr)
|
||||
// Open local port for listening
|
||||
l, err := net.Listen("tcp", listenAddr)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error listening")
|
||||
return err
|
||||
}
|
||||
// ll.Info("Binding local port on", srcaddr)
|
||||
|
||||
ledger, _ := node.Ledger()
|
||||
|
||||
// Announce ourselves so nodes accepts our connection
|
||||
ledger.Announce(
|
||||
ctx,
|
||||
10*time.Second,
|
||||
func() {
|
||||
// Retrieve current ID for ip in the blockchain
|
||||
//_, found := ledger.GetKey(protocol.UsersLedgerKey, node.Host().ID().String())
|
||||
// If mismatch, update the blockchain
|
||||
//if !found {
|
||||
updatedMap := map[string]interface{}{}
|
||||
updatedMap[node.Host().ID().String()] = &types.User{
|
||||
PeerID: node.Host().ID().String(),
|
||||
Timestamp: time.Now().String(),
|
||||
}
|
||||
ledger.Add(protocol.UsersLedgerKey, updatedMap)
|
||||
// }
|
||||
},
|
||||
)
|
||||
|
||||
defer l.Close()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return errors.New("context canceled")
|
||||
default:
|
||||
log.Debug().Msg("New for connection")
|
||||
// Listen for an incoming connection.
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
fmt.Println("Error accepting: ", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle connections in a new goroutine, forwarding to the p2p service
|
||||
go func() {
|
||||
var tunnelAddresses []string
|
||||
for _, v := range p2p.GetAvailableNodes(p2p.FederatedID) {
|
||||
if v.IsOnline() {
|
||||
tunnelAddresses = append(tunnelAddresses, v.TunnelAddress)
|
||||
} else {
|
||||
log.Info().Msgf("Node %s is offline", v.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// open a TCP stream to one of the tunnels
|
||||
// chosen randomly
|
||||
// TODO: optimize this and track usage
|
||||
tunnelAddr := tunnelAddresses[rand.IntN(len(tunnelAddresses))]
|
||||
|
||||
tunnelConn, err := net.Dial("tcp", tunnelAddr)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error connecting to tunnel")
|
||||
return
|
||||
}
|
||||
|
||||
log.Info().Msgf("Redirecting %s to %s", conn.LocalAddr().String(), tunnelConn.RemoteAddr().String())
|
||||
closer := make(chan struct{}, 2)
|
||||
go copyStream(closer, tunnelConn, conn)
|
||||
go copyStream(closer, conn, tunnelConn)
|
||||
<-closer
|
||||
|
||||
tunnelConn.Close()
|
||||
conn.Close()
|
||||
// ll.Infof("(service %s) Done handling %s", serviceID, l.Addr().String())
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func copyStream(closer chan struct{}, dst io.Writer, src io.Reader) {
|
||||
defer func() { closer <- struct{}{} }() // connection is closed, send signal to stop proxy
|
||||
io.Copy(dst, src)
|
||||
}
|
||||
@@ -2,12 +2,15 @@ package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/startup"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/startup"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/schollz/progressbar/v3"
|
||||
)
|
||||
@@ -22,7 +25,8 @@ type ModelsList struct {
|
||||
}
|
||||
|
||||
type ModelsInstall struct {
|
||||
ModelArgs []string `arg:"" optional:"" name:"models" help:"Model configuration URLs to load"`
|
||||
DisablePredownloadScan bool `env:"LOCALAI_DISABLE_PREDOWNLOAD_SCAN" help:"If true, disables the best-effort security scanner before downloading any files." group:"hardening" default:"false"`
|
||||
ModelArgs []string `arg:"" optional:"" name:"models" help:"Model configuration URLs to load"`
|
||||
|
||||
ModelsCMDFlags `embed:""`
|
||||
}
|
||||
@@ -33,7 +37,7 @@ type ModelsCMD struct {
|
||||
}
|
||||
|
||||
func (ml *ModelsList) Run(ctx *cliContext.Context) error {
|
||||
var galleries []gallery.Gallery
|
||||
var galleries []config.Gallery
|
||||
if err := json.Unmarshal([]byte(ml.Galleries), &galleries); err != nil {
|
||||
log.Error().Err(err).Msg("unable to load galleries")
|
||||
}
|
||||
@@ -53,10 +57,11 @@ func (ml *ModelsList) Run(ctx *cliContext.Context) error {
|
||||
}
|
||||
|
||||
func (mi *ModelsInstall) Run(ctx *cliContext.Context) error {
|
||||
var galleries []gallery.Gallery
|
||||
var galleries []config.Gallery
|
||||
if err := json.Unmarshal([]byte(mi.Galleries), &galleries); err != nil {
|
||||
log.Error().Err(err).Msg("unable to load galleries")
|
||||
}
|
||||
|
||||
for _, modelName := range mi.ModelArgs {
|
||||
|
||||
progressBar := progressbar.NewOptions(
|
||||
@@ -78,14 +83,22 @@ func (mi *ModelsInstall) Run(ctx *cliContext.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
model := gallery.FindModel(models, modelName, mi.ModelsPath)
|
||||
if model == nil {
|
||||
log.Error().Str("model", modelName).Msg("model not found")
|
||||
return err
|
||||
if !downloader.LooksLikeOCI(modelName) {
|
||||
model := gallery.FindModel(models, modelName, mi.ModelsPath)
|
||||
if model == nil {
|
||||
log.Error().Str("model", modelName).Msg("model not found")
|
||||
return err
|
||||
}
|
||||
|
||||
err = gallery.SafetyScanGalleryModel(model)
|
||||
if err != nil && !errors.Is(err, downloader.ErrNonHuggingFaceFile) {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info().Str("model", modelName).Str("license", model.License).Msg("installing model")
|
||||
}
|
||||
|
||||
log.Info().Str("model", modelName).Str("license", model.License).Msg("installing model")
|
||||
err = startup.InstallModels(galleries, "", mi.ModelsPath, progressCallback, modelName)
|
||||
err = startup.InstallModels(galleries, "", mi.ModelsPath, !mi.DisablePredownloadScan, progressCallback, modelName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
100
core/cli/run.go
@@ -3,14 +3,16 @@ package cli
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/http"
|
||||
"github.com/go-skynet/LocalAI/core/p2p"
|
||||
"github.com/go-skynet/LocalAI/core/startup"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/core/startup"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
@@ -40,24 +42,27 @@ type RunCMD struct {
|
||||
Threads int `env:"LOCALAI_THREADS,THREADS" short:"t" help:"Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested" group:"performance"`
|
||||
ContextSize int `env:"LOCALAI_CONTEXT_SIZE,CONTEXT_SIZE" default:"512" help:"Default context size for models" group:"performance"`
|
||||
|
||||
Address string `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"`
|
||||
CORS bool `env:"LOCALAI_CORS,CORS" help:"" group:"api"`
|
||||
CORSAllowOrigins string `env:"LOCALAI_CORS_ALLOW_ORIGINS,CORS_ALLOW_ORIGINS" group:"api"`
|
||||
CSRF bool `env:"LOCALAI_CSRF" help:"Enables fiber CSRF middleware" group:"api"`
|
||||
UploadLimit int `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"`
|
||||
APIKeys []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"`
|
||||
DisableWebUI bool `env:"LOCALAI_DISABLE_WEBUI,DISABLE_WEBUI" default:"false" help:"Disable webui" group:"api"`
|
||||
OpaqueErrors bool `env:"LOCALAI_OPAQUE_ERRORS" default:"false" help:"If true, all error responses are replaced with blank 500 errors. This is intended only for hardening against information leaks and is normally not recommended." group:"api"`
|
||||
Peer2Peer bool `env:"LOCALAI_P2P,P2P" name:"p2p" default:"false" help:"Enable P2P mode" group:"p2p"`
|
||||
Peer2PeerToken string `env:"LOCALAI_P2P_TOKEN,P2P_TOKEN" name:"p2ptoken" help:"Token for P2P mode (optional)" group:"p2p"`
|
||||
ParallelRequests bool `env:"LOCALAI_PARALLEL_REQUESTS,PARALLEL_REQUESTS" help:"Enable backends to handle multiple requests in parallel if they support it (e.g.: llama.cpp or vllm)" group:"backends"`
|
||||
SingleActiveBackend bool `env:"LOCALAI_SINGLE_ACTIVE_BACKEND,SINGLE_ACTIVE_BACKEND" help:"Allow only one backend to be run at a time" group:"backends"`
|
||||
PreloadBackendOnly bool `env:"LOCALAI_PRELOAD_BACKEND_ONLY,PRELOAD_BACKEND_ONLY" default:"false" help:"Do not launch the API services, only the preloaded models / backends are started (useful for multi-node setups)" group:"backends"`
|
||||
ExternalGRPCBackends []string `env:"LOCALAI_EXTERNAL_GRPC_BACKENDS,EXTERNAL_GRPC_BACKENDS" help:"A list of external grpc backends" group:"backends"`
|
||||
EnableWatchdogIdle bool `env:"LOCALAI_WATCHDOG_IDLE,WATCHDOG_IDLE" default:"false" help:"Enable watchdog for stopping backends that are idle longer than the watchdog-idle-timeout" group:"backends"`
|
||||
WatchdogIdleTimeout string `env:"LOCALAI_WATCHDOG_IDLE_TIMEOUT,WATCHDOG_IDLE_TIMEOUT" default:"15m" help:"Threshold beyond which an idle backend should be stopped" group:"backends"`
|
||||
EnableWatchdogBusy bool `env:"LOCALAI_WATCHDOG_BUSY,WATCHDOG_BUSY" default:"false" help:"Enable watchdog for stopping backends that are busy longer than the watchdog-busy-timeout" group:"backends"`
|
||||
WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"`
|
||||
Address string `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"`
|
||||
CORS bool `env:"LOCALAI_CORS,CORS" help:"" group:"api"`
|
||||
CORSAllowOrigins string `env:"LOCALAI_CORS_ALLOW_ORIGINS,CORS_ALLOW_ORIGINS" group:"api"`
|
||||
LibraryPath string `env:"LOCALAI_LIBRARY_PATH,LIBRARY_PATH" help:"Path to the library directory (for e.g. external libraries used by backends)" default:"/usr/share/local-ai/libs" group:"backends"`
|
||||
CSRF bool `env:"LOCALAI_CSRF" help:"Enables fiber CSRF middleware" group:"api"`
|
||||
UploadLimit int `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"`
|
||||
APIKeys []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"`
|
||||
DisableWebUI bool `env:"LOCALAI_DISABLE_WEBUI,DISABLE_WEBUI" default:"false" help:"Disable webui" group:"api"`
|
||||
DisablePredownloadScan bool `env:"LOCALAI_DISABLE_PREDOWNLOAD_SCAN" help:"If true, disables the best-effort security scanner before downloading any files." group:"hardening" default:"false"`
|
||||
OpaqueErrors bool `env:"LOCALAI_OPAQUE_ERRORS" default:"false" help:"If true, all error responses are replaced with blank 500 errors. This is intended only for hardening against information leaks and is normally not recommended." group:"hardening"`
|
||||
Peer2Peer bool `env:"LOCALAI_P2P,P2P" name:"p2p" default:"false" help:"Enable P2P mode" group:"p2p"`
|
||||
Peer2PeerToken string `env:"LOCALAI_P2P_TOKEN,P2P_TOKEN,TOKEN" name:"p2ptoken" help:"Token for P2P mode (optional)" group:"p2p"`
|
||||
ParallelRequests bool `env:"LOCALAI_PARALLEL_REQUESTS,PARALLEL_REQUESTS" help:"Enable backends to handle multiple requests in parallel if they support it (e.g.: llama.cpp or vllm)" group:"backends"`
|
||||
SingleActiveBackend bool `env:"LOCALAI_SINGLE_ACTIVE_BACKEND,SINGLE_ACTIVE_BACKEND" help:"Allow only one backend to be run at a time" group:"backends"`
|
||||
PreloadBackendOnly bool `env:"LOCALAI_PRELOAD_BACKEND_ONLY,PRELOAD_BACKEND_ONLY" default:"false" help:"Do not launch the API services, only the preloaded models / backends are started (useful for multi-node setups)" group:"backends"`
|
||||
ExternalGRPCBackends []string `env:"LOCALAI_EXTERNAL_GRPC_BACKENDS,EXTERNAL_GRPC_BACKENDS" help:"A list of external grpc backends" group:"backends"`
|
||||
EnableWatchdogIdle bool `env:"LOCALAI_WATCHDOG_IDLE,WATCHDOG_IDLE" default:"false" help:"Enable watchdog for stopping backends that are idle longer than the watchdog-idle-timeout" group:"backends"`
|
||||
WatchdogIdleTimeout string `env:"LOCALAI_WATCHDOG_IDLE_TIMEOUT,WATCHDOG_IDLE_TIMEOUT" default:"15m" help:"Threshold beyond which an idle backend should be stopped" group:"backends"`
|
||||
EnableWatchdogBusy bool `env:"LOCALAI_WATCHDOG_BUSY,WATCHDOG_BUSY" default:"false" help:"Enable watchdog for stopping backends that are busy longer than the watchdog-busy-timeout" group:"backends"`
|
||||
WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"`
|
||||
Federated bool `env:"LOCALAI_FEDERATED,FEDERATED" help:"Enable federated instance" group:"federated"`
|
||||
}
|
||||
|
||||
func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
@@ -80,6 +85,7 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
config.WithCors(r.CORS),
|
||||
config.WithCorsAllowOrigins(r.CORSAllowOrigins),
|
||||
config.WithCsrf(r.CSRF),
|
||||
config.WithLibPath(r.LibraryPath),
|
||||
config.WithThreads(r.Threads),
|
||||
config.WithBackendAssets(ctx.BackendAssets),
|
||||
config.WithBackendAssetsOutput(r.BackendAssetsPath),
|
||||
@@ -87,11 +93,13 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
config.WithApiKeys(r.APIKeys),
|
||||
config.WithModelsURL(append(r.Models, r.ModelArgs...)...),
|
||||
config.WithOpaqueErrors(r.OpaqueErrors),
|
||||
config.WithEnforcedPredownloadScans(!r.DisablePredownloadScan),
|
||||
}
|
||||
|
||||
token := ""
|
||||
if r.Peer2Peer || r.Peer2PeerToken != "" {
|
||||
log.Info().Msg("P2P mode enabled")
|
||||
token := r.Peer2PeerToken
|
||||
token = r.Peer2PeerToken
|
||||
if token == "" {
|
||||
// IF no token is provided, and p2p is enabled,
|
||||
// we generate one and wait for the user to pick up the token (this is for interactive)
|
||||
@@ -102,14 +110,46 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
|
||||
log.Info().Msg("To use the token, you can run the following command in another node or terminal:")
|
||||
fmt.Printf("export TOKEN=\"%s\"\nlocal-ai worker p2p-llama-cpp-rpc\n", token)
|
||||
|
||||
// Ask for user confirmation
|
||||
log.Info().Msg("Press a button to proceed")
|
||||
var input string
|
||||
fmt.Scanln(&input)
|
||||
}
|
||||
opts = append(opts, config.WithP2PToken(token))
|
||||
|
||||
node, err := p2p.NewNode(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info().Msg("Starting P2P server discovery...")
|
||||
if err := p2p.LLamaCPPRPCServerDiscoverer(context.Background(), token); err != nil {
|
||||
if err := p2p.ServiceDiscoverer(context.Background(), node, token, "", func() {
|
||||
var tunnelAddresses []string
|
||||
for _, v := range p2p.GetAvailableNodes("") {
|
||||
if v.IsOnline() {
|
||||
tunnelAddresses = append(tunnelAddresses, v.TunnelAddress)
|
||||
} else {
|
||||
log.Info().Msgf("Node %s is offline", v.ID)
|
||||
}
|
||||
}
|
||||
tunnelEnvVar := strings.Join(tunnelAddresses, ",")
|
||||
|
||||
os.Setenv("LLAMACPP_GRPC_SERVERS", tunnelEnvVar)
|
||||
log.Debug().Msgf("setting LLAMACPP_GRPC_SERVERS to %s", tunnelEnvVar)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if r.Federated {
|
||||
_, port, err := net.SplitHostPort(r.Address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p2p.ExposeService(context.Background(), "localhost", port, token, p2p.FederatedID); err != nil {
|
||||
return err
|
||||
}
|
||||
node, err := p2p.NewNode(token)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p2p.ServiceDiscoverer(context.Background(), node, token, p2p.FederatedID, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,10 +5,10 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -18,6 +18,7 @@ type TranscriptCMD struct {
|
||||
Backend string `short:"b" default:"whisper" help:"Backend to run the transcription model"`
|
||||
Model string `short:"m" required:"" help:"Model name to run the TTS"`
|
||||
Language string `short:"l" help:"Language of the audio file"`
|
||||
Translate bool `short:"c" help:"Translate the transcription to english"`
|
||||
Threads int `short:"t" default:"1" help:"Number of threads used for parallel computation"`
|
||||
ModelsPath string `env:"LOCALAI_MODELS_PATH,MODELS_PATH" type:"path" default:"${basepath}/models" help:"Path containing models used for inferencing" group:"storage"`
|
||||
BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
|
||||
@@ -50,7 +51,7 @@ func (t *TranscriptCMD) Run(ctx *cliContext.Context) error {
|
||||
}
|
||||
}()
|
||||
|
||||
tr, err := backend.ModelTranscription(t.Filename, t.Language, ml, c, opts)
|
||||
tr, err := backend.ModelTranscription(t.Filename, t.Language, t.Translate, ml, c, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
|
||||
@@ -1,16 +1,22 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
gguf "github.com/thxcode/gguf-parser-go"
|
||||
)
|
||||
|
||||
type UtilCMD struct {
|
||||
GGUFInfo GGUFInfoCMD `cmd:"" name:"gguf-info" help:"Get information about a GGUF file"`
|
||||
HFScan HFScanCMD `cmd:"" name:"hf-scan" help:"Checks installed models for known security issues. WARNING: this is a best-effort feature and may not catch everything!"`
|
||||
}
|
||||
|
||||
type GGUFInfoCMD struct {
|
||||
@@ -18,6 +24,12 @@ type GGUFInfoCMD struct {
|
||||
Header bool `optional:"" default:"false" name:"header" help:"Show header information"`
|
||||
}
|
||||
|
||||
type HFScanCMD struct {
|
||||
ModelsPath string `env:"LOCALAI_MODELS_PATH,MODELS_PATH" type:"path" default:"${basepath}/models" help:"Path containing models used for inferencing" group:"storage"`
|
||||
Galleries string `env:"LOCALAI_GALLERIES,GALLERIES" help:"JSON list of galleries" group:"models" default:"${galleries}"`
|
||||
ToScan []string `arg:""`
|
||||
}
|
||||
|
||||
func (u *GGUFInfoCMD) Run(ctx *cliContext.Context) error {
|
||||
if u.Args == nil || len(u.Args) == 0 {
|
||||
return fmt.Errorf("no GGUF file provided")
|
||||
@@ -53,3 +65,37 @@ func (u *GGUFInfoCMD) Run(ctx *cliContext.Context) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hfscmd *HFScanCMD) Run(ctx *cliContext.Context) error {
|
||||
log.Info().Msg("LocalAI Security Scanner - This is BEST EFFORT functionality! Currently limited to huggingface models!")
|
||||
if len(hfscmd.ToScan) == 0 {
|
||||
log.Info().Msg("Checking all installed models against galleries")
|
||||
var galleries []config.Gallery
|
||||
if err := json.Unmarshal([]byte(hfscmd.Galleries), &galleries); err != nil {
|
||||
log.Error().Err(err).Msg("unable to load galleries")
|
||||
}
|
||||
|
||||
err := gallery.SafetyScanGalleryModels(galleries, hfscmd.ModelsPath)
|
||||
if err == nil {
|
||||
log.Info().Msg("No security warnings were detected for your installed models. Please note that this is a BEST EFFORT tool, and all issues may not be detected.")
|
||||
} else {
|
||||
log.Error().Err(err).Msg("! WARNING ! A known-vulnerable model is installed!")
|
||||
}
|
||||
return err
|
||||
} else {
|
||||
var errs error = nil
|
||||
for _, uri := range hfscmd.ToScan {
|
||||
log.Info().Str("uri", uri).Msg("scanning specific uri")
|
||||
scanResults, err := downloader.HuggingFaceScan(uri)
|
||||
if err != nil && !errors.Is(err, downloader.ErrNonHuggingFaceFile) {
|
||||
log.Error().Err(err).Strs("clamAV", scanResults.ClamAVInfectedFiles).Strs("pickles", scanResults.DangerousPickles).Msg("! WARNING ! A known-vulnerable model is included in this repo!")
|
||||
errs = errors.Join(errs, err)
|
||||
}
|
||||
}
|
||||
if errs != nil {
|
||||
return errs
|
||||
}
|
||||
log.Info().Msg("No security warnings were detected for your installed models. Please note that this is a BEST EFFORT tool, and all issues may not be detected.")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,8 +5,9 @@ import (
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
"github.com/go-skynet/LocalAI/pkg/assets"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/pkg/assets"
|
||||
"github.com/mudler/LocalAI/pkg/library"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -27,17 +28,18 @@ func (r *LLamaCPP) Run(ctx *cliContext.Context) error {
|
||||
return fmt.Errorf("usage: local-ai worker llama-cpp-rpc -- <llama-rpc-server-args>")
|
||||
}
|
||||
|
||||
grpcProcess := assets.ResolvePath(
|
||||
r.BackendAssetsPath,
|
||||
"util",
|
||||
"llama-cpp-rpc-server",
|
||||
)
|
||||
|
||||
args := os.Args[4:]
|
||||
args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess)
|
||||
|
||||
args = append([]string{grpcProcess}, args...)
|
||||
return syscall.Exec(
|
||||
assets.ResolvePath(
|
||||
r.BackendAssetsPath,
|
||||
"util",
|
||||
"llama-cpp-rpc-server",
|
||||
),
|
||||
append([]string{
|
||||
assets.ResolvePath(
|
||||
r.BackendAssetsPath,
|
||||
"util",
|
||||
"llama-cpp-rpc-server",
|
||||
)}, os.Args[4:]...),
|
||||
grpcProcess,
|
||||
args,
|
||||
os.Environ())
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ package worker
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
)
|
||||
|
||||
type P2P struct{}
|
||||
|
||||
@@ -10,16 +10,17 @@ import (
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
cliContext "github.com/go-skynet/LocalAI/core/cli/context"
|
||||
"github.com/go-skynet/LocalAI/core/p2p"
|
||||
"github.com/go-skynet/LocalAI/pkg/assets"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/pkg/assets"
|
||||
"github.com/mudler/LocalAI/pkg/library"
|
||||
"github.com/phayes/freeport"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type P2P struct {
|
||||
WorkerFlags `embed:""`
|
||||
Token string `env:"LOCALAI_TOKEN,TOKEN" help:"JSON list of galleries"`
|
||||
Token string `env:"LOCALAI_TOKEN,LOCALAI_P2P_TOKEN,TOKEN" help:"P2P token to use"`
|
||||
NoRunner bool `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"`
|
||||
RunnerAddress string `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"`
|
||||
RunnerPort string `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"`
|
||||
@@ -58,7 +59,7 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
|
||||
p = r.RunnerPort
|
||||
}
|
||||
|
||||
err = p2p.BindLLamaCPPWorker(context.Background(), address, p, r.Token)
|
||||
err = p2p.ExposeService(context.Background(), address, p, r.Token, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -71,13 +72,18 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
|
||||
go func() {
|
||||
for {
|
||||
log.Info().Msgf("Starting llama-cpp-rpc-server on '%s:%d'", address, port)
|
||||
|
||||
grpcProcess := assets.ResolvePath(
|
||||
r.BackendAssetsPath,
|
||||
"util",
|
||||
"llama-cpp-rpc-server",
|
||||
)
|
||||
|
||||
args := append([]string{"--host", address, "--port", fmt.Sprint(port)}, r.ExtraLLamaCPPArgs...)
|
||||
args, grpcProcess = library.LoadLDSO(r.BackendAssetsPath, args, grpcProcess)
|
||||
|
||||
cmd := exec.Command(
|
||||
assets.ResolvePath(
|
||||
r.BackendAssetsPath,
|
||||
"util",
|
||||
"llama-cpp-rpc-server",
|
||||
),
|
||||
append([]string{"--host", address, "--port", fmt.Sprint(port)}, r.ExtraLLamaCPPArgs...)...,
|
||||
grpcProcess, args...,
|
||||
)
|
||||
|
||||
cmd.Env = os.Environ()
|
||||
@@ -86,14 +92,14 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
|
||||
cmd.Stdout = os.Stdout
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to start llama-cpp-rpc-server")
|
||||
log.Error().Any("grpcProcess", grpcProcess).Any("args", args).Err(err).Msg("Failed to start llama-cpp-rpc-server")
|
||||
}
|
||||
|
||||
cmd.Wait()
|
||||
}
|
||||
}()
|
||||
|
||||
err = p2p.BindLLamaCPPWorker(context.Background(), address, fmt.Sprint(port), r.Token)
|
||||
err = p2p.ExposeService(context.Background(), address, fmt.Sprint(port), r.Token, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -6,8 +6,7 @@ import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/xsysinfo"
|
||||
"github.com/mudler/LocalAI/pkg/xsysinfo"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -15,6 +14,7 @@ type ApplicationConfig struct {
|
||||
Context context.Context
|
||||
ConfigFile string
|
||||
ModelPath string
|
||||
LibPath string
|
||||
UploadLimitMB, Threads, ContextSize int
|
||||
DisableWebUI bool
|
||||
F16 bool
|
||||
@@ -31,11 +31,13 @@ type ApplicationConfig struct {
|
||||
PreloadModelsFromPath string
|
||||
CORSAllowOrigins string
|
||||
ApiKeys []string
|
||||
EnforcePredownloadScans bool
|
||||
OpaqueErrors bool
|
||||
P2PToken string
|
||||
|
||||
ModelLibraryURL string
|
||||
|
||||
Galleries []gallery.Gallery
|
||||
Galleries []Gallery
|
||||
|
||||
BackendAssets embed.FS
|
||||
AssetsDestination string
|
||||
@@ -95,12 +97,24 @@ func WithCsrf(b bool) AppOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithP2PToken(s string) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.P2PToken = s
|
||||
}
|
||||
}
|
||||
|
||||
func WithModelLibraryURL(url string) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.ModelLibraryURL = url
|
||||
}
|
||||
}
|
||||
|
||||
func WithLibPath(path string) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.LibPath = path
|
||||
}
|
||||
}
|
||||
|
||||
var EnableWatchDog = func(o *ApplicationConfig) {
|
||||
o.WatchDog = true
|
||||
}
|
||||
@@ -173,10 +187,10 @@ func WithBackendAssets(f embed.FS) AppOption {
|
||||
func WithStringGalleries(galls string) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
if galls == "" {
|
||||
o.Galleries = []gallery.Gallery{}
|
||||
o.Galleries = []Gallery{}
|
||||
return
|
||||
}
|
||||
var galleries []gallery.Gallery
|
||||
var galleries []Gallery
|
||||
if err := json.Unmarshal([]byte(galls), &galleries); err != nil {
|
||||
log.Error().Err(err).Msg("failed loading galleries")
|
||||
}
|
||||
@@ -184,7 +198,7 @@ func WithStringGalleries(galls string) AppOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithGalleries(galleries []gallery.Gallery) AppOption {
|
||||
func WithGalleries(galleries []Gallery) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.Galleries = append(o.Galleries, galleries...)
|
||||
}
|
||||
@@ -288,6 +302,12 @@ func WithApiKeys(apiKeys []string) AppOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithEnforcedPredownloadScans(enforced bool) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.EnforcePredownloadScans = enforced
|
||||
}
|
||||
}
|
||||
|
||||
func WithOpaqueErrors(opaque bool) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.OpaqueErrors = opaque
|
||||
|
||||
@@ -5,10 +5,10 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/go-skynet/LocalAI/pkg/functions"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -390,10 +390,6 @@ func (c *BackendConfig) Validate() bool {
|
||||
}
|
||||
}
|
||||
|
||||
if c.Name == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.Backend != "" {
|
||||
// a regex that checks that is a string name with no special characters, except '-' and '_'
|
||||
re := regexp.MustCompile(`^[a-zA-Z0-9-_]+$`)
|
||||
|
||||
@@ -11,9 +11,9 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/charmbracelet/glamour"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
@@ -16,7 +16,8 @@ var _ = Describe("Test cases for config related functions", func() {
|
||||
Expect(err).To(BeNil())
|
||||
defer os.Remove(tmp.Name())
|
||||
_, err = tmp.WriteString(
|
||||
`backend: "foo-bar"
|
||||
`backend: "../foo-bar"
|
||||
name: "foo"
|
||||
parameters:
|
||||
model: "foo-bar"`)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
core/config/gallery.go (new file, 6 lines)
@@ -0,0 +1,6 @@
package config

type Gallery struct {
URL string `json:"url" yaml:"url"`
Name string `json:"name" yaml:"name"`
}
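The new config.Gallery type is what both the CLI galleries setting (see WithStringGalleries earlier in this diff) and the gallery HTTP endpoints decode into. As a rough sketch, not part of this diff, a JSON gallery list would unmarshal as below; the gallery name and index URL are sample values borrowed from the test code further down in this compare view.

package main

import (
	"encoding/json"
	"fmt"
)

// Gallery mirrors the struct added in core/config/gallery.go above.
type Gallery struct {
	URL  string `json:"url" yaml:"url"`
	Name string `json:"name" yaml:"name"`
}

func main() {
	// Sample JSON list in the shape WithStringGalleries unmarshals.
	galls := `[{"name":"model-gallery","url":"https://raw.githubusercontent.com/go-skynet/model-gallery/main/index.yaml"}]`

	var galleries []Gallery
	if err := json.Unmarshal([]byte(galls), &galleries); err != nil {
		fmt.Println("failed loading galleries:", err)
		return
	}
	fmt.Printf("%+v\n", galleries)
}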
@@ -5,8 +5,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,19 +7,15 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type Gallery struct {
|
||||
URL string `json:"url" yaml:"url"`
|
||||
Name string `json:"name" yaml:"name"`
|
||||
}
|
||||
|
||||
// Installs a model from the gallery
|
||||
func InstallModelFromGallery(galleries []Gallery, name string, basePath string, req GalleryModel, downloadStatus func(string, string, string, float64)) error {
|
||||
func InstallModelFromGallery(galleries []config.Gallery, name string, basePath string, req GalleryModel, downloadStatus func(string, string, string, float64), enforceScan bool) error {
|
||||
|
||||
applyModel := func(model *GalleryModel) error {
|
||||
name = strings.ReplaceAll(name, string(os.PathSeparator), "__")
|
||||
@@ -67,7 +63,7 @@ func InstallModelFromGallery(galleries []Gallery, name string, basePath string,
|
||||
return err
|
||||
}
|
||||
|
||||
if err := InstallModel(basePath, installName, &config, model.Overrides, downloadStatus); err != nil {
|
||||
if err := InstallModel(basePath, installName, &config, model.Overrides, downloadStatus, enforceScan); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -117,7 +113,7 @@ func FindModel(models []*GalleryModel, name string, basePath string) *GalleryMod
|
||||
// List available models
|
||||
// Models galleries are a list of yaml files that are hosted on a remote server (for example github).
|
||||
// Each yaml file contains a list of models that can be downloaded and optionally overrides to define a new model setting.
|
||||
func AvailableGalleryModels(galleries []Gallery, basePath string) ([]*GalleryModel, error) {
|
||||
func AvailableGalleryModels(galleries []config.Gallery, basePath string) ([]*GalleryModel, error) {
|
||||
var models []*GalleryModel
|
||||
|
||||
// Get models from galleries
|
||||
@@ -134,7 +130,7 @@ func AvailableGalleryModels(galleries []Gallery, basePath string) ([]*GalleryMod
|
||||
|
||||
func findGalleryURLFromReferenceURL(url string, basePath string) (string, error) {
|
||||
var refFile string
|
||||
err := downloader.GetURI(url, basePath, func(url string, d []byte) error {
|
||||
err := downloader.DownloadAndUnmarshal(url, basePath, func(url string, d []byte) error {
|
||||
refFile = string(d)
|
||||
if len(refFile) == 0 {
|
||||
return fmt.Errorf("invalid reference file at url %s: %s", url, d)
|
||||
@@ -146,7 +142,7 @@ func findGalleryURLFromReferenceURL(url string, basePath string) (string, error)
|
||||
return refFile, err
|
||||
}
|
||||
|
||||
func getGalleryModels(gallery Gallery, basePath string) ([]*GalleryModel, error) {
|
||||
func getGalleryModels(gallery config.Gallery, basePath string) ([]*GalleryModel, error) {
|
||||
var models []*GalleryModel = []*GalleryModel{}
|
||||
|
||||
if strings.HasSuffix(gallery.URL, ".ref") {
|
||||
@@ -157,7 +153,7 @@ func getGalleryModels(gallery Gallery, basePath string) ([]*GalleryModel, error)
|
||||
}
|
||||
}
|
||||
|
||||
err := downloader.GetURI(gallery.URL, basePath, func(url string, d []byte) error {
|
||||
err := downloader.DownloadAndUnmarshal(gallery.URL, basePath, func(url string, d []byte) error {
|
||||
return yaml.Unmarshal(d, &models)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -232,3 +228,29 @@ func DeleteModelFromSystem(basePath string, name string, additionalFiles []strin

return err
}

// This is ***NEVER*** going to be perfect or finished.
// This is a BEST EFFORT function to surface known-vulnerable models to users.
func SafetyScanGalleryModels(galleries []config.Gallery, basePath string) error {
galleryModels, err := AvailableGalleryModels(galleries, basePath)
if err != nil {
return err
}
for _, gM := range galleryModels {
if gM.Installed {
err = errors.Join(err, SafetyScanGalleryModel(gM))
}
}
return err
}

func SafetyScanGalleryModel(galleryModel *GalleryModel) error {
for _, file := range galleryModel.AdditionalFiles {
scanResults, err := downloader.HuggingFaceScan(file.URI)
if err != nil && !errors.Is(err, downloader.ErrNonHuggingFaceFile) {
log.Error().Str("model", galleryModel.Name).Strs("clamAV", scanResults.ClamAVInfectedFiles).Strs("pickles", scanResults.DangerousPickles).Msg("Contains unsafe file(s)!")
return err
}
}
return nil
}
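As a hedged usage sketch (not taken from the repository), the two scan helpers above could be driven from application code roughly as follows; the gallery entry and the /models path are placeholder values.

package main

import (
	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/gallery"
	"github.com/rs/zerolog/log"
)

func main() {
	// Placeholder gallery; in LocalAI this comes from the application config.
	galleries := []config.Gallery{
		{Name: "model-gallery", URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/index.yaml"},
	}

	// Best-effort scan of every installed model from the configured galleries;
	// per-model errors are joined into the returned error.
	if err := gallery.SafetyScanGalleryModels(galleries, "/models"); err != nil {
		log.Error().Err(err).Msg("safety scan reported unsafe files")
	}
}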
@@ -1,13 +1,16 @@
|
||||
package gallery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/imdario/mergo"
|
||||
lconfig "github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
@@ -65,7 +68,7 @@ type PromptTemplate struct {
|
||||
|
||||
func GetGalleryConfigFromURL(url string, basePath string) (Config, error) {
|
||||
var config Config
|
||||
err := downloader.GetURI(url, basePath, func(url string, d []byte) error {
|
||||
err := downloader.DownloadAndUnmarshal(url, basePath, func(url string, d []byte) error {
|
||||
return yaml.Unmarshal(d, &config)
|
||||
})
|
||||
if err != nil {
|
||||
@@ -92,7 +95,7 @@ func ReadConfigFile(filePath string) (*Config, error) {
|
||||
return &config, nil
|
||||
}
|
||||
|
||||
func InstallModel(basePath, nameOverride string, config *Config, configOverrides map[string]interface{}, downloadStatus func(string, string, string, float64)) error {
|
||||
func InstallModel(basePath, nameOverride string, config *Config, configOverrides map[string]interface{}, downloadStatus func(string, string, string, float64), enforceScan bool) error {
|
||||
// Create base path if it doesn't exist
|
||||
err := os.MkdirAll(basePath, 0750)
|
||||
if err != nil {
|
||||
@@ -110,9 +113,18 @@ func InstallModel(basePath, nameOverride string, config *Config, configOverrides
|
||||
if err := utils.VerifyPath(file.Filename, basePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create file path
|
||||
filePath := filepath.Join(basePath, file.Filename)
|
||||
|
||||
if enforceScan {
|
||||
scanResults, err := downloader.HuggingFaceScan(file.URI)
|
||||
if err != nil && !errors.Is(err, downloader.ErrNonHuggingFaceFile) {
|
||||
log.Error().Str("model", config.Name).Strs("clamAV", scanResults.ClamAVInfectedFiles).Strs("pickles", scanResults.DangerousPickles).Msg("Contains unsafe file(s)!")
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := downloader.DownloadFile(file.URI, filePath, file.SHA256, i, len(config.Files), downloadStatus); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -172,6 +184,15 @@ func InstallModel(basePath, nameOverride string, config *Config, configOverrides
|
||||
return fmt.Errorf("failed to marshal updated config YAML: %v", err)
|
||||
}
|
||||
|
||||
backendConfig := lconfig.BackendConfig{}
|
||||
err = yaml.Unmarshal(updatedConfigYAML, &backendConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal updated config YAML: %v", err)
|
||||
}
|
||||
if !backendConfig.Validate() {
|
||||
return fmt.Errorf("failed to validate updated config YAML")
|
||||
}
|
||||
|
||||
err = os.WriteFile(configFilePath, updatedConfigYAML, 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write updated config file: %v", err)
|
||||
@@ -5,7 +5,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
. "github.com/mudler/LocalAI/core/gallery"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
"gopkg.in/yaml.v3"
|
||||
@@ -20,7 +21,7 @@ var _ = Describe("Model test", func() {
|
||||
defer os.RemoveAll(tempdir)
|
||||
c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = InstallModel(tempdir, "", c, map[string]interface{}{}, func(string, string, string, float64) {})
|
||||
err = InstallModel(tempdir, "", c, map[string]interface{}{}, func(string, string, string, float64) {}, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, f := range []string{"cerebras", "cerebras-completion.tmpl", "cerebras-chat.tmpl", "cerebras.yaml"} {
|
||||
@@ -54,7 +55,7 @@ var _ = Describe("Model test", func() {
|
||||
err = os.WriteFile(galleryFilePath, out, 0600)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(filepath.IsAbs(galleryFilePath)).To(BeTrue(), galleryFilePath)
|
||||
galleries := []Gallery{
|
||||
galleries := []config.Gallery{
|
||||
{
|
||||
Name: "test",
|
||||
URL: "file://" + galleryFilePath,
|
||||
@@ -68,7 +69,7 @@ var _ = Describe("Model test", func() {
|
||||
Expect(models[0].URL).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml"))
|
||||
Expect(models[0].Installed).To(BeFalse())
|
||||
|
||||
err = InstallModelFromGallery(galleries, "test@bert", tempdir, GalleryModel{}, func(s1, s2, s3 string, f float64) {})
|
||||
err = InstallModelFromGallery(galleries, "test@bert", tempdir, GalleryModel{}, func(s1, s2, s3 string, f float64) {}, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
dat, err := os.ReadFile(filepath.Join(tempdir, "bert.yaml"))
|
||||
@@ -105,7 +106,7 @@ var _ = Describe("Model test", func() {
|
||||
c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = InstallModel(tempdir, "foo", c, map[string]interface{}{}, func(string, string, string, float64) {})
|
||||
err = InstallModel(tempdir, "foo", c, map[string]interface{}{}, func(string, string, string, float64) {}, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, f := range []string{"cerebras", "cerebras-completion.tmpl", "cerebras-chat.tmpl", "foo.yaml"} {
|
||||
@@ -121,7 +122,7 @@ var _ = Describe("Model test", func() {
|
||||
c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = InstallModel(tempdir, "foo", c, map[string]interface{}{"backend": "foo"}, func(string, string, string, float64) {})
|
||||
err = InstallModel(tempdir, "foo", c, map[string]interface{}{"backend": "foo"}, func(string, string, string, float64) {}, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
for _, f := range []string{"cerebras", "cerebras-completion.tmpl", "cerebras-chat.tmpl", "foo.yaml"} {
|
||||
@@ -147,7 +148,7 @@ var _ = Describe("Model test", func() {
|
||||
c, err := ReadConfigFile(filepath.Join(os.Getenv("FIXTURES"), "gallery_simple.yaml"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = InstallModel(tempdir, "../../../foo", c, map[string]interface{}{}, func(string, string, string, float64) {})
|
||||
err = InstallModel(tempdir, "../../../foo", c, map[string]interface{}{}, func(string, string, string, float64) {}, true)
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
@@ -1,5 +1,7 @@
package gallery

import "github.com/mudler/LocalAI/core/config"

type GalleryOp struct {
Id string
GalleryModelName string
@@ -7,7 +9,7 @@ type GalleryOp struct {
Delete bool

Req GalleryModel
Galleries []Gallery
Galleries []config.Gallery
}

type GalleryOpStatus struct {
@@ -3,6 +3,8 @@ package gallery
import (
"fmt"
"strings"

"github.com/mudler/LocalAI/core/config"
)

// GalleryModel is the struct used to represent a model in the gallery returned by the endpoint.
@@ -23,7 +25,7 @@ type GalleryModel struct {
// AdditionalFiles are used to add additional files to the model
AdditionalFiles []File `json:"files,omitempty" yaml:"files,omitempty"`
// Gallery is a reference to the gallery which contains the model
Gallery Gallery `json:"gallery,omitempty" yaml:"gallery,omitempty"`
Gallery config.Gallery `json:"gallery,omitempty" yaml:"gallery,omitempty"`
// Installed is used to indicate if the model is installed or not
Installed bool `json:"installed,omitempty" yaml:"installed,omitempty"`
}
@@ -1,7 +1,7 @@
package gallery_test

import (
. "github.com/go-skynet/LocalAI/pkg/gallery"
. "github.com/mudler/LocalAI/core/gallery"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -6,16 +6,16 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/http/endpoints/localai"
|
||||
"github.com/go-skynet/LocalAI/core/http/endpoints/openai"
|
||||
"github.com/go-skynet/LocalAI/core/http/routes"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/localai"
|
||||
"github.com/mudler/LocalAI/core/http/endpoints/openai"
|
||||
"github.com/mudler/LocalAI/core/http/routes"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
|
||||
"github.com/gofiber/contrib/fiberzerolog"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
|
||||
@@ -13,15 +13,15 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
. "github.com/go-skynet/LocalAI/core/http"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/core/startup"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
. "github.com/mudler/LocalAI/core/http"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/startup"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/downloader"
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
"gopkg.in/yaml.v3"
|
||||
@@ -74,7 +74,7 @@ func getModelStatus(url string) (response map[string]interface{}) {
|
||||
|
||||
func getModels(url string) (response []gallery.GalleryModel) {
|
||||
// TODO: No tests currently seem to exercise file:// urls. Fix?
|
||||
downloader.GetURI(url, "", func(url string, i []byte) error {
|
||||
downloader.DownloadAndUnmarshal(url, "", func(url string, i []byte) error {
|
||||
// Unmarshal YAML data into a struct
|
||||
return json.Unmarshal(i, &response)
|
||||
})
|
||||
@@ -247,7 +247,7 @@ var _ = Describe("API test", func() {
|
||||
err = os.WriteFile(filepath.Join(modelDir, "gallery_simple.yaml"), out, 0600)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
galleries := []gallery.Gallery{
|
||||
galleries := []config.Gallery{
|
||||
{
|
||||
Name: "test",
|
||||
URL: "file://" + filepath.Join(modelDir, "gallery_simple.yaml"),
|
||||
@@ -603,7 +603,7 @@ var _ = Describe("API test", func() {
|
||||
|
||||
c, cancel = context.WithCancel(context.Background())
|
||||
|
||||
galleries := []gallery.Gallery{
|
||||
galleries := []config.Gallery{
|
||||
{
|
||||
Name: "model-gallery",
|
||||
URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/index.yaml",
|
||||
|
||||
@@ -4,8 +4,10 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -13,7 +15,7 @@ import (
|
||||
// If no model is specified, it will take the first available
|
||||
// Takes a model string as input which should be the one received from the user request.
|
||||
// It returns the model name resolved from the context and an error if any.
|
||||
func ModelFromContext(ctx *fiber.Ctx, loader *model.ModelLoader, modelInput string, firstModel bool) (string, error) {
|
||||
func ModelFromContext(ctx *fiber.Ctx, cl *config.BackendConfigLoader, loader *model.ModelLoader, modelInput string, firstModel bool) (string, error) {
|
||||
if ctx.Params("model") != "" {
|
||||
modelInput = ctx.Params("model")
|
||||
}
|
||||
@@ -24,7 +26,7 @@ func ModelFromContext(ctx *fiber.Ctx, loader *model.ModelLoader, modelInput stri
|
||||
|
||||
// If no model was specified, take the first available
|
||||
if modelInput == "" && !bearerExists && firstModel {
|
||||
models, _ := loader.ListModels()
|
||||
models, _ := services.ListModels(cl, loader, "", true)
|
||||
if len(models) > 0 {
|
||||
modelInput = models[0]
|
||||
log.Debug().Msgf("No model specified, using: %s", modelInput)
|
||||
|
||||
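A minimal, hypothetical handler showing how callers adapt to the widened ModelFromContext signature; the extra *config.BackendConfigLoader argument is what lets the fallback path list configured models via services.ListModels instead of only loose model files. Everything below except the ModelFromContext call shape is illustrative, not repository code.

package example

import (
	"github.com/gofiber/fiber/v2"
	"github.com/mudler/LocalAI/core/config"
	fiberContext "github.com/mudler/LocalAI/core/http/ctx"
	"github.com/mudler/LocalAI/pkg/model"
)

// EchoModelEndpoint is a made-up endpoint used only to show the new call shape.
func EchoModelEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader) func(c *fiber.Ctx) error {
	return func(c *fiber.Ctx) error {
		// Resolve the model from the path parameter or bearer token; with
		// firstModel=true the fallback is the first model known to the loaders.
		modelFile, err := fiberContext.ModelFromContext(c, cl, ml, "", true)
		if err != nil {
			return err
		}
		return c.SendString(modelFile)
	}
}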
@@ -6,15 +6,24 @@ import (
|
||||
|
||||
"github.com/chasefleming/elem-go"
|
||||
"github.com/chasefleming/elem-go/attrs"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/xsync"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
"github.com/mudler/LocalAI/pkg/xsync"
|
||||
)
|
||||
|
||||
const (
|
||||
noImage = "https://upload.wikimedia.org/wikipedia/commons/6/65/No-Image-Placeholder.svg"
|
||||
)
|
||||
|
||||
func renderElements(n []elem.Node) string {
|
||||
render := ""
|
||||
for _, r := range n {
|
||||
render += r.Render()
|
||||
}
|
||||
return render
|
||||
}
|
||||
|
||||
func DoneProgress(galleryID, text string, showDelete bool) string {
|
||||
var modelName = galleryID
|
||||
// Split by @ and grab the name
|
||||
@@ -72,6 +81,135 @@ func ProgressBar(progress string) string {
|
||||
).Render()
|
||||
}
|
||||
|
||||
func P2PNodeStats(nodes []p2p.NodeData) string {
|
||||
/*
|
||||
<div class="bg-gray-800 p-6 rounded-lg shadow-lg text-left">
|
||||
<p class="text-xl font-semibold text-gray-200">Total Workers Detected: {{ len .Nodes }}</p>
|
||||
{{ $online := 0 }}
|
||||
{{ range .Nodes }}
|
||||
{{ if .IsOnline }}
|
||||
{{ $online = add $online 1 }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
<p class="text-xl font-semibold text-gray-200">Total Online Workers: {{$online}}</p>
|
||||
</div>
|
||||
*/
|
||||
|
||||
online := 0
|
||||
for _, n := range nodes {
|
||||
if n.IsOnline() {
|
||||
online++
|
||||
}
|
||||
}
|
||||
|
||||
class := "text-green-500"
|
||||
if online == 0 {
|
||||
class = "text-red-500"
|
||||
}
|
||||
/*
|
||||
<i class="fas fa-circle animate-pulse text-green-500 ml-2 mr-1"></i>
|
||||
*/
|
||||
circle := elem.I(attrs.Props{
|
||||
"class": "fas fa-circle animate-pulse " + class + " ml-2 mr-1",
|
||||
})
|
||||
nodesElements := []elem.Node{
|
||||
elem.Span(
|
||||
attrs.Props{
|
||||
"class": class,
|
||||
},
|
||||
circle,
|
||||
elem.Text(fmt.Sprintf("%d", online)),
|
||||
),
|
||||
elem.Span(
|
||||
attrs.Props{
|
||||
"class": "text-gray-200",
|
||||
},
|
||||
elem.Text(fmt.Sprintf("/%d", len(nodes))),
|
||||
),
|
||||
}
|
||||
|
||||
return renderElements(nodesElements)
|
||||
}
|
||||
|
||||
func P2PNodeBoxes(nodes []p2p.NodeData) string {
|
||||
/*
|
||||
<div class="bg-gray-800 p-4 rounded-lg shadow-lg text-left">
|
||||
<div class="flex items-center mb-2">
|
||||
<i class="fas fa-desktop text-gray-400 mr-2"></i>
|
||||
<span class="text-gray-200 font-semibold">{{.ID}}</span>
|
||||
</div>
|
||||
<p class="text-sm text-gray-400 mt-2 flex items-center">
|
||||
Status:
|
||||
<i class="fas fa-circle {{ if .IsOnline }}text-green-500{{ else }}text-red-500{{ end }} ml-2 mr-1"></i>
|
||||
<span class="{{ if .IsOnline }}text-green-400{{ else }}text-red-400{{ end }}">
|
||||
{{ if .IsOnline }}Online{{ else }}Offline{{ end }}
|
||||
</span>
|
||||
</p>
|
||||
</div>
|
||||
*/
|
||||
|
||||
nodesElements := []elem.Node{}
|
||||
|
||||
for _, n := range nodes {
|
||||
|
||||
nodesElements = append(nodesElements,
|
||||
elem.Div(
|
||||
attrs.Props{
|
||||
"class": "bg-gray-700 p-6 rounded-lg shadow-lg text-left",
|
||||
},
|
||||
elem.P(
|
||||
attrs.Props{
|
||||
"class": "text-sm text-gray-400 mt-2 flex",
|
||||
},
|
||||
elem.I(
|
||||
attrs.Props{
|
||||
"class": "fas fa-desktop text-gray-400 mr-2",
|
||||
},
|
||||
),
|
||||
elem.Text("Name: "),
|
||||
elem.Span(
|
||||
attrs.Props{
|
||||
"class": "text-gray-200 font-semibold ml-2 mr-1",
|
||||
},
|
||||
elem.Text(n.ID),
|
||||
),
|
||||
elem.Text("Status: "),
|
||||
elem.If(
|
||||
n.IsOnline(),
|
||||
elem.I(
|
||||
attrs.Props{
|
||||
"class": "fas fa-circle animate-pulse text-green-500 ml-2 mr-1",
|
||||
},
|
||||
),
|
||||
elem.I(
|
||||
attrs.Props{
|
||||
"class": "fas fa-circle animate-pulse text-red-500 ml-2 mr-1",
|
||||
},
|
||||
),
|
||||
),
|
||||
elem.If(
|
||||
n.IsOnline(),
|
||||
elem.Span(
|
||||
attrs.Props{
|
||||
"class": "text-green-400",
|
||||
},
|
||||
|
||||
elem.Text("Online"),
|
||||
),
|
||||
elem.Span(
|
||||
attrs.Props{
|
||||
"class": "text-red-400",
|
||||
},
|
||||
elem.Text("Offline"),
|
||||
),
|
||||
),
|
||||
),
|
||||
))
|
||||
}
|
||||
|
||||
return renderElements(nodesElements)
|
||||
}
|
||||
|
||||
func StartProgressBar(uid, progress, text string) string {
|
||||
if progress == "" {
|
||||
progress = "0"
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
package elevenlabs
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
fiberContext "github.com/go-skynet/LocalAI/core/http/ctx"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
fiberContext "github.com/mudler/LocalAI/core/http/ctx"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -28,7 +28,7 @@ func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
|
||||
return err
|
||||
}
|
||||
|
||||
modelFile, err := fiberContext.ModelFromContext(c, ml, input.ModelID, false)
|
||||
modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.ModelID, false)
|
||||
if err != nil {
|
||||
modelFile = input.ModelID
|
||||
log.Warn().Msgf("Model not found in context: %s", input.ModelID)
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
package jina
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
fiberContext "github.com/go-skynet/LocalAI/core/http/ctx"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/grpc/proto"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
fiberContext "github.com/mudler/LocalAI/core/http/ctx"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/grpc/proto"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -28,7 +28,7 @@ func JINARerankEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, a
|
||||
return err
|
||||
}
|
||||
|
||||
modelFile, err := fiberContext.ModelFromContext(c, ml, input.Model, false)
|
||||
modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.Model, false)
|
||||
if err != nil {
|
||||
modelFile = input.Model
|
||||
log.Warn().Msgf("Model not found in context: %s", input.Model)
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
package localai
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
)
|
||||
|
||||
func BackendMonitorEndpoint(bm *services.BackendMonitorService) func(c *fiber.Ctx) error {
|
||||
|
||||
@@ -5,15 +5,16 @@ import (
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type ModelGalleryEndpointService struct {
|
||||
galleries []gallery.Gallery
|
||||
galleries []config.Gallery
|
||||
modelPath string
|
||||
galleryApplier *services.GalleryService
|
||||
}
|
||||
@@ -24,7 +25,7 @@ type GalleryModel struct {
|
||||
gallery.GalleryModel
|
||||
}
|
||||
|
||||
func CreateModelGalleryEndpointService(galleries []gallery.Gallery, modelPath string, galleryApplier *services.GalleryService) ModelGalleryEndpointService {
|
||||
func CreateModelGalleryEndpointService(galleries []config.Gallery, modelPath string, galleryApplier *services.GalleryService) ModelGalleryEndpointService {
|
||||
return ModelGalleryEndpointService{
|
||||
galleries: galleries,
|
||||
modelPath: modelPath,
|
||||
@@ -129,12 +130,12 @@ func (mgs *ModelGalleryEndpointService) ListModelGalleriesEndpoint() func(c *fib
|
||||
|
||||
func (mgs *ModelGalleryEndpointService) AddModelGalleryEndpoint() func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
input := new(gallery.Gallery)
|
||||
input := new(config.Gallery)
|
||||
// Get input data from the request body
|
||||
if err := c.BodyParser(input); err != nil {
|
||||
return err
|
||||
}
|
||||
if slices.ContainsFunc(mgs.galleries, func(gallery gallery.Gallery) bool {
|
||||
if slices.ContainsFunc(mgs.galleries, func(gallery config.Gallery) bool {
|
||||
return gallery.Name == input.Name
|
||||
}) {
|
||||
return fmt.Errorf("%s already exists", input.Name)
|
||||
@@ -151,17 +152,17 @@ func (mgs *ModelGalleryEndpointService) AddModelGalleryEndpoint() func(c *fiber.
|
||||
|
||||
func (mgs *ModelGalleryEndpointService) RemoveModelGalleryEndpoint() func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
input := new(gallery.Gallery)
|
||||
input := new(config.Gallery)
|
||||
// Get input data from the request body
|
||||
if err := c.BodyParser(input); err != nil {
|
||||
return err
|
||||
}
|
||||
if !slices.ContainsFunc(mgs.galleries, func(gallery gallery.Gallery) bool {
|
||||
if !slices.ContainsFunc(mgs.galleries, func(gallery config.Gallery) bool {
|
||||
return gallery.Name == input.Name
|
||||
}) {
|
||||
return fmt.Errorf("%s is not currently registered", input.Name)
|
||||
}
|
||||
mgs.galleries = slices.DeleteFunc(mgs.galleries, func(gallery gallery.Gallery) bool {
|
||||
mgs.galleries = slices.DeleteFunc(mgs.galleries, func(gallery config.Gallery) bool {
|
||||
return gallery.Name == input.Name
|
||||
})
|
||||
return c.Send(nil)
|
||||
|
||||
@@ -3,9 +3,9 @@ package localai
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/gofiber/fiber/v2/middleware/adaptor"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
package localai
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/go-skynet/LocalAI/pkg/store"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/store"
|
||||
)
|
||||
|
||||
func StoresSetEndpoint(sl *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
package localai
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
fiberContext "github.com/go-skynet/LocalAI/core/http/ctx"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
fiberContext "github.com/mudler/LocalAI/core/http/ctx"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -29,7 +29,7 @@ func TTSEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfi
|
||||
return err
|
||||
}
|
||||
|
||||
modelFile, err := fiberContext.ModelFromContext(c, ml, input.Model, false)
|
||||
modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.Model, false)
|
||||
if err != nil {
|
||||
modelFile = input.Model
|
||||
log.Warn().Msgf("Model not found in context: %s", input.Model)
|
||||
|
||||
@@ -1,17 +1,19 @@
|
||||
package localai
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/internal"
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
"github.com/mudler/LocalAI/internal"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func WelcomeEndpoint(appConfig *config.ApplicationConfig,
|
||||
cl *config.BackendConfigLoader, ml *model.ModelLoader, modelStatus func() (map[string]string, map[string]string)) func(*fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
models, _ := ml.ListModels()
|
||||
models, _ := services.ListModels(cl, ml, "", true)
|
||||
backendConfigs := cl.GetAllBackendConfigs()
|
||||
|
||||
galleryConfigs := map[string]*gallery.Config{}
|
||||
@@ -33,6 +35,7 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig,
|
||||
"Models": models,
|
||||
"ModelsConfig": backendConfigs,
|
||||
"GalleryConfig": galleryConfigs,
|
||||
"IsP2PEnabled": p2p.IsP2PEnabled(),
|
||||
"ApplicationConfig": appConfig,
|
||||
"ProcessingModels": processingModels,
|
||||
"TaskTypes": taskTypes,
|
||||
|
||||
@@ -9,10 +9,11 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -79,7 +80,7 @@ func CreateAssistantEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoad
|
||||
return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{"error": "Cannot parse JSON"})
|
||||
}
|
||||
|
||||
if !modelExists(ml, request.Model) {
|
||||
if !modelExists(cl, ml, request.Model) {
|
||||
log.Warn().Msgf("Model: %s was not found in list of models.", request.Model)
|
||||
return c.Status(fiber.StatusBadRequest).SendString("Model " + request.Model + " not found")
|
||||
}
|
||||
@@ -213,9 +214,9 @@ func filterAssistantsAfterID(assistants []Assistant, id string) []Assistant {
|
||||
return filteredAssistants
|
||||
}
|
||||
|
||||
func modelExists(ml *model.ModelLoader, modelName string) (found bool) {
|
||||
func modelExists(cl *config.BackendConfigLoader, ml *model.ModelLoader, modelName string) (found bool) {
|
||||
found = false
|
||||
models, err := ml.ListModels()
|
||||
models, err := services.ListModels(cl, ml, "", true)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -339,7 +340,7 @@ func CreateAssistantFileEndpoint(cl *config.BackendConfigLoader, ml *model.Model
|
||||
}
|
||||
}
|
||||
|
||||
return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find "))
|
||||
return c.Status(fiber.StatusNotFound).SendString(fmt.Sprintf("Unable to find %q", assistantID))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
@@ -13,9 +12,9 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -183,7 +182,7 @@ func TestAssistantEndpoints(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedStatus, response.StatusCode)
|
||||
if tt.expectedStatus != fiber.StatusOK {
|
||||
all, _ := ioutil.ReadAll(response.Body)
|
||||
all, _ := io.ReadAll(response.Body)
|
||||
assert.Equal(t, tt.expectedStringResult, string(all))
|
||||
} else {
|
||||
var result []Assistant
|
||||
@@ -279,6 +278,7 @@ func TestAssistantEndpoints(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
var getAssistant Assistant
|
||||
err = json.NewDecoder(modifyResponse.Body).Decode(&getAssistant)
|
||||
assert.NoError(t, err)
|
||||
|
||||
t.Cleanup(cleanupAllAssistants(t, app, []string{getAssistant.ID}))
|
||||
|
||||
@@ -391,7 +391,10 @@ func createAssistantFile(app *fiber.App, afr AssistantFileRequest, assistantId s
|
||||
}
|
||||
|
||||
var assistantFile AssistantFile
|
||||
all, err := ioutil.ReadAll(resp.Body)
|
||||
all, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return AssistantFile{}, resp, err
|
||||
}
|
||||
err = json.NewDecoder(strings.NewReader(string(all))).Decode(&assistantFile)
|
||||
if err != nil {
|
||||
return AssistantFile{}, resp, err
|
||||
@@ -422,8 +425,7 @@ func createAssistant(app *fiber.App, ar AssistantRequest) (Assistant, *http.Resp
|
||||
|
||||
var resultAssistant Assistant
|
||||
err = json.NewDecoder(strings.NewReader(string(bodyString))).Decode(&resultAssistant)
|
||||
|
||||
return resultAssistant, resp, nil
|
||||
return resultAssistant, resp, err
|
||||
}
|
||||
|
||||
func cleanupAllAssistants(t *testing.T, app *fiber.App, ids []string) func() {
|
||||
|
||||
@@ -8,13 +8,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/functions"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/valyala/fasthttp"
|
||||
)
|
||||
@@ -159,7 +159,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
|
||||
}
|
||||
|
||||
return func(c *fiber.Ctx) error {
|
||||
modelFile, input, err := readRequest(c, ml, startupOptions, true)
|
||||
modelFile, input, err := readRequest(c, cl, ml, startupOptions, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed reading parameters from request:%w", err)
|
||||
}
|
||||
|
||||
@@ -8,14 +8,14 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/functions"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/valyala/fasthttp"
|
||||
)
|
||||
@@ -57,7 +57,7 @@ func CompletionEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, a
|
||||
}
|
||||
|
||||
return func(c *fiber.Ctx) error {
|
||||
modelFile, input, err := readRequest(c, ml, appConfig, true)
|
||||
modelFile, input, err := readRequest(c, cl, ml, appConfig, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed reading parameters from request:%w", err)
|
||||
}
|
||||
|
||||
@@ -5,20 +5,20 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func EditEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
modelFile, input, err := readRequest(c, ml, appConfig, true)
|
||||
modelFile, input, err := readRequest(c, cl, ml, appConfig, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed reading parameters from request:%w", err)
|
||||
}
|
||||
|
||||
@@ -5,12 +5,12 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/rs/zerolog/log"
|
||||
@@ -23,7 +23,7 @@ import (
|
||||
// @Router /v1/embeddings [post]
|
||||
func EmbeddingsEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
model, input, err := readRequest(c, ml, appConfig, true)
|
||||
model, input, err := readRequest(c, cl, ml, appConfig, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed reading parameters from request:%w", err)
|
||||
}
|
||||
|
||||
@@ -8,10 +8,10 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
)
|
||||
|
||||
var UploadedFiles []File
|
||||
@@ -123,7 +123,10 @@ func getFileFromRequest(c *fiber.Ctx) (*File, error) {
|
||||
return nil, fmt.Errorf("unable to find file id %s", id)
|
||||
}
|
||||
|
||||
// GetFilesEndpoint https://platform.openai.com/docs/api-reference/files/retrieve
|
||||
// GetFilesEndpoint is the OpenAI API endpoint to get files https://platform.openai.com/docs/api-reference/files/retrieve
|
||||
// @Summary Returns information about a specific file.
|
||||
// @Success 200 {object} File "Response"
|
||||
// @Router /v1/files/{file_id} [get]
|
||||
func GetFilesEndpoint(cm *config.BackendConfigLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
file, err := getFileFromRequest(c)
|
||||
@@ -135,13 +138,17 @@ func GetFilesEndpoint(cm *config.BackendConfigLoader, appConfig *config.Applicat
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteFilesEndpoint https://platform.openai.com/docs/api-reference/files/delete
|
||||
type DeleteStatus struct {
|
||||
Id string
|
||||
Object string
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
// DeleteFilesEndpoint is the OpenAI API endpoint to delete files https://platform.openai.com/docs/api-reference/files/delete
|
||||
// @Summary Delete a file.
|
||||
// @Success 200 {object} DeleteStatus "Response"
|
||||
// @Router /v1/files/{file_id} [delete]
|
||||
func DeleteFilesEndpoint(cm *config.BackendConfigLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
|
||||
type DeleteStatus struct {
|
||||
Id string
|
||||
Object string
|
||||
Deleted bool
|
||||
}
|
||||
|
||||
return func(c *fiber.Ctx) error {
|
||||
file, err := getFileFromRequest(c)
|
||||
@@ -174,7 +181,11 @@ func DeleteFilesEndpoint(cm *config.BackendConfigLoader, appConfig *config.Appli
|
||||
}
|
||||
}
|
||||
|
||||
// GetFilesContentsEndpoint https://platform.openai.com/docs/api-reference/files/retrieve-contents
|
||||
// GetFilesContentsEndpoint is the OpenAI API endpoint to get files content https://platform.openai.com/docs/api-reference/files/retrieve-contents
|
||||
// @Summary Returns information about a specific file.
|
||||
// @Success 200 {string} binary "file"
|
||||
// @Router /v1/files/{file_id}/content [get]
|
||||
// GetFilesContentsEndpoint
|
||||
func GetFilesContentsEndpoint(cm *config.BackendConfigLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
file, err := getFileFromRequest(c)
|
||||
|
||||
@@ -13,10 +13,10 @@ import (
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
utils2 "github.com/go-skynet/LocalAI/pkg/utils"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
utils2 "github.com/mudler/LocalAI/pkg/utils"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"testing"
|
||||
|
||||
@@ -13,14 +13,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/google/uuid"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
@@ -66,7 +66,7 @@ func downloadFile(url string) (string, error) {
|
||||
// @Router /v1/images/generations [post]
|
||||
func ImageEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, appConfig *config.ApplicationConfig) func(c *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
m, input, err := readRequest(c, ml, appConfig, false)
|
||||
m, input, err := readRequest(c, cl, ml, appConfig, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed reading parameters from request:%w", err)
|
||||
}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/backend"
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/backend"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func ComputeChoices(
|
||||
|
||||
@@ -1,12 +1,18 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/core/services"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
model "github.com/mudler/LocalAI/pkg/model"
|
||||
)
|
||||
|
||||
func ListModelsEndpoint(lms *services.ListModelsService) func(ctx *fiber.Ctx) error {
|
||||
// ListModelsEndpoint is the OpenAI Models API endpoint https://platform.openai.com/docs/api-reference/models
|
||||
// @Summary List and describe the various models available in the API.
|
||||
// @Success 200 {object} schema.ModelsDataResponse "Response"
|
||||
// @Router /v1/models [get]
|
||||
func ListModelsEndpoint(bcl *config.BackendConfigLoader, ml *model.ModelLoader) func(ctx *fiber.Ctx) error {
|
||||
return func(c *fiber.Ctx) error {
|
||||
// If blank, no filter is applied.
|
||||
filter := c.Query("filter")
|
||||
@@ -14,16 +20,30 @@ func ListModelsEndpoint(lms *services.ListModelsService) func(ctx *fiber.Ctx) er
|
||||
// By default, exclude any loose files that are already referenced by a configuration file.
|
||||
excludeConfigured := c.QueryBool("excludeConfigured", true)
|
||||
|
||||
dataModels, err := lms.ListModels(filter, excludeConfigured)
|
||||
dataModels, err := modelList(bcl, ml, filter, excludeConfigured)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.JSON(struct {
|
||||
Object string `json:"object"`
|
||||
Data []schema.OpenAIModel `json:"data"`
|
||||
}{
|
||||
return c.JSON(schema.ModelsDataResponse{
|
||||
Object: "list",
|
||||
Data: dataModels,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func modelList(bcl *config.BackendConfigLoader, ml *model.ModelLoader, filter string, excludeConfigured bool) ([]schema.OpenAIModel, error) {
|
||||
|
||||
models, err := services.ListModels(bcl, ml, filter, excludeConfigured)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dataModels := []schema.OpenAIModel{}
|
||||
|
||||
// Then iterate through the loose files:
|
||||
for _, m := range models {
|
||||
dataModels = append(dataModels, schema.OpenAIModel{ID: m, Object: "model"})
|
||||
}
|
||||
|
||||
return dataModels, nil
|
||||
}
|
||||
|
||||
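For context, the new Swagger annotations above document GET /v1/models as returning schema.ModelsDataResponse assembled by modelList. A rough client-side sketch follows; it is not from the repository, and the base URL and JSON field names follow the usual OpenAI models schema, so treat them as assumptions.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Local mirrors of the response assembled by ListModelsEndpoint:
// {"object":"list","data":[{"id":"...","object":"model"}, ...]}
type openAIModel struct {
	ID     string `json:"id"`
	Object string `json:"object"`
}

type modelsDataResponse struct {
	Object string        `json:"object"`
	Data   []openAIModel `json:"data"`
}

func main() {
	// Assumes a LocalAI instance listening on its default local address.
	resp, err := http.Get("http://localhost:8080/v1/models")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var models modelsDataResponse
	if err := json.NewDecoder(resp.Body).Decode(&models); err != nil {
		panic(err)
	}
	for _, m := range models.Data {
		fmt.Println(m.ID)
	}
}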
@@ -2,23 +2,20 @@ package openai
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-skynet/LocalAI/core/config"
|
||||
fiberContext "github.com/go-skynet/LocalAI/core/http/ctx"
|
||||
"github.com/go-skynet/LocalAI/core/schema"
|
||||
"github.com/go-skynet/LocalAI/pkg/functions"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
fiberContext "github.com/mudler/LocalAI/core/http/ctx"
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/utils"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func readRequest(c *fiber.Ctx, ml *model.ModelLoader, o *config.ApplicationConfig, firstModel bool) (string, *schema.OpenAIRequest, error) {
|
||||
func readRequest(c *fiber.Ctx, cl *config.BackendConfigLoader, ml *model.ModelLoader, o *config.ApplicationConfig, firstModel bool) (string, *schema.OpenAIRequest, error) {
|
||||
input := new(schema.OpenAIRequest)
|
||||
|
||||
// Get input data from the request body
|
||||
@@ -34,46 +31,11 @@ func readRequest(c *fiber.Ctx, ml *model.ModelLoader, o *config.ApplicationConfi
|
||||
|
||||
log.Debug().Msgf("Request received: %s", string(received))
|
||||
|
||||
modelFile, err := fiberContext.ModelFromContext(c, ml, input.Model, firstModel)
|
||||
modelFile, err := fiberContext.ModelFromContext(c, cl, ml, input.Model, firstModel)
|
||||
|
||||
return modelFile, input, err
|
||||
}
|
||||
|
||||
// this function check if the string is an URL, if it's an URL downloads the image in memory
|
||||
// encodes it in base64 and returns the base64 string
|
||||
func getBase64Image(s string) (string, error) {
|
||||
if strings.HasPrefix(s, "http") {
|
||||
// download the image
|
||||
resp, err := http.Get(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// read the image data into memory
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// encode the image data in base64
|
||||
encoded := base64.StdEncoding.EncodeToString(data)
|
||||
|
||||
// return the base64 string
|
||||
return encoded, nil
|
||||
}
|
||||
|
||||
// if the string instead is prefixed with "data:image/...;base64,", drop it
|
||||
dropPrefix := []string{"data:image/jpeg;base64,", "data:image/png;base64,"}
|
||||
for _, prefix := range dropPrefix {
|
||||
if strings.HasPrefix(s, prefix) {
|
||||
return strings.ReplaceAll(s, prefix, ""), nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("not valid string")
|
||||
}
|
||||
|
||||
func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIRequest) {
|
||||
if input.Echo {
|
||||
config.Echo = input.Echo
|
||||
@@ -187,7 +149,7 @@ func updateRequestConfig(config *config.BackendConfig, input *schema.OpenAIReque
|
||||
input.Messages[i].StringContent = pp.Text
|
||||
} else if pp.Type == "image_url" {
|
||||
// Detect if pp.ImageURL is an URL, if it is download the image and encode it in base64:
|
||||
base64, err := getBase64Image(pp.ImageURL.URL)
|
||||
base64, err := utils.GetImageURLAsBase64(pp.ImageURL.URL)
|
||||
if err == nil {
|
||||
input.Messages[i].StringImages = append(input.Messages[i].StringImages, base64) // TODO: make sure that we only return base64 stuff
|
||||
// set a placeholder for each image
|
||||
@@ -295,5 +257,9 @@ func mergeRequestWithConfig(modelFile string, input *schema.OpenAIRequest, cm *c
// Set the parameters for the language model prediction
updateRequestConfig(cfg, input)

if !cfg.Validate() {
return nil, nil, fmt.Errorf("failed to validate config")
}

return cfg, input, err
}

Some files were not shown because too many files have changed in this diff.