Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 03:02:38 -05:00)

Compare commits: v2.19.3 ... ci/better_ (83 commits)
Commits (SHA1):
f9ddc31b77, 1494ba13e6, 77c8152cbf, 7bf5cc50b5, ada35e428e, de1f010f01, e1e221b6e5, 9818d2d1e1, 416aec3db6, a02fb001f9,
f0ed4aff1a, 30916e8eec, 57c96fe05e, 22ffe1a083, dc38b1f71e, 4c31e4567a, 1c0bbb92b2, 62176de6d2, 55318cca0f, 094a6fccd8,
42fe864cb4, ed322bf59f, f15a93b19b, 6e1ec08f46, e2e2a8e447, 1788fc8d4a, 12d6d2d177, d1a123954b, 8f0bf9810a, c2576d0879,
797c1739ce, a36b721ca6, fc50a90f6a, 2b55dd2c4f, 4c8957de63, 01d83129a2, 5afd2de87e, d792cf115b, e4b91e9dbb, d590532d7f,
26f393bd99, af0545834f, c492a9735a, 05c75ca617, 4c7e8f4d54, 115b523732, 4767057088, 33bc1e8b19, 8845524d01, 92faf5fd1d,
2775edb3f0, 98ffc00926, 9b21f0d6ad, 57ea7f81bb, 274487c5eb, 17634b394b, 2d59c99d31, abcbbbed2d, f1e90575f3, a7dbeb36ca,
d50c72a657, 12b470f00a, 198bc6d939, 3feb869025, f24fac43da, 9c96a73d93, 45233937b7, f822bebfd8, 0dd02b2ad7, 9948ff2715,
0da042dc2b, 5c747a16c4, 40604e877c, 3dfed64a15, e5f91fbba2, 4700c9df92, 6f8d6f601a, 8a39707b36, e7df875db3, cb042713e8,
7c4e526853, 3a70cf311b, 5d08b9ac68
13  .github/bump_deps.sh  (vendored)

@@ -6,4 +6,17 @@ VAR=$3
 LAST_COMMIT=$(curl -s -H "Accept: application/vnd.github.VERSION.sha" "https://api.github.com/repos/$REPO/commits/$BRANCH")

+# Read $VAR from Makefile (only first match)
+set +e
+CURRENT_COMMIT="$(grep -m1 "^$VAR?=" Makefile | cut -d'=' -f2)"
+set -e
+
 sed -i Makefile -e "s/$VAR?=.*/$VAR?=$LAST_COMMIT/"
+
+if [ -z "$CURRENT_COMMIT" ]; then
+    echo "Could not find $VAR in Makefile."
+    exit 0
+fi
+
+echo "Updated $VAR from $CURRENT_COMMIT to $LAST_COMMIT." > "$REPO_message.txt"
+echo "https://github.com/$REPO/compare/$CURRENT_COMMIT..$LAST_COMMIT" >> "$REPO_message.txt"
8  .github/workflows/bump_deps.yaml  (vendored)

@@ -40,8 +40,14 @@ jobs:
     steps:
     - uses: actions/checkout@v4
     - name: Bump dependencies 🔧
+      id: bump
       run: |
         bash .github/bump_deps.sh ${{ matrix.repository }} ${{ matrix.branch }} ${{ matrix.variable }}
+        {
+          echo 'message<<EOF'
+          cat "${{ matrix.repository }}_message.txt"
+          echo EOF
+        } >> "$GITHUB_OUTPUT"
     - name: Create Pull Request
       uses: peter-evans/create-pull-request@v6
       with:
@@ -50,7 +56,7 @@ jobs:
       commit-message: ':arrow_up: Update ${{ matrix.repository }}'
       title: 'chore: :arrow_up: Update ${{ matrix.repository }}'
       branch: "update/${{ matrix.variable }}"
-      body: Bump of ${{ matrix.repository }} version
+      body: ${{ steps.bump.outputs.message }}
       signoff: true
10  Makefile

@@ -8,7 +8,7 @@ DETECT_LIBS?=true
 # llama.cpp versions
 GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
 GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=4730faca618ff9cee0780580145e3cbe86f24876
+CPPLLAMA_VERSION?=0d6fb52be0c1b7e77eb855f3adc4952771c8ce4c

 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
@@ -20,7 +20,7 @@ RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6
 # whisper.cpp version
 WHISPER_REPO?=https://github.com/ggerganov/whisper.cpp
-WHISPER_CPP_VERSION?=6739eb83c3ca5cf40d24c6fe8442a761a1eb6248
+WHISPER_CPP_VERSION?=fe36c909715e6751277ddb020e7892c7670b61d4

 # bert.cpp version
 BERT_REPO?=https://github.com/go-skynet/go-bert.cpp
@@ -783,9 +783,6 @@ else
     echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
     LLAMA_VERSION=$(CPPLLAMA_VERSION) $(MAKE) -C backend/cpp/${VARIANT} grpc-server
 endif
-ifneq ($(UPX),)
-    $(UPX) backend/cpp/${VARIANT}/grpc-server
-endif

 # This target is for manually building a variant with-auto detected flags
 backend-assets/grpc/llama-cpp: backend-assets/grpc backend/cpp/llama/llama.cpp
@@ -858,9 +855,6 @@ backend-assets/grpc/llama-cpp-grpc: backend-assets/grpc backend/cpp/llama/llama.
 backend-assets/util/llama-cpp-rpc-server: backend-assets/grpc/llama-cpp-grpc
     mkdir -p backend-assets/util/
     cp -rf backend/cpp/llama-grpc/llama.cpp/build/bin/rpc-server backend-assets/util/llama-cpp-rpc-server
-ifneq ($(UPX),)
-    $(UPX) backend-assets/util/llama-cpp-rpc-server
-endif

 backend-assets/grpc/llama-ggml: sources/go-llama.cpp sources/go-llama.cpp/libbinding.a backend-assets/grpc
     CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama.cpp LIBRARY_PATH=$(CURDIR)/sources/go-llama.cpp \
@@ -84,6 +84,7 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu

 Hot topics (looking for contributors):

+- 🔥🔥 Distributed, P2P Global community pools: https://github.com/mudler/LocalAI/issues/3113
 - WebUI improvements: https://github.com/mudler/LocalAI/issues/2156
 - Backends v2: https://github.com/mudler/LocalAI/issues/1126
 - Improving UX v2: https://github.com/mudler/LocalAI/issues/1373
@@ -150,6 +151,7 @@ Other:

 ## :book: 🎥 [Media, Blogs, Social](https://localai.io/basics/news/#media-blogs-social)

 - [Run Visual studio code with LocalAI (SUSE)](https://www.suse.com/c/running-ai-locally/)
+- 🆕 [Run LocalAI on Jetson Nano Devkit](https://mudler.pm/posts/local-ai-jetson-nano-devkit/)
 - [Run LocalAI on AWS EKS with Pulumi](https://www.pulumi.com/blog/low-code-llm-apps-with-local-ai-flowise-and-pulumi/)
 - [Run LocalAI on AWS](https://staleks.hashnode.dev/installing-localai-on-aws-ec2-instance)
2  backend/python/autogptq/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/autogptq/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -2,4 +2,4 @@
 intel-extension-for-pytorch
 torch
 optimum[openvino]
-setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406

@@ -1,7 +1,6 @@
 accelerate
 auto-gptq==0.7.1
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
-torch
 certifi
 transformers
3  backend/python/bark/requirements-cublas11.txt  (new file)

@@ -0,0 +1,3 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch
+torchaudio

2  backend/python/bark/requirements-cublas12.txt  (new file)

@@ -0,0 +1,2 @@
+torch
+torchaudio

@@ -1,6 +1,6 @@
 accelerate
 bark==0.1.5
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 certifi
 transformers
@@ -122,6 +122,13 @@ function installRequirements() {
         requirementFiles+=("${MY_DIR}/requirements-${BUILD_PROFILE}.txt")
     fi

+    # if BUILD_TYPE is empty, we are a CPU build, so we should try to install the CPU requirements
+    if [ "x${BUILD_TYPE}" == "x" ]; then
+        requirementFiles+=("${MY_DIR}/requirements-cpu.txt")
+    fi
+
     requirementFiles+=("${MY_DIR}/requirements-after.txt")

     for reqFile in ${requirementFiles[@]}; do
         if [ -f ${reqFile} ]; then
             echo "starting requirements install for ${reqFile}"
@@ -1,2 +1,2 @@
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
3  backend/python/coqui/requirements-cublas11.txt  (new file)

@@ -0,0 +1,3 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch
+torchaudio

2  backend/python/coqui/requirements-cublas12.txt  (new file)

@@ -0,0 +1,2 @@
+torch
+torchaudio

@@ -3,4 +3,4 @@ intel-extension-for-pytorch
 torch
 torchaudio
 optimum[openvino]
-setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406

@@ -1,6 +1,6 @@
 accelerate
 TTS==0.22.0
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 certifi
 transformers
2  backend/python/diffusers/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/diffusers/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -3,11 +3,10 @@ accelerate
 compel
 peft
 diffusers
-grpcio==1.65.1
+grpcio==1.65.4
 opencv-python
 pillow
 protobuf
 sentencepiece
-torch
 transformers
 certifi
2  backend/python/exllama/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/exllama/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -1,6 +1,5 @@
 grpcio==1.65.0
 protobuf
-torch
 transformers
 certifi
 setuptools
2  backend/python/exllama2/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/exllama2/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -1,7 +1,6 @@
 accelerate
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 certifi
-torch
 wheel
 setuptools
2  backend/python/mamba/requirements-after.txt  (new file)

@@ -0,0 +1,2 @@
+causal-conv1d==1.4.0
+mamba-ssm==2.2.2

1  backend/python/mamba/requirements-cpu.txt  (new file)

@@ -0,0 +1 @@
+torch

2  backend/python/mamba/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/mamba/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -3,5 +3,4 @@
 # https://github.com/Dao-AILab/causal-conv1d/issues/24
 packaging
 setuptools
-wheel
-torch==2.3.1
+wheel

@@ -1,5 +1,3 @@
-causal-conv1d==1.4.0
-mamba-ssm==2.2.2
 grpcio==1.65.1
 protobuf
 certifi
2  backend/python/openvoice/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/openvoice/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -2,7 +2,7 @@
 intel-extension-for-pytorch
 torch
 optimum[openvino]
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 librosa==0.9.1
 faster-whisper==1.0.3

@@ -1,4 +1,4 @@
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 librosa
 faster-whisper

@@ -5,7 +5,7 @@ source $(dirname $0)/../common/libbackend.sh

 # Download checkpoints if not present
 if [ ! -d "checkpoints_v2" ]; then
-    wget https://myshell-public-repo-hosting.s3.amazonaws.com/openvoice/checkpoints_v2_0417.zip -O checkpoints_v2.zip
+    wget https://myshell-public-repo-host.s3.amazonaws.com/openvoice/checkpoints_v2_0417.zip -O checkpoints_v2.zip
     unzip checkpoints_v2.zip
 fi
3  backend/python/parler-tts/requirements-cublas11.txt  (new file)

@@ -0,0 +1,3 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch
+torchaudio

2  backend/python/parler-tts/requirements-cublas12.txt  (new file)

@@ -0,0 +1,2 @@
+torch
+torchaudio

@@ -3,4 +3,4 @@ intel-extension-for-pytorch
 torch
 torchaudio
 optimum[openvino]
-setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406

@@ -1,7 +1,6 @@
 accelerate
 grpcio==1.65.1
 protobuf
-torch
 git+https://github.com/huggingface/parler-tts.git@10016fb0300c0dc31a0fb70e26f3affee7b62f16
 certifi
 transformers
2  backend/python/petals/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/petals/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -2,4 +2,4 @@
 intel-extension-for-pytorch
 torch
 optimum[openvino]
-setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406
2  backend/python/rerankers/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/rerankers/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -2,4 +2,4 @@
 intel-extension-for-pytorch
 torch
 optimum[openvino]
-setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406

@@ -1,6 +1,6 @@
 accelerate
 rerankers[transformers]
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 certifi
 transformers
@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

@@ -0,0 +1 @@
+torch

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

@@ -0,0 +1 @@
+torch

@@ -1,7 +1,6 @@
 accelerate
 transformers
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
-torch
 scipy==1.14.0
 certifi
2  backend/python/transformers/requirements-cublas11.txt  (new file)

@@ -0,0 +1,2 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch

1  backend/python/transformers/requirements-cublas12.txt  (new file)

@@ -0,0 +1 @@
+torch

@@ -1,8 +1,7 @@
 accelerate
 transformers
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
-torch
 certifi
 intel-extension-for-transformers
 bitsandbytes
3  backend/python/vall-e-x/requirements-cublas11.txt  (new file)

@@ -0,0 +1,3 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch
+torchaudio

2  backend/python/vall-e-x/requirements-cublas12.txt  (new file)

@@ -0,0 +1,2 @@
+torch
+torchaudio

@@ -3,4 +3,4 @@ intel-extension-for-pytorch
 torch
 torchaudio
 optimum[openvino]
-setuptools==70.3.0 # https://github.com/mudler/LocalAI/issues/2406
+setuptools==72.1.0 # https://github.com/mudler/LocalAI/issues/2406

@@ -1,4 +1,4 @@
 accelerate
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 certifi
@@ -1 +0,0 @@
-flash-attn

3  backend/python/vllm/requirements-cublas11.txt  (new file)

@@ -0,0 +1,3 @@
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch
+flash-attn

2  backend/python/vllm/requirements-cublas12.txt  (new file)

@@ -0,0 +1,2 @@
+torch
+flash-attn

@@ -1,6 +1,6 @@
 accelerate
 vllm
-grpcio==1.65.1
+grpcio==1.65.4
 protobuf
 certifi
 transformers
@@ -83,7 +83,9 @@ func (mi *ModelsInstall) Run(ctx *cliContext.Context) error {
             return err
         }

-        if !downloader.LooksLikeOCI(modelName) {
+        modelURI := downloader.URI(modelName)
+
+        if !modelURI.LooksLikeOCI() {
             model := gallery.FindModel(models, modelName, mi.ModelsPath)
             if model == nil {
                 log.Error().Str("model", modelName).Msg("model not found")
@@ -86,8 +86,8 @@ func (hfscmd *HFScanCMD) Run(ctx *cliContext.Context) error {
     var errs error = nil
     for _, uri := range hfscmd.ToScan {
         log.Info().Str("uri", uri).Msg("scanning specific uri")
-        scanResults, err := downloader.HuggingFaceScan(uri)
-        if err != nil && !errors.Is(err, downloader.ErrNonHuggingFaceFile) {
+        scanResults, err := downloader.HuggingFaceScan(downloader.URI(uri))
+        if err != nil && errors.Is(err, downloader.ErrUnsafeFilesFound) {
             log.Error().Err(err).Strs("clamAV", scanResults.ClamAVInfectedFiles).Strs("pickles", scanResults.DangerousPickles).Msg("! WARNING ! A known-vulnerable model is included in this repo!")
             errs = errors.Join(errs, err)
         }
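A note on the scan call sites in this and the later gallery hunks: besides wrapping the argument in downloader.URI, the error check is inverted. Previously every failure except the benign "not a Hugging Face file" case was reported; now only a confirmed unsafe-files result is. A minimal, self-contained sketch of the difference (the sentinel names are taken from the diff, their bodies are stand-ins):

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for downloader.ErrNonHuggingFaceFile and
// downloader.ErrUnsafeFilesFound; only the names come from the diff.
var (
	errNonHuggingFaceFile = errors.New("not a huggingface file")
	errUnsafeFilesFound   = errors.New("unsafe files found")
)

// Old condition: anything except "not a HF file" was reported, so plain
// network failures were logged as scan warnings too.
func reportOld(err error) bool {
	return err != nil && !errors.Is(err, errNonHuggingFaceFile)
}

// New condition: only a positive unsafe-files finding is reported.
func reportNew(err error) bool {
	return err != nil && errors.Is(err, errUnsafeFilesFound)
}

func main() {
	netErr := errors.New("connection refused")
	fmt.Println(reportOld(netErr), reportNew(netErr))                           // true false
	fmt.Println(reportOld(errUnsafeFilesFound), reportNew(errUnsafeFilesFound)) // true true
}
```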
@@ -8,7 +8,6 @@ import (
     "github.com/mudler/LocalAI/core/schema"
     "github.com/mudler/LocalAI/pkg/downloader"
     "github.com/mudler/LocalAI/pkg/functions"
-    "github.com/mudler/LocalAI/pkg/utils"
 )

 const (
@@ -72,9 +71,9 @@
 }

 type File struct {
-    Filename string `yaml:"filename" json:"filename"`
-    SHA256   string `yaml:"sha256" json:"sha256"`
-    URI      string `yaml:"uri" json:"uri"`
+    Filename string         `yaml:"filename" json:"filename"`
+    SHA256   string         `yaml:"sha256" json:"sha256"`
+    URI      downloader.URI `yaml:"uri" json:"uri"`
 }

 type VallE struct {
@@ -213,28 +212,32 @@ func (c *BackendConfig) ShouldCallSpecificFunction() bool {
 // MMProjFileName returns the filename of the MMProj file
 // If the MMProj is a URL, it will return the MD5 of the URL which is the filename
 func (c *BackendConfig) MMProjFileName() string {
-    modelURL := downloader.ConvertURL(c.MMProj)
-    if downloader.LooksLikeURL(modelURL) {
-        return utils.MD5(modelURL)
+    uri := downloader.URI(c.MMProj)
+    if uri.LooksLikeURL() {
+        f, _ := uri.FilenameFromUrl()
+        return f
     }

     return c.MMProj
 }

 func (c *BackendConfig) IsMMProjURL() bool {
-    return downloader.LooksLikeURL(downloader.ConvertURL(c.MMProj))
+    uri := downloader.URI(c.MMProj)
+    return uri.LooksLikeURL()
 }

 func (c *BackendConfig) IsModelURL() bool {
-    return downloader.LooksLikeURL(downloader.ConvertURL(c.Model))
+    uri := downloader.URI(c.Model)
+    return uri.LooksLikeURL()
 }

 // ModelFileName returns the filename of the model
 // If the model is a URL, it will return the MD5 of the URL which is the filename
 func (c *BackendConfig) ModelFileName() string {
-    modelURL := downloader.ConvertURL(c.Model)
-    if downloader.LooksLikeURL(modelURL) {
-        return utils.MD5(modelURL)
+    uri := downloader.URI(c.Model)
+    if uri.LooksLikeURL() {
+        f, _ := uri.FilenameFromUrl()
+        return f
     }

     return c.Model
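The refactor running through this and the following Go hunks has one shape: package-level helpers (downloader.ConvertURL, downloader.LooksLikeURL, downloader.DownloadFile, downloader.DownloadAndUnmarshal) become methods on a string-backed downloader.URI type, and URL-derived filenames switch from utils.MD5(url) to uri.FilenameFromUrl(). A self-contained sketch of that pattern follows; the toy URI below only mirrors the shape of the real type, it is not the upstream implementation:

```go
package main

import (
	"fmt"
	"strings"
)

// URI mirrors the new downloader.URI: a named string type whose helpers
// are methods instead of package-level functions.
type URI string

func (u URI) LooksLikeURL() bool {
	return strings.HasPrefix(string(u), "http://") ||
		strings.HasPrefix(string(u), "https://")
}

// FilenameFromUrl derives a readable filename from the URL path, which in
// this release replaces the old utils.MD5(url) naming scheme.
func (u URI) FilenameFromUrl() (string, error) {
	parts := strings.Split(string(u), "/")
	name := parts[len(parts)-1]
	if name == "" {
		return "", fmt.Errorf("no filename in %q", string(u))
	}
	return name, nil
}

func main() {
	uri := URI("https://example.com/models/some-model.Q4_K_M.gguf")
	if uri.LooksLikeURL() {
		f, _ := uri.FilenameFromUrl()
		fmt.Println(f) // some-model.Q4_K_M.gguf
	}
}
```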
@@ -244,7 +244,7 @@ func (bcl *BackendConfigLoader) Preload(modelPath string) error {
         // Create file path
         filePath := filepath.Join(modelPath, file.Filename)

-        if err := downloader.DownloadFile(file.URI, filePath, file.SHA256, i, len(config.DownloadFiles), status); err != nil {
+        if err := file.URI.DownloadFile(filePath, file.SHA256, i, len(config.DownloadFiles), status); err != nil {
             return err
         }
     }
@@ -252,10 +252,10 @@ func (bcl *BackendConfigLoader) Preload(modelPath string) error {
     // If the model is an URL, expand it, and download the file
     if config.IsModelURL() {
         modelFileName := config.ModelFileName()
-        modelURL := downloader.ConvertURL(config.Model)
+        uri := downloader.URI(config.Model)
         // check if file exists
         if _, err := os.Stat(filepath.Join(modelPath, modelFileName)); errors.Is(err, os.ErrNotExist) {
-            err := downloader.DownloadFile(modelURL, filepath.Join(modelPath, modelFileName), "", 0, 0, status)
+            err := uri.DownloadFile(filepath.Join(modelPath, modelFileName), "", 0, 0, status)
             if err != nil {
                 return err
             }
@@ -269,10 +269,10 @@ func (bcl *BackendConfigLoader) Preload(modelPath string) error {
     if config.IsMMProjURL() {
         modelFileName := config.MMProjFileName()
-        modelURL := downloader.ConvertURL(config.MMProj)
+        uri := downloader.URI(config.MMProj)
         // check if file exists
         if _, err := os.Stat(filepath.Join(modelPath, modelFileName)); errors.Is(err, os.ErrNotExist) {
-            err := downloader.DownloadFile(modelURL, filepath.Join(modelPath, modelFileName), "", 0, 0, status)
+            err := uri.DownloadFile(filepath.Join(modelPath, modelFileName), "", 0, 0, status)
             if err != nil {
                 return err
             }
@@ -26,15 +26,17 @@
 type settingsConfig struct {
     StopWords      []string
     TemplateConfig TemplateConfig
+    RepeatPenalty  float64
 }

 // default settings to adopt with a given model family
 var defaultsSettings map[familyType]settingsConfig = map[familyType]settingsConfig{
     Gemma: {
+        RepeatPenalty: 1.0,
         StopWords: []string{"<|im_end|>", "<end_of_turn>", "<start_of_turn>"},
         TemplateConfig: TemplateConfig{
-            Chat:        "{{.Input }}\n<|start_of_turn|>model\n",
-            ChatMessage: "<|start_of_turn|>{{if eq .RoleName \"assistant\" }}model{{else}}{{ .RoleName }}{{end}}\n{{ if .Content -}}\n{{.Content -}}\n{{ end -}}<|end_of_turn|>",
+            Chat:        "{{.Input }}\n<start_of_turn>model\n",
+            ChatMessage: "<start_of_turn>{{if eq .RoleName \"assistant\" }}model{{else}}{{ .RoleName }}{{end}}\n{{ if .Content -}}\n{{.Content -}}\n{{ end -}}<end_of_turn>",
             Completion:  "{{.Input}}",
         },
     },
@@ -192,6 +194,9 @@ func guessDefaultsFromFile(cfg *BackendConfig, modelPath string) {
         if len(cfg.StopWords) == 0 {
             cfg.StopWords = settings.StopWords
         }
+        if cfg.RepeatPenalty == 0.0 {
+            cfg.RepeatPenalty = settings.RepeatPenalty
+        }
     } else {
         log.Debug().Any("family", family).Msgf("guessDefaultsFromFile: no template found for family")
     }
@@ -219,7 +224,7 @@ func identifyFamily(f *gguf.GGUFFile) familyType {
     commandR := arch == "command-r" && eosTokenID == 255001
     qwen2 := arch == "qwen2"
     phi3 := arch == "phi-3"
-    gemma := strings.HasPrefix(f.Model().Name, "gemma")
+    gemma := strings.HasPrefix(arch, "gemma") || strings.Contains(strings.ToLower(f.Model().Name), "gemma")
     deepseek2 := arch == "deepseek2"

     switch {
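The widened gemma check in identifyFamily is easiest to see in isolation: the old predicate matched only model names that literally begin with lowercase "gemma", while the new one also consults the GGUF architecture string and lower-cases the name before substring matching. A standalone comparison with example inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// Old predicate: a literal lowercase prefix match on the model name.
func isGemmaOld(name string) bool {
	return strings.HasPrefix(name, "gemma")
}

// New predicate: architecture prefix or case-insensitive name substring.
func isGemmaNew(arch, name string) bool {
	return strings.HasPrefix(arch, "gemma") ||
		strings.Contains(strings.ToLower(name), "gemma")
}

func main() {
	fmt.Println(isGemmaOld("Gemma-2-9B-it"))           // false: capital G misses
	fmt.Println(isGemmaNew("gemma2", "Gemma-2-9B-it")) // true
}
```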
@@ -37,7 +37,8 @@ func main() {
     // download the assets
     for _, asset := range assets {
-        if err := downloader.DownloadFile(asset.URL, filepath.Join(destPath, asset.FileName), asset.SHA, 1, 1, utils.DisplayDownloadFunction); err != nil {
+        uri := downloader.URI(asset.URL)
+        if err := uri.DownloadFile(filepath.Join(destPath, asset.FileName), asset.SHA, 1, 1, utils.DisplayDownloadFunction); err != nil {
             panic(err)
         }
     }
@@ -131,7 +131,8 @@ func AvailableGalleryModels(galleries []config.Gallery, basePath string) ([]*Gal
 func findGalleryURLFromReferenceURL(url string, basePath string) (string, error) {
     var refFile string
-    err := downloader.DownloadAndUnmarshal(url, basePath, func(url string, d []byte) error {
+    uri := downloader.URI(url)
+    err := uri.DownloadAndUnmarshal(basePath, func(url string, d []byte) error {
         refFile = string(d)
         if len(refFile) == 0 {
             return fmt.Errorf("invalid reference file at url %s: %s", url, d)
@@ -153,8 +154,9 @@ func getGalleryModels(gallery config.Gallery, basePath string) ([]*GalleryModel,
             return models, err
         }
     }
+    uri := downloader.URI(gallery.URL)

-    err := downloader.DownloadAndUnmarshal(gallery.URL, basePath, func(url string, d []byte) error {
+    err := uri.DownloadAndUnmarshal(basePath, func(url string, d []byte) error {
         return yaml.Unmarshal(d, &models)
     })
     if err != nil {
@@ -252,8 +254,8 @@ func SafetyScanGalleryModels(galleries []config.Gallery, basePath string) error
 func SafetyScanGalleryModel(galleryModel *GalleryModel) error {
     for _, file := range galleryModel.AdditionalFiles {
-        scanResults, err := downloader.HuggingFaceScan(file.URI)
-        if err != nil && !errors.Is(err, downloader.ErrNonHuggingFaceFile) {
+        scanResults, err := downloader.HuggingFaceScan(downloader.URI(file.URI))
+        if err != nil && errors.Is(err, downloader.ErrUnsafeFilesFound) {
             log.Error().Str("model", galleryModel.Name).Strs("clamAV", scanResults.ClamAVInfectedFiles).Strs("pickles", scanResults.DangerousPickles).Msg("Contains unsafe file(s)!")
             return err
         }
@@ -68,7 +68,8 @@ type PromptTemplate struct {
 func GetGalleryConfigFromURL(url string, basePath string) (Config, error) {
     var config Config
-    err := downloader.DownloadAndUnmarshal(url, basePath, func(url string, d []byte) error {
+    uri := downloader.URI(url)
+    err := uri.DownloadAndUnmarshal(basePath, func(url string, d []byte) error {
         return yaml.Unmarshal(d, &config)
     })
     if err != nil {
@@ -118,14 +119,14 @@ func InstallModel(basePath, nameOverride string, config *Config, configOverrides
         filePath := filepath.Join(basePath, file.Filename)

         if enforceScan {
-            scanResults, err := downloader.HuggingFaceScan(file.URI)
-            if err != nil && !errors.Is(err, downloader.ErrNonHuggingFaceFile) {
+            scanResults, err := downloader.HuggingFaceScan(downloader.URI(file.URI))
+            if err != nil && errors.Is(err, downloader.ErrUnsafeFilesFound) {
                 log.Error().Str("model", config.Name).Strs("clamAV", scanResults.ClamAVInfectedFiles).Strs("pickles", scanResults.DangerousPickles).Msg("Contains unsafe file(s)!")
                 return err
             }
         }
-        if err := downloader.DownloadFile(file.URI, filePath, file.SHA256, i, len(config.Files), downloadStatus); err != nil {
+        uri := downloader.URI(file.URI)
+        if err := uri.DownloadFile(filePath, file.SHA256, i, len(config.Files), downloadStatus); err != nil {
             return err
         }
     }
@@ -73,8 +73,9 @@ func getModelStatus(url string) (response map[string]interface{}) {
 }

 func getModels(url string) (response []gallery.GalleryModel) {
+    uri := downloader.URI(url)
     // TODO: No tests currently seem to exercise file:// urls. Fix?
-    downloader.DownloadAndUnmarshal(url, "", func(url string, i []byte) error {
+    uri.DownloadAndUnmarshal("", func(url string, i []byte) error {
         // Unmarshal YAML data into a struct
         return json.Unmarshal(i, &response)
     })
@@ -17,7 +17,10 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig,
     backendConfigs := cl.GetAllBackendConfigs()

     galleryConfigs := map[string]*gallery.Config{}
+    modelsWithBackendConfig := map[string]interface{}{}

     for _, m := range backendConfigs {
+        modelsWithBackendConfig[m.Name] = nil
+
         cfg, err := gallery.GetLocalModelConfiguration(ml.ModelPath, m.Name)
         if err != nil {
@@ -32,7 +35,7 @@ func WelcomeEndpoint(appConfig *config.ApplicationConfig,
     modelsWithoutConfig := []string{}

     for _, m := range models {
-        if _, ok := galleryConfigs[m]; !ok {
+        if _, ok := modelsWithBackendConfig[m]; !ok {
             modelsWithoutConfig = append(modelsWithoutConfig, m)
         }
     }
@@ -1,3 +1,3 @@
 {
-    "version": "v2.19.2"
+    "version": "v2.19.4"
 }
4  docs/static/install.sh  (vendored)

@@ -194,7 +194,7 @@ install_container_toolkit_yum() {
   curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \
     $SUDO tee /etc/yum.repos.d/nvidia-container-toolkit.repo

-  if [ "$PACKAGE_MANAGER" == "dnf" ]; then
+  if [ "$PACKAGE_MANAGER" = "dnf" ]; then
     $SUDO $PACKAGE_MANAGER config-manager --enable nvidia-container-toolkit-experimental
   else
     $SUDO $PACKAGE_MANAGER -y install yum-utils
@@ -629,7 +629,7 @@ case "$ARCH" in
   *) fatal "Unsupported architecture: $ARCH" ;;
 esac

-if [ "$OS" == "Darwin" ]; then
+if [ "$OS" = "Darwin" ]; then
   install_binary_darwin
   exit 0
 fi
2  docs/themes/hugo-theme-relearn  (vendored)

Submodule docs/themes/hugo-theme-relearn updated: 7aec99b38d...8b14837336
@@ -38,8 +38,8 @@ func init() {
 func GetRemoteLibraryShorteners(url string, basePath string) (map[string]string, error) {
     remoteLibrary := map[string]string{}
-
-    err := downloader.DownloadAndUnmarshal(url, basePath, func(_ string, i []byte) error {
+    uri := downloader.URI(url)
+    err := uri.DownloadAndUnmarshal(basePath, func(_ string, i []byte) error {
         return yaml.Unmarshal(i, &remoteLibrary)
     })
     if err != nil {
@@ -1,4 +1,4 @@
-llama_index==0.10.56
+llama_index==0.10.59
 requests==2.32.3
 weaviate_client==4.6.7
 transformers
@@ -1,2 +1,2 @@
-langchain==0.2.10
+langchain==0.2.12
 openai==1.37.0
@@ -1,4 +1,4 @@
-langchain==0.2.10
+langchain==0.2.12
 openai==1.37.0
-chromadb==0.5.4
+chromadb==0.5.5
 llama-index==0.10.56
@@ -10,7 +10,7 @@ debugpy==1.8.2
 frozenlist==1.4.1
 greenlet==3.0.3
 idna==3.7
-langchain==0.2.10
+langchain==0.2.12
 langchain-community==0.2.9
 marshmallow==3.21.3
 marshmallow-enum==1.5.1
@@ -18,13 +18,13 @@ multidict==6.0.5
 mypy-extensions==1.0.0
 numexpr==2.10.1
 numpy==2.0.1
-openai==1.37.0
+openai==1.37.1
 openapi-schema-pydantic==1.2.4
 packaging>=23.2
 pydantic==2.8.2
 PyYAML==6.0.1
 requests==2.32.3
-SQLAlchemy==2.0.31
+SQLAlchemy==2.0.32
 tenacity==8.5.0
 tqdm==4.66.4
 typing-inspect==0.9.0
@@ -1,2 +1,2 @@
-streamlit==1.36.0
+streamlit==1.37.1
 requests
17  gallery/alpaca.yaml  (new file)

@@ -0,0 +1,17 @@
+---
+name: "alpaca"
+
+config_file: |
+  context_size: 4096
+  f16: true
+  mmap: true
+  template:
+    chat: |
+      Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+      ### Instruction:
+      {{.Input}}
+
+      ### Response:
+    completion: |
+      {{.Input}}
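The chat block in the new gallery file is a Go text/template. Below is a minimal rendering sketch, assuming .Input carries the assembled prompt (the example prompt is hypothetical):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Same body as the alpaca chat template added above.
	const chat = `Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{{.Input}}

### Response:
`
	tpl := template.Must(template.New("chat").Parse(chat))
	// Hypothetical input; LocalAI substitutes the real conversation here.
	_ = tpl.Execute(os.Stdout, map[string]string{"Input": "Write a haiku about local inference."})
}
```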
@@ -69,8 +69,8 @@
       model: Meta-Llama-3.1-8B-Claude-iMat-Q4_K_M.gguf
   files:
     - filename: Meta-Llama-3.1-8B-Claude-iMat-Q4_K_M.gguf
-      sha256: 8de80021b9438f0925a41ae73f77cb73fcfa30090e03a0919ce23d2b9818e9c7
+      sha256: 6d175432f66d10dfed9737f73a5073d513d18e1ee7bd4b9cf2a59deb359f36ff
       uri: huggingface://InferenceIllusionist/Meta-Llama-3.1-8B-Claude-iMat-GGUF/Meta-Llama-3.1-8B-Claude-iMat-Q4_K_M.gguf
 - !!merge <<: *llama31
   name: "meta-llama-3.1-8b-instruct-abliterated"
   icon: https://i.imgur.com/KhorYYG.png
@@ -84,8 +84,8 @@
       model: meta-llama-3.1-8b-instruct-abliterated.Q4_K_M.gguf
   files:
     - filename: meta-llama-3.1-8b-instruct-abliterated.Q4_K_M.gguf
-      sha256: 18cca47adfb3954af2b49e3aa2ce1604158337aff45fab2e7654039b65c7683e
+      sha256: 2e1fd6d93b19cc6548b2b8ed2d3f1f34b432ee0573f3dcf358bbaab4f23c760b
       uri: huggingface://mlabonne/Meta-Llama-3.1-8B-Instruct-abliterated-GGUF/meta-llama-3.1-8b-instruct-abliterated.Q4_K_M.gguf
 - !!merge <<: *llama31
   name: "llama-3.1-70b-japanese-instruct-2407"
   urls:
@@ -145,6 +145,185 @@
     - filename: llama3.1-8b-fireplace2-q4_k_m.gguf
       sha256: 54527fd2474b576086ea31e759214ab240abe2429ae623a02d7ba825cc8cb13e
       uri: huggingface://mudler/Llama3.1-8B-Fireplace2-Q4_K_M-GGUF/llama3.1-8b-fireplace2-q4_k_m.gguf
+- !!merge <<: *llama31
+  name: "sekhmet_aleph-l3.1-8b-v0.1-i1"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/642265bc01c62c1e4102dc36/SVyiW4mu495ngqszJGWRl.png
+  urls:
+    - https://huggingface.co/Nitral-Archive/Sekhmet_Aleph-L3.1-8B-v0.1
+    - https://huggingface.co/mradermacher/Sekhmet_Aleph-L3.1-8B-v0.1-i1-GGUF
+  overrides:
+    parameters:
+      model: Sekhmet_Aleph-L3.1-8B-v0.1.i1-Q4_K_M.gguf
+  files:
+    - filename: Sekhmet_Aleph-L3.1-8B-v0.1.i1-Q4_K_M.gguf
+      sha256: 5b6f4eaa2091bf13a2b563a54a3f87b22efa7f2862362537c956c70da6e11cea
+      uri: huggingface://mradermacher/Sekhmet_Aleph-L3.1-8B-v0.1-i1-GGUF/Sekhmet_Aleph-L3.1-8B-v0.1.i1-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "l3.1-8b-llamoutcast-i1"
+  icon: https://files.catbox.moe/ecgn0m.jpg
+  urls:
+    - https://huggingface.co/Envoid/L3.1-8B-Llamoutcast
+    - https://huggingface.co/mradermacher/L3.1-8B-Llamoutcast-i1-GGUF
+  description: |
+    Warning: this model is utterly cursed.
+    Llamoutcast
+
+    This model was originally intended to be a DADA finetune of Llama-3.1-8B-Instruct but the results were unsatisfactory. So it received some additional finetuning on a rawtext dataset and now it is utterly cursed.
+
+    It responds to Llama-3 Instruct formatting.
+  overrides:
+    parameters:
+      model: L3.1-8B-Llamoutcast.i1-Q4_K_M.gguf
+  files:
+    - filename: L3.1-8B-Llamoutcast.i1-Q4_K_M.gguf
+      sha256: 438ca0a7e9470f5ee40f3b14dc2da41b1cafc4ad4315dead3eb57924109d5cf6
+      uri: huggingface://mradermacher/L3.1-8B-Llamoutcast-i1-GGUF/L3.1-8B-Llamoutcast.i1-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "llama-guard-3-8b"
+  urls:
+    - https://huggingface.co/meta-llama/Llama-Guard-3-8B
+    - https://huggingface.co/QuantFactory/Llama-Guard-3-8B-GGUF
+  description: |
+    Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification) and in LLM responses (response classification). It acts as an LLM – it generates text in its output that indicates whether a given prompt or response is safe or unsafe, and if unsafe, it also lists the content categories violated.
+
+    Llama Guard 3 was aligned to safeguard against the MLCommons standardized hazards taxonomy and designed to support Llama 3.1 capabilities. Specifically, it provides content moderation in 8 languages, and was optimized to support safety and security for search and code interpreter tool calls.
+  overrides:
+    parameters:
+      model: Llama-Guard-3-8B.Q4_K_M.gguf
+  files:
+    - filename: Llama-Guard-3-8B.Q4_K_M.gguf
+      sha256: c5ea8760a1e544eea66a8915fcc3fbd2c67357ea2ee6871a9e6a6c33b64d4981
+      uri: huggingface://QuantFactory/Llama-Guard-3-8B-GGUF/Llama-Guard-3-8B.Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "genius-llama3.1-i1"
+  icon: https://github.com/fangyuan-ksgk/GeniusUpload/assets/66006349/7272c93e-9806-461c-a3d0-2e50ef2b7af0
+  urls:
+    - https://huggingface.co/Ksgk-fy/Genius-Llama3.1
+    - https://huggingface.co/mradermacher/Genius-Llama3.1-i1-GGUF
+  description: |
+    Finetuned Llama-3.1 base on Lex Fridman's podcast transcript.
+  overrides:
+    parameters:
+      model: Genius-Llama3.1.i1-Q4_K_M.gguf
+  files:
+    - filename: Genius-Llama3.1.i1-Q4_K_M.gguf
+      sha256: a272bb2a6ab7ed565738733fb8af8e345b177eba9e76ce615ea845c25ebf8cd5
+      uri: huggingface://mradermacher/Genius-Llama3.1-i1-GGUF/Genius-Llama3.1.i1-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "llama3.1-8b-chinese-chat"
+  urls:
+    - https://huggingface.co/shenzhi-wang/Llama3.1-8B-Chinese-Chat
+    - https://huggingface.co/QuantFactory/Llama3.1-8B-Chinese-Chat-GGUF
+  description: |
+    llama3.1-8B-Chinese-Chat is an instruction-tuned language model for Chinese & English users with various abilities such as roleplaying & tool-using built upon the Meta-Llama-3.1-8B-Instruct model. Developers: [Shenzhi Wang](https://shenzhi-wang.netlify.app)*, [Yaowei Zheng](https://github.com/hiyouga)*, Guoyin Wang (in.ai), Shiji Song, Gao Huang. (*: Equal Contribution) - License: [Llama-3.1 License](https://huggingface.co/meta-llama/Meta-Llla...
+    m-3.1-8B/blob/main/LICENSE) - Base Model: Meta-Llama-3.1-8B-Instruct - Model Size: 8.03B - Context length: 128K(reported by [Meta-Llama-3.1-8B-Instruct model](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct), untested for our Chinese model)
+  overrides:
+    parameters:
+      model: Llama3.1-8B-Chinese-Chat.Q4_K_M.gguf
+  files:
+    - filename: Llama3.1-8B-Chinese-Chat.Q4_K_M.gguf
+      sha256: 824847b6cca82c4d60107c6a059d80ba975a68543e6effd98880435436ddba06
+      uri: huggingface://QuantFactory/Llama3.1-8B-Chinese-Chat-GGUF/Llama3.1-8B-Chinese-Chat.Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "llama3.1-70b-chinese-chat"
+  urls:
+    - https://huggingface.co/shenzhi-wang/Llama3.1-70B-Chinese-Chat
+    - https://huggingface.co/mradermacher/Llama3.1-70B-Chinese-Chat-GGUF
+  description: |
+    "Llama3.1-70B-Chinese-Chat" is a 70-billion parameter large language model pre-trained on a large corpus of Chinese text data. It is designed for chat and dialog applications, and can generate human-like responses to various prompts and inputs. The model is based on the Llama3.1 architecture and has been fine-tuned for Chinese language understanding and generation. It can be used for a wide range of natural language processing tasks, including language translation, text summarization, question answering, and more.
+  overrides:
+    parameters:
+      model: Llama3.1-70B-Chinese-Chat.Q4_K_M.gguf
+  files:
+    - filename: Llama3.1-70B-Chinese-Chat.Q4_K_M.gguf
+      sha256: 395cff3cce2b092f840b68eb6e31f4c8b670bc8e3854bbb230df8334369e671d
+      uri: huggingface://mradermacher/Llama3.1-70B-Chinese-Chat-GGUF/Llama3.1-70B-Chinese-Chat.Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "meta-llama-3.1-instruct-9.99b-brainstorm-10x-form-3"
+  urls:
+    - https://huggingface.co/DavidAU/Meta-Llama-3.1-Instruct-9.99B-BRAINSTORM-10x-FORM-3-GGUF
+  description: |
+    The Meta-Llama-3.1-8B Instruct model is a large language model trained on a diverse range of text data, with the goal of generating high-quality and coherent text in response to user input. This model is enhanced through a process called "Brainstorm", which involves expanding and recalibrating the model's reasoning center to improve its creative and generative capabilities. The resulting model is capable of generating detailed, vivid, and nuanced text, with a focus on prose quality, conceptually complex responses, and a deeper understanding of the user's intent. The Brainstorm process is designed to enhance the model's performance in creative writing, roleplaying, and story generation, and to improve its ability to generate coherent and engaging text in a wide range of contexts. The model is based on the Llama3 architecture and has been fine-tuned using the Instruct framework, which provides it with a strong foundation for understanding natural language instructions and generating appropriate responses. The model can be used for a variety of tasks, including creative writing,Generating coherent and detailed text, exploring different perspectives and scenarios, and brainstorming ideas.
+  overrides:
+    parameters:
+      model: Meta-Llama-3.1-8B-Instruct-Instruct-exp10-3-Q4_K_M.gguf
+  files:
+    - filename: Meta-Llama-3.1-8B-Instruct-Instruct-exp10-3-Q4_K_M.gguf
+      sha256: f52ff984100b1ff6acfbd7ed1df770064118274a54ae5d48749400a662113615
+      uri: huggingface://DavidAU/Meta-Llama-3.1-Instruct-9.99B-BRAINSTORM-10x-FORM-3-GGUF/Meta-Llama-3.1-8B-Instruct-Instruct-exp10-3-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "llama-3.1-techne-rp-8b-v1"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/633a809fa4a8f33508dce32c/BMdwgJ6cHZWbiGL48Q-Wq.png
+  urls:
+    - https://huggingface.co/athirdpath/Llama-3.1-Techne-RP-8b-v1
+    - https://huggingface.co/mradermacher/Llama-3.1-Techne-RP-8b-v1-GGUF
+  description: |
+    athirdpath/Llama-3.1-Instruct_NSFW-pretrained_e1-plus_reddit was further trained in the order below:
+    SFT
+
+    Doctor-Shotgun/no-robots-sharegpt
+    grimulkan/LimaRP-augmented
+    Inv/c2-logs-cleaned-deslopped
+
+    DPO
+
+    jondurbin/truthy-dpo-v0.1
+    Undi95/Weyaxi-humanish-dpo-project-noemoji
+    athirdpath/DPO_Pairs-Roleplay-Llama3-NSFW
+  overrides:
+    parameters:
+      model: Llama-3.1-Techne-RP-8b-v1.Q4_K_M.gguf
+  files:
+    - filename: Llama-3.1-Techne-RP-8b-v1.Q4_K_M.gguf
+      sha256: 6557c5d5091f2507d19ab1f8bfb9ceb4e1536a755ab70f148b18aeb33741580f
+      uri: huggingface://mradermacher/Llama-3.1-Techne-RP-8b-v1-GGUF/Llama-3.1-Techne-RP-8b-v1.Q4_K_M.gguf
+- !!merge <<: *llama31
+  icon: https://i.ibb.co/9hwFrvL/BLMs-Wkx-NQf-W-46-FZDg-ILhg.jpg
+  name: "llama-spark"
+  urls:
+    - https://huggingface.co/arcee-ai/Llama-Spark
+    - https://huggingface.co/arcee-ai/Llama-Spark-GGUF
+  description: |
+    Llama-Spark is a powerful conversational AI model developed by Arcee.ai. It's built on the foundation of Llama-3.1-8B and merges the power of our Tome Dataset with Llama-3.1-8B-Instruct, resulting in a remarkable conversationalist that punches well above its 8B parameter weight class.
+  overrides:
+    parameters:
+      model: llama-spark-dpo-v0.3-Q4_K_M.gguf
+  files:
+    - filename: llama-spark-dpo-v0.3-Q4_K_M.gguf
+      sha256: 41367168bbdc4b16eb80efcbee4dacc941781ee8748065940167fe6947b4e4c3
+      uri: huggingface://arcee-ai/Llama-Spark-GGUF/llama-spark-dpo-v0.3-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "l3.1-70b-glitz-v0.2-i1"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/634262af8d8089ebaefd410e/q2dOUnzc1GRbZp3YfzGXB.png
+  urls:
+    - https://huggingface.co/Fizzarolli/L3.1-70b-glitz-v0.2
+    - https://huggingface.co/mradermacher/L3.1-70b-glitz-v0.2-i1-GGUF
+  description: |
+    this is an experimental l3.1 70b finetuning run... that crashed midway through. however, the results are still interesting, so i wanted to publish them :3
+  overrides:
+    parameters:
+      model: L3.1-70b-glitz-v0.2.i1-Q4_K_M.gguf
+  files:
+    - filename: L3.1-70b-glitz-v0.2.i1-Q4_K_M.gguf
+      sha256: 585efc83e7f6893043be2487fc09c914a381fb463ce97942ef2f25ae85103bcd
+      uri: huggingface://mradermacher/L3.1-70b-glitz-v0.2-i1-GGUF/L3.1-70b-glitz-v0.2.i1-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "humanish-roleplay-llama-3.1-8b-i1"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/5fad8602b8423e1d80b8a965/VPwtjS3BtjEEEq7ck4kAQ.webp
+  urls:
+    - https://huggingface.co/mradermacher/Humanish-Roleplay-Llama-3.1-8B-i1-GGUF
+  description: |
+    A DPO-tuned Llama-3.1 to behave more "humanish", i.e., avoiding all the AI assistant slop. It also works for role-play (RP). To achieve this, the model was fine-tuned over a series of datasets:
+    General conversations from Claude Opus, from Undi95/Meta-Llama-3.1-8B-Claude
+    Undi95/Weyaxi-humanish-dpo-project-noemoji, to make the model react as a human, rejecting assistant-like or too neutral responses.
+    ResplendentAI/NSFW_RP_Format_DPO, to steer the model towards using the *action* format in RP settings. Works best if in the first message you also use this format naturally (see example)
+  overrides:
+    parameters:
+      model: Humanish-Roleplay-Llama-3.1-8B.i1-Q4_K_M.gguf
+  files:
+    - filename: Humanish-Roleplay-Llama-3.1-8B.i1-Q4_K_M.gguf
+      sha256: 18cf753684e5226b51f3defc708852ca4924f50dc8bc31c9a7d0a036a477b7a7
+      uri: huggingface://mradermacher/Humanish-Roleplay-Llama-3.1-8B-i1-GGUF/Humanish-Roleplay-Llama-3.1-8B.i1-Q4_K_M.gguf
 ## Uncensored models
 - !!merge <<: *llama31
   name: "darkidol-llama-3.1-8b-instruct-1.0-uncensored-i1"
@@ -173,8 +352,8 @@
       model: DarkIdol-Llama-3.1-8B-Instruct-1.0-Uncensored.i1-Q4_K_M.gguf
   files:
     - filename: DarkIdol-Llama-3.1-8B-Instruct-1.0-Uncensored.i1-Q4_K_M.gguf
-      sha256: 6730efc0628c7534189487b52ed5a358a0a2c3ecb062824eccc8e0444eaa212f
+      sha256: 9632316d735365087f36083dec320a71995650deb86cf74f39ab071e43114eb8
       uri: huggingface://mradermacher/DarkIdol-Llama-3.1-8B-Instruct-1.0-Uncensored-i1-GGUF/DarkIdol-Llama-3.1-8B-Instruct-1.0-Uncensored.i1-Q4_K_M.gguf
 - !!merge <<: *llama31
   name: "darkidol-llama-3.1-8b-instruct-1.1-uncensored-iq-imatrix-request"
   icon: https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/iDV5GTVJbjkvMp1set-ZC.png
@@ -237,6 +416,59 @@
     - filename: Lumimaid-v0.2-8B.Q4_K_M.gguf
       sha256: c8024fcb49c71410903d0d076a1048249fa48b31637bac5177bf5c3f3d603d85
       uri: huggingface://mradermacher/Lumimaid-v0.2-8B-GGUF/Lumimaid-v0.2-8B.Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "lumimaid-v0.2-70b-i1"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/63ab1241ad514ca8d1430003/HY1KTq6FMAm-CwmY8-ndO.png
+  urls:
+    - https://huggingface.co/NeverSleep/Lumimaid-v0.2-70B
+    - https://huggingface.co/mradermacher/Lumimaid-v0.2-70B-i1-GGUF
+  description: |
+    This model is based on: Meta-Llama-3.1-8B-Instruct
+
+    Wandb: https://wandb.ai/undis95/Lumi-Llama-3-1-8B?nw=nwuserundis95
+
+    Lumimaid 0.1 -> 0.2 is a HUGE step up dataset wise.
+
+    As some people have told us our models are sloppy, Ikari decided to say fuck it and literally nuke all chats out with most slop.
+
+    Our dataset stayed the same since day one, we added data over time, cleaned them, and repeat. After not releasing model for a while because we were never satisfied, we think it's time to come back!
+  overrides:
+    parameters:
+      model: Lumimaid-v0.2-70B.i1-Q4_K_M.gguf
+  files:
+    - filename: Lumimaid-v0.2-70B.i1-Q4_K_M.gguf
+      sha256: 4857da8685cb0f3d2b8b8c91fb0c07b35b863eb7c185e93ed83ac338e095cbb5
+      uri: huggingface://mradermacher/Lumimaid-v0.2-70B-i1-GGUF/Lumimaid-v0.2-70B.i1-Q4_K_M.gguf
+- !!merge <<: *llama31
+  name: "l3.1-8b-celeste-v1.5"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/630cf5d14ca0a22768bbe10c/QcU3xEgVu18jeFtMFxIw-.webp
+  urls:
+    - https://huggingface.co/nothingiisreal/L3.1-8B-Celeste-V1.5
+    - https://huggingface.co/bartowski/L3.1-8B-Celeste-V1.5-GGUF
+  description: |
+    The LLM model is a large language model trained on a combination of datasets including nothingiisreal/c2-logs-cleaned, kalomaze/Opus_Instruct_25k, and nothingiisreal/Reddit-Dirty-And-WritingPrompts. The training was performed on a combination of English-language data using the Hugging Face Transformers library.
+    Trained on LLaMA 3.1 8B Instruct at 8K context using a new mix of Reddit Writing Prompts, Kalo's Opus 25K Instruct and c2 logs cleaned This version has the highest coherency and is very strong on OOC: instruct following.
+  overrides:
+    parameters:
+      model: L3.1-8B-Celeste-V1.5-Q4_K_M.gguf
+  files:
+    - filename: L3.1-8B-Celeste-V1.5-Q4_K_M.gguf
+      sha256: a408dfbbd91ed5561f70d3129af040dfd06704d6c7fa21146aa9f09714aafbc6
+      uri: huggingface://bartowski/L3.1-8B-Celeste-V1.5-GGUF/L3.1-8B-Celeste-V1.5-Q4_K_M.gguf
+- !!merge <<: *llama31
+  icon: https://cdn-uploads.huggingface.co/production/uploads/659c4ecb413a1376bee2f661/szz8sIxofYzSe5XPet2pO.png
+  name: "kumiho-v1-rp-uwu-8b"
+  urls:
+    - https://huggingface.co/juvi21/Kumiho-v1-rp-UwU-8B-GGUF
+  description: |
+    Meet Kumiho-V1 uwu. Kumiho-V1-rp-UwU aims to be a generalist model with specialization in roleplay and writing capabilities. It is finetuned and merged with various models, with a heavy base of Meta's LLaMA 3.1-8B as base model, and Claude 3.5 Sonnet and Claude 3 Opus generated synthetic data.
+  overrides:
+    parameters:
+      model: Kumiho-v1-rp-UwU-8B-gguf-q4_k_m.gguf
+  files:
+    - filename: Kumiho-v1-rp-UwU-8B-gguf-q4_k_m.gguf
+      sha256: a1deb46675418277cf785a406cd1508fec556ff6e4d45d2231eb2a82986d52d0
+      uri: huggingface://juvi21/Kumiho-v1-rp-UwU-8B-GGUF/Kumiho-v1-rp-UwU-8B-gguf-q4_k_m.gguf
 - &deepseek
   ## Deepseek
   url: "github:mudler/LocalAI/gallery/deepseek.yaml@master"
@@ -515,6 +747,40 @@
     - filename: StellarDong-72b.i1-Q4_K_M.gguf
       sha256: 4c5012f0a034f40a044904891343ade2594f29c28a8a9d8052916de4dc5a61df
      uri: huggingface://mradermacher/StellarDong-72b-i1-GGUF/StellarDong-72b.i1-Q4_K_M.gguf
+- !!merge <<: *qwen2
+  name: "magnum-32b-v1-i1"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/635567189c72a7e742f1419c/PK7xRSd18Du0bX-w_t-9c.png
+  urls:
+    - https://huggingface.co/anthracite-org/magnum-32b-v1
+    - https://huggingface.co/mradermacher/magnum-32b-v1-i1-GGUF
+  description: |
+    This is the second in a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet and Opus. This model is fine-tuned on top of Qwen1.5 32B.
+  overrides:
+    parameters:
+      model: magnum-32b-v1.i1-Q4_K_M.gguf
+  files:
+    - filename: magnum-32b-v1.i1-Q4_K_M.gguf
+      sha256: a31704ce0d7e5b774f155522b9ab7ef6015a4ece4e9056bf4dfc6cac561ff0a3
+      uri: huggingface://mradermacher/magnum-32b-v1-i1-GGUF/magnum-32b-v1.i1-Q4_K_M.gguf
+- !!merge <<: *qwen2
+  name: "tifa-7b-qwen2-v0.1"
+  urls:
+    - https://huggingface.co/Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF
+  description: |
+    The Tifa role-playing language model is a high-performance language model based on a self-developed 220B model distillation, with a new base model of qwen2-7B. The model has been converted to gguf format for running in the Ollama framework, providing excellent dialogue and text generation capabilities.
+
+    The original model was trained on a large-scale industrial dataset and then fine-tuned with 400GB of novel data and 20GB of multi-round dialogue directive data to achieve good role-playing effects.
+
+    The Tifa model is suitable for multi-round dialogue processing, role-playing and scenario simulation, EFX industrial knowledge integration, and high-quality literary creation.
+
+    Note: The Tifa model is in Chinese and English, with 7.6% of the data in Chinese role-playing and 4.2% in English role-playing. The model has been trained with a mix of EFX industrial field parameters and question-answer dialogues generated from 220B model outputs since 2023. The recommended quantization method is f16, as it retains more detail and accuracy in the model's performance.
+  overrides:
+    parameters:
+      model: tifa-7b-qwen2-v0.1.q4_k_m.gguf
+  files:
+    - filename: tifa-7b-qwen2-v0.1.q4_k_m.gguf
+      sha256: 1f5adbe8cb0a6400f51abdca3bf4e32284ebff73cc681a43abb35c0a6ccd3820
+      uri: huggingface://Tifa-RP/Tifa-7B-Qwen2-v0.1-GGUF/tifa-7b-qwen2-v0.1.q4_k_m.gguf
 - &mistral03
   ## START Mistral
   url: "github:mudler/LocalAI/gallery/mistral-0.3.yaml@master"
@@ -637,6 +903,26 @@
     - filename: lumimaid-v0.2-12b-q4_k_m.gguf
       sha256: f72299858a07e52be920b86d42ddcfcd5008b961d601ef6fd6a98a3377adccbf
       uri: huggingface://mudler/Lumimaid-v0.2-12B-Q4_K_M-GGUF/lumimaid-v0.2-12b-q4_k_m.gguf
+- !!merge <<: *mistral03
+  url: "github:mudler/LocalAI/gallery/chatml.yaml@master"
+  name: "mn-12b-celeste-v1.9"
+  icon: https://cdn-uploads.huggingface.co/production/uploads/630cf5d14ca0a22768bbe10c/QcU3xEgVu18jeFtMFxIw-.webp
+  urls:
+    - https://huggingface.co/nothingiisreal/MN-12B-Celeste-V1.9
+    - https://huggingface.co/mradermacher/MN-12B-Celeste-V1.9-GGUF
+  description: |
+    Mistral Nemo 12B Celeste V1.9
+
+    This is a story writing and roleplaying model trained on Mistral NeMo 12B Instruct at 8K context using Reddit Writing Prompts, Kalo's Opus 25K Instruct and c2 logs cleaned
+
+    This version has improved NSFW, smarter and more active narration. It's also trained with ChatML tokens so there should be no EOS bleeding whatsoever.
+  overrides:
+    parameters:
+      model: MN-12B-Celeste-V1.9.Q4_K_M.gguf
+  files:
+    - filename: MN-12B-Celeste-V1.9.Q4_K_M.gguf
+      sha256: 019daeaa63d82d55d1ea623b9c255deea6793af4044bb4994d2b4d09e8959f7b
+      uri: huggingface://mradermacher/MN-12B-Celeste-V1.9-GGUF/MN-12B-Celeste-V1.9.Q4_K_M.gguf
 - &mudler
   ### START mudler's LocalAI specific-models
   url: "github:mudler/LocalAI/gallery/mudler.yaml@master"
@@ -1052,6 +1338,114 @@
|
||||
- filename: Gemmoy-9B-G2-MK.3.i1-Q4_K_M.gguf
|
||||
sha256: 0d1004a246fbda7f1408a6841129b73c4100e697bd0a6806fc698eabbb0802a1
|
||||
uri: huggingface://mradermacher/Gemmoy-9B-G2-MK.3-i1-GGUF/Gemmoy-9B-G2-MK.3.i1-Q4_K_M.gguf
|
||||
- !!merge <<: *gemma
  name: "sunfall-simpo-9b"
  urls:
    - https://huggingface.co/mradermacher/sunfall-SimPO-9B-GGUF
  description: |
    A crazy idea: what if you put the LoRA from crestf411/sunfall-peft on top of princeton-nlp/gemma-2-9b-it-SimPO? This model exists solely for that purpose.
  overrides:
    parameters:
      model: sunfall-SimPO-9B.Q4_K_M.gguf
  files:
    - filename: sunfall-SimPO-9B.Q4_K_M.gguf
      sha256: 810c51c6ce34107706d921531b97cfa409cd53c215d18b88bce7cdb617f73ceb
      uri: huggingface://mradermacher/sunfall-SimPO-9B-GGUF/sunfall-SimPO-9B.Q4_K_M.gguf
- !!merge <<: *gemma
  name: "sunfall-simpo-9b-i1"
  urls:
    - https://huggingface.co/mradermacher/sunfall-SimPO-9B-i1-GGUF
  description: |
    A crazy idea: what if you put the LoRA from crestf411/sunfall-peft on top of princeton-nlp/gemma-2-9b-it-SimPO? This model exists solely for that purpose.
  overrides:
    parameters:
      model: sunfall-SimPO-9B.i1-Q4_K_M.gguf
  files:
    - filename: sunfall-SimPO-9B.i1-Q4_K_M.gguf
      sha256: edde9df372a9a5b2316dc6822dc2f52f5a2059103dd7f08072e5a5355c5f5d0b
      uri: huggingface://mradermacher/sunfall-SimPO-9B-i1-GGUF/sunfall-SimPO-9B.i1-Q4_K_M.gguf
- !!merge <<: *gemma
  name: "seeker-9b"
  icon: https://huggingface.co/lodrick-the-lafted/seeker-9b/resolve/main/seeker.webp
  urls:
    - https://huggingface.co/lodrick-the-lafted/seeker-9b
    - https://huggingface.co/mradermacher/seeker-9b-GGUF
  description: |
    Seeker-9b is a 9-billion-parameter large language model from the lodrick-the-lafted repository, trained on a diverse range of text data. It can be used for a variety of natural language processing tasks such as language translation, text summarization, and text generation. It supports the English language and is available under the Apache-2.0 license.
  overrides:
    parameters:
      model: seeker-9b.Q4_K_M.gguf
  files:
    - filename: seeker-9b.Q4_K_M.gguf
      sha256: 7658e5bdad96dc8d232f83cff7c3fe5fa993defbfd3e728dcc7436352574a00a
      uri: huggingface://mradermacher/seeker-9b-GGUF/seeker-9b.Q4_K_M.gguf
- !!merge <<: *gemma
  name: "gemmasutra-pro-27b-v1"
  icon: https://cdn-uploads.huggingface.co/production/uploads/65f2fd1c25b848bd061b5c2e/w0Oi8TReoQNT3ljm5Wf6c.webp
  urls:
    - https://huggingface.co/TheDrummer/Gemmasutra-Pro-27B-v1
    - https://huggingface.co/mradermacher/Gemmasutra-Pro-27B-v1-GGUF
  description: |
    An RP model with impressive flexibility. Finetuned by yours truly.
  overrides:
    parameters:
      model: Gemmasutra-Pro-27B-v1.Q4_K_M.gguf
  files:
    - filename: Gemmasutra-Pro-27B-v1.Q4_K_M.gguf
      sha256: 336a2fbf142849fcc20e432123433807b6c7b09988652ef583a63636a0f90218
      uri: huggingface://mradermacher/Gemmasutra-Pro-27B-v1-GGUF/Gemmasutra-Pro-27B-v1.Q4_K_M.gguf
- !!merge <<: *gemma
  name: "gemmasutra-mini-2b-v1"
  icon: https://cdn-uploads.huggingface.co/production/uploads/65f2fd1c25b848bd061b5c2e/w0Oi8TReoQNT3ljm5Wf6c.webp
  urls:
    - https://huggingface.co/TheDrummer/Gemmasutra-Mini-2B-v1-GGUF
  description: |
    A small, 2-billion-parameter language model trained for role-playing purposes. The model is designed to work well in various settings, such as in the browser, on a laptop, or even on a Raspberry Pi. It has been fine-tuned for RP use and claims to provide a satisfying experience even in low-resource environments. The model is uncensored and unaligned, and it can be used with the Gemma Instruct template or with chat completion. For the best experience, it is recommended to modify the template to support the `system` role. The model card also features examples of its output, highlighting its versatility and creativity.
  overrides:
    parameters:
      model: Gemmasutra-Mini-2B-v1i-Q4_K_M.gguf
  files:
    - filename: Gemmasutra-Mini-2B-v1i-Q4_K_M.gguf
      sha256: 29ba3db911fbadef4452ba757ddd9ce58fb892b7a872f19eefd0743c961797fb
      uri: huggingface://TheDrummer/Gemmasutra-Mini-2B-v1-GGUF/Gemmasutra-Mini-2B-v1i-Q4_K_M.gguf
- !!merge <<: *gemma
  name: "tarnished-9b-i1"
  icon: https://huggingface.co/lodrick-the-lafted/tarnished-9b/resolve/main/nox.jpg
  urls:
    - https://huggingface.co/lodrick-the-lafted/tarnished-9b
    - https://huggingface.co/mradermacher/tarnished-9b-i1-GGUF
  description: |
    Ah, so you've heard whispers on the winds, have you? 🧐

    Imagine this:
    Tarnished-9b, a name that echoes with the rasp of coin-hungry merchants and the clatter of forgotten machinery. This LLM speaks with the voice of those who straddle the line between worlds, who've tasted the bittersweet nectar of eldritch power and the tang of the Interdimensional Trade Council.

    It's a tongue that dances with secrets, a whisperer of lore lost and found. Its words may guide you through the twisting paths of history, revealing truths hidden beneath layers of dust and time.

    But be warned, Tarnished One! For knowledge comes at a price. The LLM's gaze can pierce the veil of reality, but it can also lure you into the labyrinthine depths of madness.

    Dare you tread this path?
  overrides:
    parameters:
      model: tarnished-9b.i1-Q4_K_M.gguf
  files:
    - filename: tarnished-9b.i1-Q4_K_M.gguf
      sha256: 62ab09124b3f6698bd94ef966533ae5d427d87f6bdc09f6f46917def96420a0c
      uri: huggingface://mradermacher/tarnished-9b-i1-GGUF/tarnished-9b.i1-Q4_K_M.gguf
- !!merge <<: *gemma
  name: "shieldgemma-9b-i1"
  urls:
    - https://huggingface.co/google/shieldgemma-9b
    - https://huggingface.co/mradermacher/shieldgemma-9b-i1-GGUF
  description: |
    ShieldGemma is a series of safety content moderation models built upon Gemma 2 that target four harm categories (sexually explicit, dangerous content, hate, and harassment). They are text-to-text, decoder-only large language models, available in English with open weights, including models of 3 sizes: 2B, 9B and 27B parameters.
  overrides:
    parameters:
      model: shieldgemma-9b.i1-Q4_K_M.gguf
  files:
    - filename: shieldgemma-9b.i1-Q4_K_M.gguf
      sha256: ffa7eaadcc0c7d0544fda5b0d86bba3ffa3431b673e5b2135f421cfe65bd8732
      uri: huggingface://mradermacher/shieldgemma-9b-i1-GGUF/shieldgemma-9b.i1-Q4_K_M.gguf
- &llama3
  url: "github:mudler/LocalAI/gallery/llama3-instruct.yaml@master"
  icon: https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/aJJxKus1wP5N-euvHEUq7.png
@@ -2182,6 +2576,81 @@
    - filename: L3-Stheno-Maid-Blackroot-Grand-HORROR-16B-D_AU-Q4_K_M.gguf
      sha256: ae29f38d73dfb04415821405cf8b319fc42d78d0cdd0da91db147d12e68030fe
      uri: huggingface://DavidAU/L3-Stheno-Maid-Blackroot-Grand-HORROR-16B-GGUF/L3-Stheno-Maid-Blackroot-Grand-HORROR-16B-D_AU-Q4_K_M.gguf
- !!merge <<: *llama3
  name: "meta-llama-3-instruct-12.2b-brainstorm-20x-form-8"
  urls:
    - https://huggingface.co/DavidAU/Meta-Llama-3-Instruct-12.2B-BRAINSTORM-20x-FORM-8-GGUF
  description: |
    Meta-Llama-3-8B Instruct (now at 12.2B) with the Brainstorm process, which increases its performance at the core level for any creative use case. It has calibrations that allow it to exceed the logic-solving abilities of the original model. The Brainstorm process expands the reasoning center of the LLM, then reassembles and calibrates it, introducing subtle changes into the reasoning process. This enhances the model's detail, concept, connection to the "world", general concept connections, prose quality, and prose length without affecting instruction following. It improves coherence, description, simile, metaphors, and emotional engagement, and takes fewer liberties with instructions while following them more closely. The model's performance is further enhanced by other technologies like "Ultra" (precision), "Neo Imatrix" (custom imatrix datasets), and "X-quants" (custom application of the imatrix process). It has been tested on multiple LLaMA2, LLaMA3, and Mistral models of various parameter sizes.
  overrides:
    parameters:
      model: Meta-Llama-3-8B-Instruct-exp20-8-Q4_K_M.gguf
  files:
    - filename: Meta-Llama-3-8B-Instruct-exp20-8-Q4_K_M.gguf
      sha256: 5568ab6195ab5da703f728cc118108ddcbe97255e3ba4a543b531acdf082b999
      uri: huggingface://DavidAU/Meta-Llama-3-Instruct-12.2B-BRAINSTORM-20x-FORM-8-GGUF/Meta-Llama-3-8B-Instruct-exp20-8-Q4_K_M.gguf
- !!merge <<: *llama3
  name: "loki-base-i1"
  urls:
    - https://huggingface.co/MrRobotoAI/Loki-base
    - https://huggingface.co/mradermacher/Loki-base-i1-GGUF
  description: |
    Merge of several models using mergekit:
    - model: abacusai/Llama-3-Smaug-8B
    - model: Aculi/Llama3-Sophie
    - model: ajibawa-2023/Uncensored-Frank-Llama-3-8B
    - model: Blackroot/Llama-3-Gamma-Twist
    - model: Casual-Autopsy/L3-Super-Nova-RP-8B
    - model: Casual-Autopsy/L3-Umbral-Mind-RP-v3.0-8B
    - model: cgato/L3-TheSpice-8b-v0.8.3
    - model: ChaoticNeutrals/Hathor_Respawn-L3-8B-v0.8
    - model: ChaoticNeutrals/Hathor_RP-v.01-L3-8B
    - model: chargoddard/prometheus-2-llama-3-8b
    - model: chujiezheng/Llama-3-Instruct-8B-SimPO-ExPO
    - model: chujiezheng/LLaMA3-iterative-DPO-final-ExPO
    - model: Fizzarolli/L3-8b-Rosier-v1
    - model: flammenai/Mahou-1.2a-llama3-8B
    - model: HaitameLaf/Llama-3-8B-StoryGenerator
    - model: HPAI-BSC/Llama3-Aloe-8B-Alpha
    - model: iRyanBell/ARC1
    - model: iRyanBell/ARC1-II
    - model: lemon07r/Llama-3-RedMagic4-8B
    - model: lemon07r/Lllama-3-RedElixir-8B
    - model: Locutusque/Llama-3-Hercules-5.0-8B
    - model: Magpie-Align/Llama-3-8B-Magpie-Pro-MT-SFT-v0.1
    - model: maldv/badger-lambda-llama-3-8b
    - model: maldv/badger-mu-llama-3-8b
    - model: maldv/badger-writer-llama-3-8b
    - model: mlabonne/NeuralDaredevil-8B-abliterated
    - model: MrRobotoAI/Fiction-Writer-6
    - model: MrRobotoAI/Unholy-Thoth-8B-v2
    - model: nbeerbower/llama-3-spicy-abliterated-stella-8B
    - model: NeverSleep/Llama-3-Lumimaid-8B-v0.1
    - model: NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS
    - model: Nitral-AI/Hathor_Sofit-L3-8B-v1
    - model: Nitral-AI/Hathor_Stable-v0.2-L3-8B
    - model: Nitral-AI/Hathor_Tahsin-L3-8B-v0.85
    - model: Nitral-AI/Poppy_Porpoise-0.72-L3-8B
    - model: nothingiisreal/L3-8B-Instruct-Abliterated-DWP
    - model: nothingiisreal/L3-8B-Stheno-Horny-v3.3-32K
    - model: NousResearch/Hermes-2-Theta-Llama-3-8B
    - model: OwenArli/Awanllm-Llama-3-8B-Cumulus-v1.0
    - model: refuelai/Llama-3-Refueled
    - model: ResplendentAI/Nymph_8B
    - model: shauray/Llama3-8B-DPO-uncensored
    - model: SicariusSicariiStuff/LLAMA-3_8B_Unaligned_Alpha
    - model: TIGER-Lab/MAmmoTH2-8B-Plus
    - model: Undi95/Llama-3-LewdPlay-8B
    - model: Undi95/Meta-Llama-3-8B-hf
    - model: VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct
    - model: WhiteRabbitNeo/Llama-3-WhiteRabbitNeo-8B-v2.0
  overrides:
    parameters:
      model: Loki-base.i1-Q4_K_M.gguf
  files:
    - filename: Loki-base.i1-Q4_K_M.gguf
      sha256: 60a4357fa399bfd18aa841cc529da09439791331d117a4f06f0467d002b385bb
      uri: huggingface://mradermacher/Loki-base-i1-GGUF/Loki-base.i1-Q4_K_M.gguf
- &dolphin
  name: "dolphin-2.9-llama3-8b"
  url: "github:mudler/LocalAI/gallery/hermes-2-pro-mistral.yaml@master"
@@ -3602,6 +4071,19 @@
    - filename: calme-2.4-llama3-70b.Q4_K_M.gguf
      sha256: 0b44ac8a88395dfc60f1b9d3cfffc0ffef74ec0a302e610ef91fc787187568f2
      uri: huggingface://mradermacher/calme-2.4-llama3-70b-GGUF/calme-2.4-llama3-70b.Q4_K_M.gguf
- !!merge <<: *llama3
  name: "meta-llama-3-instruct-8.9b-brainstorm-5x-form-11"
  urls:
    - https://huggingface.co/DavidAU/Meta-Llama-3-Instruct-8.9B-BRAINSTORM-5x-FORM-11-GGUF
  description: |
    Meta-Llama-3-8B Instruct (now at 8.9B) is an enhanced version of the model, specifically designed for creative use cases such as story writing, roleplaying, and fiction. It has been augmented through the "Brainstorm" process, which expands and calibrates the reasoning center of the LLM to improve its performance in various creative tasks. The enhancements include more detailed and nuanced descriptions, stronger prose, and a greater sense of immersion in the story. The model is capable of generating long and vivid content, with fewer clichés and more focused, coherent narratives. Users can provide more instructions and details to elicit stronger and more engaging responses. The "Brainstorm" process has been tested on multiple LLM families, including Llama2, Llama3, and Mistral, as well as on individual models like Llama3 Instruct, Mistral Instruct, and custom fine-tuned models.
  overrides:
    parameters:
      model: Meta-Llama-3-8B-Instruct-exp5-11-Q4_K_M.gguf
  files:
    - filename: Meta-Llama-3-8B-Instruct-exp5-11-Q4_K_M.gguf
      sha256: 5dd81b8b809667d10036499affdd1461cf95af50b405cbc9f800b421a4b60e98
      uri: huggingface://DavidAU/Meta-Llama-3-Instruct-8.9B-BRAINSTORM-5x-FORM-11-GGUF/Meta-Llama-3-8B-Instruct-exp5-11-Q4_K_M.gguf
- &command-R
  ### START Command-r
  url: "github:mudler/LocalAI/gallery/command-r.yaml@master"
@@ -4170,6 +4652,28 @@
    - filename: "Codestral-22B-v0.1-Q4_K_M.gguf"
      uri: "huggingface://bartowski/Codestral-22B-v0.1-GGUF/Codestral-22B-v0.1-Q4_K_M.gguf"
      sha256: 003e48ed892850b80994fcddca2bd6b833b092a4ef2db2853c33a3144245e06c
- !!merge <<: *codellama
  url: "github:mudler/LocalAI/gallery/alpaca.yaml@master"
  icon: https://huggingface.co/Nan-Do/LeetCodeWizard_7B_V1.1/resolve/main/LeetCodeWizardLogo.png
  name: "leetcodewizard_7b_v1.1-i1"
  urls:
    - https://huggingface.co/Nan-Do/LeetCodeWizard_7B_V1.1
    - https://huggingface.co/mradermacher/LeetCodeWizard_7B_V1.1-i1-GGUF
  description: |
    LeetCodeWizard is a coding large language model specifically trained to solve and explain Leetcode (or any) programming problems.
    This model is a fine-tuned version of WizardCoder-Python-7B on a dataset of Leetcode problems.
    Model capabilities:

    It should be able to solve most of the problems found at Leetcode and even pass the sample interviews they offer on the site.

    It can write both the code and the explanations for the solutions.
  overrides:
    parameters:
      model: LeetCodeWizard_7B_V1.1.i1-Q4_K_M.gguf
  files:
    - filename: LeetCodeWizard_7B_V1.1.i1-Q4_K_M.gguf
      sha256: 19720d8e1ba89d32c6f88ed6518caf0251f9e3ec011297929c801efc5ea979f4
      uri: huggingface://mradermacher/LeetCodeWizard_7B_V1.1-i1-GGUF/LeetCodeWizard_7B_V1.1.i1-Q4_K_M.gguf
- &llm-compiler
  url: "github:mudler/LocalAI/gallery/codellama.yaml@master"
  name: "llm-compiler-13b-imat"

49
pkg/downloader/huggingface.go
Normal file
@@ -0,0 +1,49 @@
package downloader

import (
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "strings"
)

type HuggingFaceScanResult struct {
    RepositoryId        string   `json:"repositoryId"`
    Revision            string   `json:"revision"`
    HasUnsafeFiles      bool     `json:"hasUnsafeFile"`
    ClamAVInfectedFiles []string `json:"clamAVInfectedFiles"`
    DangerousPickles    []string `json:"dangerousPickles"`
    ScansDone           bool     `json:"scansDone"`
}

var ErrNonHuggingFaceFile = errors.New("not a huggingface repo")
var ErrUnsafeFilesFound = errors.New("unsafe files found")

func HuggingFaceScan(uri URI) (*HuggingFaceScanResult, error) {
    cleanParts := strings.Split(uri.ResolveURL(), "/")
    if len(cleanParts) <= 4 || cleanParts[2] != "huggingface.co" {
        return nil, ErrNonHuggingFaceFile
    }
    results, err := http.Get(fmt.Sprintf("https://huggingface.co/api/models/%s/%s/scan", cleanParts[3], cleanParts[4]))
    if err != nil {
        return nil, err
    }
    if results.StatusCode != 200 {
        return nil, fmt.Errorf("unexpected status code during HuggingFaceScan: %d", results.StatusCode)
    }
    scanResult := &HuggingFaceScanResult{}
    bodyBytes, err := io.ReadAll(results.Body)
    if err != nil {
        return nil, err
    }
    err = json.Unmarshal(bodyBytes, scanResult)
    if err != nil {
        return nil, err
    }
    if scanResult.HasUnsafeFiles {
        return scanResult, ErrUnsafeFilesFound
    }
    return scanResult, nil
}
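To show how the new scanner is meant to be driven, here is a minimal usage sketch (a hypothetical caller, not part of this commit; the import path and example URI are assumptions). Note that ErrUnsafeFilesFound is returned together with the populated result, so callers can report exactly which files were flagged:

package main

import (
    "errors"
    "fmt"

    "github.com/mudler/LocalAI/pkg/downloader" // assumed import path
)

func main() {
    uri := downloader.URI("huggingface://TheBloke/Mixtral-8x7B-v0.1-GGUF/mixtral-8x7b-v0.1.Q2_K.gguf")
    res, err := downloader.HuggingFaceScan(uri)
    if errors.Is(err, downloader.ErrUnsafeFilesFound) {
        // the scan result is still returned, so the offending files can be listed
        fmt.Println("unsafe files:", res.ClamAVInfectedFiles, res.DangerousPickles)
        return
    }
    if err != nil {
        fmt.Println("scan failed:", err)
        return
    }
    fmt.Println("scans done:", res.ScansDone)
}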
@@ -2,12 +2,10 @@ package downloader

import (
    "crypto/sha256"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "strconv"
@@ -28,13 +26,16 @@ const (
    HTTPSPrefix = "https://"
    GithubURI   = "github:"
    GithubURI2  = "github://"
    LocalPrefix = "file://"
)

func DownloadAndUnmarshal(url string, basePath string, f func(url string, i []byte) error) error {
    url = ConvertURL(url)
type URI string

    if strings.HasPrefix(url, "file://") {
        rawURL := strings.TrimPrefix(url, "file://")
func (uri URI) DownloadAndUnmarshal(basePath string, f func(url string, i []byte) error) error {
    url := uri.ResolveURL()

    if strings.HasPrefix(url, LocalPrefix) {
        rawURL := strings.TrimPrefix(url, LocalPrefix)
        // checks if the file is symbolic, and resolve if so - otherwise, this function returns the path unmodified.
        resolvedFile, err := filepath.EvalSymlinks(rawURL)
        if err != nil {
@@ -78,24 +79,54 @@ func DownloadAndUnmarshal(url string, basePath string, f func(url string, i []byte) error) error {
        return f(url, body)
}

func LooksLikeURL(s string) bool {
    return strings.HasPrefix(s, HTTPPrefix) ||
        strings.HasPrefix(s, HTTPSPrefix) ||
        strings.HasPrefix(s, HuggingFacePrefix) ||
        strings.HasPrefix(s, GithubURI) ||
        strings.HasPrefix(s, OllamaPrefix) ||
        strings.HasPrefix(s, OCIPrefix) ||
        strings.HasPrefix(s, GithubURI2)
func (u URI) FilenameFromUrl() (string, error) {
    f, err := filenameFromUrl(string(u))
    if err != nil || f == "" {
        f = utils.MD5(string(u))
        if strings.HasSuffix(string(u), ".yaml") || strings.HasSuffix(string(u), ".yml") {
            f = f + ".yaml"
        }
        err = nil
    }

    return f, err
}
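A small sketch of the fallback above (a hypothetical caller; the import path is assumed): the method first tries the URL basename and only falls back to an MD5-derived name when parsing fails or yields nothing:

package main

import (
    "fmt"

    "github.com/mudler/LocalAI/pkg/downloader" // assumed import path
)

func main() {
    uri := downloader.URI("huggingface://TheBloke/Mixtral-8x7B-v0.1-GGUF/mixtral-8x7b-v0.1.Q2_K.gguf@main")
    name, _ := uri.FilenameFromUrl()
    // prints "mixtral-8x7b-v0.1.Q2_K.gguf": the "@main" suffix is stripped
    // before parsing and the path basename is returned
    fmt.Println(name)
}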

func LooksLikeOCI(s string) bool {
    return strings.HasPrefix(s, OCIPrefix) || strings.HasPrefix(s, OllamaPrefix)
func filenameFromUrl(urlstr string) (string, error) {
    // strip anything after @
    if strings.Contains(urlstr, "@") {
        urlstr = strings.Split(urlstr, "@")[0]
    }

    u, err := url.Parse(urlstr)
    if err != nil {
        return "", fmt.Errorf("error due to parsing url: %w", err)
    }
    x, err := url.QueryUnescape(u.EscapedPath())
    if err != nil {
        return "", fmt.Errorf("error due to escaping: %w", err)
    }
    return filepath.Base(x), nil
}

func ConvertURL(s string) string {
func (u URI) LooksLikeURL() bool {
    return strings.HasPrefix(string(u), HTTPPrefix) ||
        strings.HasPrefix(string(u), HTTPSPrefix) ||
        strings.HasPrefix(string(u), HuggingFacePrefix) ||
        strings.HasPrefix(string(u), GithubURI) ||
        strings.HasPrefix(string(u), OllamaPrefix) ||
        strings.HasPrefix(string(u), OCIPrefix) ||
        strings.HasPrefix(string(u), GithubURI2)
}

func (s URI) LooksLikeOCI() bool {
    return strings.HasPrefix(string(s), OCIPrefix) || strings.HasPrefix(string(s), OllamaPrefix)
}

func (s URI) ResolveURL() string {
    switch {
    case strings.HasPrefix(s, GithubURI2):
        repository := strings.Replace(s, GithubURI2, "", 1)
    case strings.HasPrefix(string(s), GithubURI2):
        repository := strings.Replace(string(s), GithubURI2, "", 1)

        repoParts := strings.Split(repository, "@")
        branch := "main"
@@ -110,8 +141,8 @@ func ConvertURL(s string) string {
        projectPath := strings.Join(repoPath[2:], "/")

        return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/%s/%s", org, project, branch, projectPath)
    case strings.HasPrefix(s, GithubURI):
        parts := strings.Split(s, ":")
    case strings.HasPrefix(string(s), GithubURI):
        parts := strings.Split(string(s), ":")
        repoParts := strings.Split(parts[1], "@")
        branch := "main"

@@ -125,8 +156,8 @@ func ConvertURL(s string) string {
        projectPath := strings.Join(repoPath[2:], "/")

        return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/%s/%s", org, project, branch, projectPath)
    case strings.HasPrefix(s, HuggingFacePrefix):
        repository := strings.Replace(s, HuggingFacePrefix, "", 1)
    case strings.HasPrefix(string(s), HuggingFacePrefix):
        repository := strings.Replace(string(s), HuggingFacePrefix, "", 1)
        // convert repository to a full URL.
        // e.g. TheBloke/Mixtral-8x7B-v0.1-GGUF/mixtral-8x7b-v0.1.Q2_K.gguf@main -> https://huggingface.co/TheBloke/Mixtral-8x7B-v0.1-GGUF/resolve/main/mixtral-8x7b-v0.1.Q2_K.gguf
        owner := strings.Split(repository, "/")[0]
@@ -144,7 +175,7 @@ func ConvertURL(s string) string {
        return fmt.Sprintf("https://huggingface.co/%s/%s/resolve/%s/%s", owner, repo, branch, filepath)
    }

    return s
    return string(s)
}
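As a rough illustration of the method on the shorthand schemes handled above (a hypothetical caller; the import path is assumed, and the huggingface:// case defaults to the main branch, as the inline comment shows):

package main

import (
    "fmt"

    "github.com/mudler/LocalAI/pkg/downloader" // assumed import path
)

func main() {
    gh := downloader.URI("github:mudler/LocalAI/gallery/index.yaml@master")
    fmt.Println(gh.ResolveURL())
    // https://raw.githubusercontent.com/mudler/LocalAI/master/gallery/index.yaml

    hf := downloader.URI("huggingface://TheBloke/Mixtral-8x7B-v0.1-GGUF/mixtral-8x7b-v0.1.Q2_K.gguf")
    fmt.Println(hf.ResolveURL())
    // https://huggingface.co/TheBloke/Mixtral-8x7B-v0.1-GGUF/resolve/main/mixtral-8x7b-v0.1.Q2_K.gguf

    // plain http(s) URLs pass through unchanged
    fmt.Println(downloader.URI("https://example.com/model.yaml").ResolveURL())
}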

func removePartialFile(tmpFilePath string) error {
@@ -161,9 +192,9 @@ func removePartialFile(tmpFilePath string) error {
    return nil
}

func DownloadFile(url string, filePath, sha string, fileN, total int, downloadStatus func(string, string, string, float64)) error {
    url = ConvertURL(url)
    if LooksLikeOCI(url) {
func (uri URI) DownloadFile(filePath, sha string, fileN, total int, downloadStatus func(string, string, string, float64)) error {
    url := uri.ResolveURL()
    if uri.LooksLikeOCI() {
        progressStatus := func(desc ocispec.Descriptor) io.Writer {
            return &progressWriter{
                fileName: filePath,
@@ -298,37 +329,6 @@ func DownloadFile(url string, filePath, sha string, fileN, total int, downloadStatus func(string, string, string, float64)) error {
    return nil
}
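A minimal sketch of the refactored download entry point (a hypothetical caller; the import path and target path are assumptions): one method now serves both OCI references and plain URLs, reporting progress through the callback. Mirroring the startup code later in this diff, an empty sha argument is passed when no checksum is to be verified:

package main

import (
    "fmt"

    "github.com/mudler/LocalAI/pkg/downloader" // assumed import path
)

func main() {
    uri := downloader.URI("huggingface://TheBloke/Mixtral-8x7B-v0.1-GGUF/mixtral-8x7b-v0.1.Q2_K.gguf")
    // empty sha: no checksum verification, as used by InstallModels below;
    // fileN/total only feed the status callback
    err := uri.DownloadFile("/tmp/mixtral-8x7b-v0.1.Q2_K.gguf", "", 0, 0,
        func(fileName, current, total string, percent float64) {
            fmt.Printf("%s: %s/%s (%.1f%%)\n", fileName, current, total, percent)
        })
    if err != nil {
        fmt.Println("download failed:", err)
    }
}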

// this function checks if the string is a URL; if it's a URL it downloads the image in memory,
// encodes it in base64 and returns the base64 string
func GetBase64Image(s string) (string, error) {
    if strings.HasPrefix(s, "http") {
        // download the image
        resp, err := http.Get(s)
        if err != nil {
            return "", err
        }
        defer resp.Body.Close()

        // read the image data into memory
        data, err := io.ReadAll(resp.Body)
        if err != nil {
            return "", err
        }

        // encode the image data in base64
        encoded := base64.StdEncoding.EncodeToString(data)

        // return the base64 string
        return encoded, nil
    }

    // if the string instead is prefixed with "data:image/jpeg;base64,", drop it
    if strings.HasPrefix(s, "data:image/jpeg;base64,") {
        return strings.ReplaceAll(s, "data:image/jpeg;base64,", ""), nil
    }
    return "", fmt.Errorf("not valid string")
}

func formatBytes(bytes int64) string {
    const unit = 1024
    if bytes < unit {
@@ -356,42 +356,3 @@ func calculateSHA(filePath string) (string, error) {

    return fmt.Sprintf("%x", hash.Sum(nil)), nil
}

type HuggingFaceScanResult struct {
    RepositoryId        string   `json:"repositoryId"`
    Revision            string   `json:"revision"`
    HasUnsafeFiles      bool     `json:"hasUnsafeFile"`
    ClamAVInfectedFiles []string `json:"clamAVInfectedFiles"`
    DangerousPickles    []string `json:"dangerousPickles"`
    ScansDone           bool     `json:"scansDone"`
}

var ErrNonHuggingFaceFile = errors.New("not a huggingface repo")
var ErrUnsafeFilesFound = errors.New("unsafe files found")

func HuggingFaceScan(uri string) (*HuggingFaceScanResult, error) {
    cleanParts := strings.Split(ConvertURL(uri), "/")
    if len(cleanParts) <= 4 || cleanParts[2] != "huggingface.co" {
        return nil, ErrNonHuggingFaceFile
    }
    results, err := http.Get(fmt.Sprintf("https://huggingface.co/api/models/%s/%s/scan", cleanParts[3], cleanParts[4]))
    if err != nil {
        return nil, err
    }
    if results.StatusCode != 200 {
        return nil, fmt.Errorf("unexpected status code during HuggingFaceScan: %d", results.StatusCode)
    }
    scanResult := &HuggingFaceScanResult{}
    bodyBytes, err := io.ReadAll(results.Body)
    if err != nil {
        return nil, err
    }
    err = json.Unmarshal(bodyBytes, scanResult)
    if err != nil {
        return nil, err
    }
    if scanResult.HasUnsafeFiles {
        return scanResult, ErrUnsafeFilesFound
    }
    return scanResult, nil
}

@@ -9,24 +9,28 @@ import (
var _ = Describe("Gallery API tests", func() {
    Context("URI", func() {
        It("parses github with a branch", func() {
            uri := URI("github:go-skynet/model-gallery/gpt4all-j.yaml")
            Expect(
                DownloadAndUnmarshal("github:go-skynet/model-gallery/gpt4all-j.yaml", "", func(url string, i []byte) error {
                uri.DownloadAndUnmarshal("", func(url string, i []byte) error {
                    Expect(url).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml"))
                    return nil
                }),
            ).ToNot(HaveOccurred())
        })
        It("parses github without a branch", func() {
            uri := URI("github:go-skynet/model-gallery/gpt4all-j.yaml@main")

            Expect(
                DownloadAndUnmarshal("github:go-skynet/model-gallery/gpt4all-j.yaml@main", "", func(url string, i []byte) error {
                uri.DownloadAndUnmarshal("", func(url string, i []byte) error {
                    Expect(url).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml"))
                    return nil
                }),
            ).ToNot(HaveOccurred())
        })
        It("parses github with urls", func() {
            uri := URI("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml")
            Expect(
                DownloadAndUnmarshal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml", "", func(url string, i []byte) error {
                uri.DownloadAndUnmarshal("", func(url string, i []byte) error {
                    Expect(url).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml"))
                    return nil
                }),

@@ -3,7 +3,6 @@ package startup
import (
    "errors"
    "fmt"
    "net/url"
    "os"
    "path/filepath"
    "strings"
@@ -23,21 +22,21 @@ func InstallModels(galleries []config.Gallery, modelLibraryURL string, modelPath
    // create an error that groups all errors
    var err error

    for _, url := range models {
    lib, _ := embedded.GetRemoteLibraryShorteners(modelLibraryURL, modelPath)

    for _, url := range models {
        // As a best effort, try to resolve the model from the remote library
        // if it's not resolved we try with the other method below
        if modelLibraryURL != "" {
            lib, err := embedded.GetRemoteLibraryShorteners(modelLibraryURL, modelPath)
            if err == nil {
                if lib[url] != "" {
                    log.Debug().Msgf("[startup] model configuration is defined remotely: %s (%s)", url, lib[url])
                    url = lib[url]
                }
        if lib[url] != "" {
            log.Debug().Msgf("[startup] model configuration is defined remotely: %s (%s)", url, lib[url])
            url = lib[url]
        }
            }

        url = embedded.ModelShortURL(url)
        uri := downloader.URI(url)

        switch {
        case embedded.ExistsInModelsLibrary(url):
            modelYAML, e := embedded.ResolveContent(url)
@@ -55,7 +54,7 @@ func InstallModels(galleries []config.Gallery, modelLibraryURL string, modelPath
            log.Error().Err(e).Str("filepath", modelDefinitionFilePath).Msg("error writing model definition")
            err = errors.Join(err, e)
        }
        case downloader.LooksLikeOCI(url):
        case uri.LooksLikeOCI():
            log.Debug().Msgf("[startup] resolved OCI model to download: %s", url)

            // convert OCI image name to a file name.
@@ -67,7 +66,7 @@ func InstallModels(galleries []config.Gallery, modelLibraryURL string, modelPath
            // check if file exists
            if _, e := os.Stat(filepath.Join(modelPath, ociName)); errors.Is(e, os.ErrNotExist) {
                modelDefinitionFilePath := filepath.Join(modelPath, ociName)
                e := downloader.DownloadFile(url, modelDefinitionFilePath, "", 0, 0, func(fileName, current, total string, percent float64) {
                e := uri.DownloadFile(modelDefinitionFilePath, "", 0, 0, func(fileName, current, total string, percent float64) {
                    utils.DisplayDownloadFunction(fileName, current, total, percent)
                })
                if e != nil {
@@ -77,19 +76,15 @@ func InstallModels(galleries []config.Gallery, modelLibraryURL string, modelPath
            }

            log.Info().Msgf("[startup] installed model from OCI repository: %s", ociName)
        case downloader.LooksLikeURL(url):
        case uri.LooksLikeURL():
            log.Debug().Msgf("[startup] downloading %s", url)

            // Extract filename from URL
            fileName, e := filenameFromUrl(url)
            if e != nil || fileName == "" {
                fileName = utils.MD5(url)
                if strings.HasSuffix(url, ".yaml") || strings.HasSuffix(url, ".yml") {
                    fileName = fileName + ".yaml"
                }
            fileName, e := uri.FilenameFromUrl()
            if e != nil {
                log.Warn().Err(e).Str("url", url).Msg("error extracting filename from URL")
                //err = errors.Join(err, e)
                //continue
                err = errors.Join(err, e)
                continue
            }

            modelPath := filepath.Join(modelPath, fileName)
@@ -102,7 +97,7 @@ func InstallModels(galleries []config.Gallery, modelLibraryURL string, modelPath

            // check if file exists
            if _, e := os.Stat(modelPath); errors.Is(e, os.ErrNotExist) {
                e := downloader.DownloadFile(url, modelPath, "", 0, 0, func(fileName, current, total string, percent float64) {
                e := uri.DownloadFile(modelPath, "", 0, 0, func(fileName, current, total string, percent float64) {
                    utils.DisplayDownloadFunction(fileName, current, total, percent)
                })
                if e != nil {
@@ -167,20 +162,3 @@ func installModel(galleries []config.Gallery, modelName, modelPath string, downl

    return nil, true
}

func filenameFromUrl(urlstr string) (string, error) {
    // strip anything after @
    if strings.Contains(urlstr, "@") {
        urlstr = strings.Split(urlstr, "@")[0]
    }

    u, err := url.Parse(urlstr)
    if err != nil {
        return "", fmt.Errorf("error due to parsing url: %w", err)
    }
    x, err := url.QueryUnescape(u.EscapedPath())
    if err != nil {
        return "", fmt.Errorf("error due to escaping: %w", err)
    }
    return filepath.Base(x), nil
}
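To summarize the dispatch that InstallModels performs after this refactor, a condensed sketch (not a verbatim excerpt; resolveKind is a hypothetical helper, the import path and the oci://, ollama:// prefix values are assumptions, and the real code first consults the embedded model library and remote shorteners):

package main

import (
    "fmt"

    "github.com/mudler/LocalAI/pkg/downloader" // assumed import path
)

// resolveKind mirrors the switch in InstallModels above: OCI/ollama
// references, plain URLs, and bare gallery model names each take a
// different installation path.
func resolveKind(s string) string {
    uri := downloader.URI(s)
    switch {
    case uri.LooksLikeOCI():
        return "oci" // fetched with uri.DownloadFile from a container registry
    case uri.LooksLikeURL():
        return "url" // filename derived via uri.FilenameFromUrl, then downloaded
    default:
        return "gallery" // treated as a model name and installed from the galleries
    }
}

func main() {
    // example references only; prefix values assumed
    fmt.Println(resolveKind("oci://localai/phi-2:latest"))
    fmt.Println(resolveKind("huggingface://TheBloke/Mixtral-8x7B-v0.1-GGUF/mixtral-8x7b-v0.1.Q2_K.gguf"))
    fmt.Println(resolveKind("phi-2"))
}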