Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 03:02:38 -05:00)

Compare commits: v2.16.0...functions_ (1 commit)
Commit SHA1: ac47aeaddd
.env — 5 changes

@@ -71,11 +71,6 @@
 ### Define the number of parallel LLAMA.cpp workers (Defaults to 1)
 # LLAMACPP_PARALLEL=1

-### Define a list of GRPC Servers for llama-cpp workers to distribute the load
-# https://github.com/ggerganov/llama.cpp/pull/6829
-# https://github.com/ggerganov/llama.cpp/blob/master/examples/rpc/README.md
-# LLAMACPP_GRPC_SERVERS=""
-
 ### Enable to run parallel requests
 # LOCALAI_PARALLEL_REQUESTS=true
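For context, a minimal sketch of how these .env knobs combine when launching LocalAI from a shell — the worker count and the two host:port pairs below are illustrative values, not project defaults:

```bash
# Hypothetical: four llama.cpp workers, parallel request handling,
# and two remote llama.cpp RPC servers sharing the load
LLAMACPP_PARALLEL=4 \
LOCALAI_PARALLEL_REQUESTS=true \
LLAMACPP_GRPC_SERVERS="192.168.1.10:50052,192.168.1.11:50052" \
./local-ai
```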
.github/checksum_checker.sh (vendored) — 33 changes

@@ -16,7 +16,7 @@ function check_and_update_checksum() {
     # Download the file and calculate new checksum using Python
     new_checksum=$(python3 -c "
 import hashlib
-from huggingface_hub import hf_hub_download, get_paths_info
+from huggingface_hub import hf_hub_download
 import requests
 import sys
 import os

@@ -46,24 +46,13 @@ def calculate_sha256(file_path):

 download_type, repo_id_or_url = parse_uri(uri)

-new_checksum = None
-
 # Decide download method based on URI type
 if download_type == 'huggingface':
-    # Use HF API to pull sha
-    for file in get_paths_info(repo_id_or_url, [file_name], repo_type='model'):
-        try:
-            new_checksum = file.lfs.sha256
-            break
-        except Exception as e:
-            print(f'Error from Hugging Face Hub: {str(e)}', file=sys.stderr)
-            sys.exit(2)
-    if new_checksum is None:
-        try:
-            file_path = hf_hub_download(repo_id=repo_id_or_url, filename=file_name)
-        except Exception as e:
-            print(f'Error from Hugging Face Hub: {str(e)}', file=sys.stderr)
-            sys.exit(2)
+    try:
+        file_path = hf_hub_download(repo_id=repo_id_or_url, filename=file_name)
+    except Exception as e:
+        print(f'Error from Hugging Face Hub: {str(e)}', file=sys.stderr)
+        sys.exit(2)
 else:
     response = requests.get(repo_id_or_url)
     if response.status_code == 200:

@@ -77,13 +66,9 @@ else:
         print(f'Error downloading file: {response.status_code}', file=sys.stderr)
         sys.exit(1)

-if new_checksum is None:
-    new_checksum = calculate_sha256(file_path)
-    print(new_checksum)
-    os.remove(file_path)
-else:
-    print(new_checksum)
-
+print(calculate_sha256(file_path))
+# Clean up the downloaded file
+os.remove(file_path)
 ")

 if [[ "$new_checksum" == "" ]]; then
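As a rough shell equivalent of the non-HuggingFace branch of the embedded Python above (the URL is a placeholder, not a real model URI):

```bash
# Sketch: download a file over HTTP, print its SHA256, then clean up
url="https://example.com/model.gguf"   # placeholder URI
tmp=$(mktemp)
curl -fsSL "$url" -o "$tmp"
sha256sum "$tmp" | awk '{print $1}'
rm -f "$tmp"
```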
.github/workflows/generate_grpc_cache.yaml (vendored) — 2 changes

@@ -84,7 +84,7 @@ jobs:
           build-args: |
             GRPC_BASE_IMAGE=${{ matrix.grpc-base-image }}
             GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
-            GRPC_VERSION=v1.64.0
+            GRPC_VERSION=v1.63.0
           context: .
           file: ./Dockerfile
           cache-to: type=gha,ignore-error=true
.github/workflows/generate_intel_image.yaml (vendored) — 59 changes (file deleted)

@@ -1,59 +0,0 @@
-name: 'generate and publish intel docker caches'
-
-on:
-  workflow_dispatch:
-  push:
-    branches:
-      - master
-
-concurrency:
-  group: intel-cache-${{ github.head_ref || github.ref }}-${{ github.repository }}
-  cancel-in-progress: true
-
-jobs:
-  generate_caches:
-    strategy:
-      matrix:
-        include:
-          - base-image: intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04
-            runs-on: 'ubuntu-latest'
-            platforms: 'linux/amd64'
-    runs-on: ${{matrix.runs-on}}
-    steps:
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@master
-        with:
-          platforms: all
-      - name: Login to DockerHub
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v3
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_PASSWORD }}
-
-      - name: Login to quay
-        if: github.event_name != 'pull_request'
-        uses: docker/login-action@v3
-        with:
-          registry: quay.io
-          username: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
-          password: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
-      - name: Set up Docker Buildx
-        id: buildx
-        uses: docker/setup-buildx-action@master
-
-      - name: Checkout
-        uses: actions/checkout@v4
-
-      - name: Cache Intel images
-        uses: docker/build-push-action@v5
-        with:
-          builder: ${{ steps.buildx.outputs.name }}
-          build-args: |
-            BASE_IMAGE=${{ matrix.base-image }}
-          context: .
-          file: ./Dockerfile
-          tags: quay.io/go-skynet/intel-oneapi-base:latest
-          push: true
-          target: intel
-          platforms: ${{ matrix.platforms }}
.github/workflows/image-pr.yml (vendored) — 4 changes

@@ -68,7 +68,7 @@ jobs:
           - build-type: 'sycl_f16'
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: 'sycl-f16-ffmpeg'
             ffmpeg: 'true'

@@ -110,7 +110,7 @@ jobs:
           - build-type: 'sycl_f16'
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: 'sycl-f16-ffmpeg-core'
             ffmpeg: 'true'
.github/workflows/image.yml (vendored) — 12 changes

@@ -148,7 +148,7 @@ jobs:
           - build-type: 'sycl_f16'
             platforms: 'linux/amd64'
             tag-latest: 'auto'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: '-sycl-f16-ffmpeg'
             ffmpeg: 'true'

@@ -161,7 +161,7 @@ jobs:
           - build-type: 'sycl_f32'
             platforms: 'linux/amd64'
             tag-latest: 'auto'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: '-sycl-f32-ffmpeg'
             ffmpeg: 'true'

@@ -175,7 +175,7 @@ jobs:
           - build-type: 'sycl_f16'
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: '-sycl-f16-core'
             ffmpeg: 'false'

@@ -185,7 +185,7 @@ jobs:
           - build-type: 'sycl_f32'
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: '-sycl-f32-core'
             ffmpeg: 'false'

@@ -195,7 +195,7 @@ jobs:
           - build-type: 'sycl_f16'
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: '-sycl-f16-ffmpeg-core'
             ffmpeg: 'true'

@@ -205,7 +205,7 @@ jobs:
           - build-type: 'sycl_f32'
             platforms: 'linux/amd64'
             tag-latest: 'false'
-            base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
+            base-image: "intel/oneapi-basekit:2024.1.0-devel-ubuntu22.04"
             grpc-base-image: "ubuntu:22.04"
             tag-suffix: '-sycl-f32-ffmpeg-core'
             ffmpeg: 'true'
.github/workflows/image_build.yml (vendored) — 3 changes

@@ -174,6 +174,7 @@ jobs:
             type=ref,event=branch
             type=semver,pattern={{raw}}
           flavor: |
             latest=${{ inputs.tag-latest }}
+            suffix=${{ inputs.aio }}

       - name: Set up QEMU

@@ -217,7 +218,7 @@ jobs:
             BASE_IMAGE=${{ inputs.base-image }}
             GRPC_BASE_IMAGE=${{ inputs.grpc-base-image || inputs.base-image }}
             GRPC_MAKEFLAGS=--jobs=4 --output-sync=target
-            GRPC_VERSION=v1.64.0
+            GRPC_VERSION=v1.63.0
             MAKEFLAGS=${{ inputs.makeflags }}
           context: .
           file: ./Dockerfile
.github/workflows/release.yaml (vendored) — 12 changes

@@ -5,7 +5,7 @@ on:
     - pull_request

 env:
-  GRPC_VERSION: v1.64.0
+  GRPC_VERSION: v1.63.0

 permissions:
   contents: write

@@ -29,7 +29,7 @@ jobs:
       - name: Dependencies
         run: |
           sudo apt-get update
-          sudo apt-get install build-essential ffmpeg protobuf-compiler ccache
+          sudo apt-get install build-essential ffmpeg protobuf-compiler
       - name: Install CUDA Dependencies
         run: |
           curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb

@@ -61,7 +61,7 @@ jobs:
           go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
           export PATH=$PATH:$GOPATH/bin
           export PATH=/usr/local/cuda/bin:$PATH
-          GO_TAGS=p2p make dist
+          make dist
       - uses: actions/upload-artifact@v4
         with:
           name: LocalAI-linux

@@ -86,7 +86,7 @@ jobs:
           cache: false
       - name: Dependencies
         run: |
-          sudo apt-get install -y --no-install-recommends libopencv-dev protobuf-compiler ccache
+          sudo apt-get install -y --no-install-recommends libopencv-dev protobuf-compiler
           go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest
           go install google.golang.org/protobuf/cmd/protoc-gen-go@latest
       - name: Build stablediffusion

@@ -94,8 +94,6 @@ jobs:
           export PATH=$PATH:$GOPATH/bin
           make backend-assets/grpc/stablediffusion
-          mkdir -p release && cp backend-assets/grpc/stablediffusion release
-
         env:
           GO_TAGS: stablediffusion
       - uses: actions/upload-artifact@v4
         with:
           name: stablediffusion

@@ -123,7 +121,7 @@ jobs:
           export C_INCLUDE_PATH=/usr/local/include
           export CPLUS_INCLUDE_PATH=/usr/local/include
           export PATH=$PATH:$GOPATH/bin
-          GO_TAGS=p2p make dist
+          make dist
       - uses: actions/upload-artifact@v4
         with:
           name: LocalAI-MacOS-arm64
.github/workflows/test-extra.yml (vendored) — 44 changes

@@ -29,7 +29,7 @@ jobs:
           curl -LsSf https://astral.sh/uv/install.sh | sh
           sudo apt-get install -y ca-certificates cmake curl patch python3-pip
           sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0

       - name: Test transformers
         run: |

@@ -51,7 +51,7 @@ jobs:
           curl -LsSf https://astral.sh/uv/install.sh | sh
           sudo apt-get install -y ca-certificates cmake curl patch python3-pip
           sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0

       - name: Test sentencetransformers
         run: |

@@ -74,7 +74,7 @@ jobs:
           curl -LsSf https://astral.sh/uv/install.sh | sh
           sudo apt-get install -y ca-certificates cmake curl patch python3-pip
           sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0

       - name: Test rerankers
         run: |

@@ -96,7 +96,7 @@ jobs:
           sudo apt-get install -y libopencv-dev
           # Install UV
           curl -LsSf https://astral.sh/uv/install.sh | sh
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0
       - name: Test diffusers
         run: |
           make --jobs=5 --output-sync=target -C backend/python/diffusers

@@ -117,34 +117,12 @@ jobs:
           curl -LsSf https://astral.sh/uv/install.sh | sh
           sudo apt-get install -y ca-certificates cmake curl patch python3-pip
           sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0

       - name: Test parler-tts
         run: |
           make --jobs=5 --output-sync=target -C backend/python/parler-tts
           make --jobs=5 --output-sync=target -C backend/python/parler-tts test

-  tests-openvoice:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Clone
-        uses: actions/checkout@v4
-        with:
-          submodules: true
-      - name: Dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install build-essential ffmpeg
-          # Install UV
-          curl -LsSf https://astral.sh/uv/install.sh | sh
-          sudo apt-get install -y ca-certificates cmake curl patch python3-pip
-          sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools==1.64.0
-
-      - name: Test openvoice
-        run: |
-          make --jobs=5 --output-sync=target -C backend/python/openvoice
-          make --jobs=5 --output-sync=target -C backend/python/openvoice test
-
   tests-transformers-musicgen:
     runs-on: ubuntu-latest

@@ -161,7 +139,7 @@ jobs:
           curl -LsSf https://astral.sh/uv/install.sh | sh
           sudo apt-get install -y ca-certificates cmake curl patch python3-pip
           sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0

       - name: Test transformers-musicgen
         run: |

@@ -185,7 +163,7 @@ jobs:
       #     curl -LsSf https://astral.sh/uv/install.sh | sh
       #     sudo apt-get install -y ca-certificates cmake curl patch python3-pip
       #     sudo apt-get install -y libopencv-dev
-      #     pip install --user grpcio-tools==1.64.0
+      #     pip install --user grpcio-tools==1.63.0

       # - name: Test petals
       #   run: |

@@ -249,7 +227,7 @@ jobs:
       #     curl -LsSf https://astral.sh/uv/install.sh | sh
       #     sudo apt-get install -y ca-certificates cmake curl patch python3-pip
       #     sudo apt-get install -y libopencv-dev
-      #     pip install --user grpcio-tools==1.64.0
+      #     pip install --user grpcio-tools==1.63.0

       # - name: Test bark
       #   run: |

@@ -274,7 +252,7 @@ jobs:
       #     curl -LsSf https://astral.sh/uv/install.sh | sh
       #     sudo apt-get install -y ca-certificates cmake curl patch python3-pip
       #     sudo apt-get install -y libopencv-dev
-      #     pip install --user grpcio-tools==1.64.0
+      #     pip install --user grpcio-tools==1.63.0
       # - name: Test vllm
       #   run: |
       #     make --jobs=5 --output-sync=target -C backend/python/vllm

@@ -294,7 +272,7 @@ jobs:
           curl -LsSf https://astral.sh/uv/install.sh | sh
           sudo apt-get install -y ca-certificates cmake curl patch python3-pip
           sudo apt-get install -y libopencv-dev
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0
       - name: Test vall-e-x
         run: |
           make --jobs=5 --output-sync=target -C backend/python/vall-e-x

@@ -314,7 +292,7 @@ jobs:
           sudo apt-get install -y ca-certificates cmake curl patch espeak espeak-ng python3-pip
           # Install UV
           curl -LsSf https://astral.sh/uv/install.sh | sh
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0
       - name: Test coqui
         run: |
           make --jobs=5 --output-sync=target -C backend/python/coqui
.github/workflows/test.yml (vendored) — 4 changes

@@ -10,7 +10,7 @@ on:
       - '*'

 env:
-  GRPC_VERSION: v1.64.0
+  GRPC_VERSION: v1.63.0

 concurrency:
   group: ci-tests-${{ github.head_ref || github.ref }}-${{ github.repository }}

@@ -213,7 +213,7 @@ jobs:
       - name: Dependencies
         run: |
           brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc
-          pip install --user grpcio-tools==1.64.0
+          pip install --user grpcio-tools==1.63.0
       - name: Test
         run: |
           export C_INCLUDE_PATH=/usr/local/include
.gitignore (vendored) — 5 changes

@@ -6,9 +6,6 @@ get-sources
 prepare-sources
 /backend/cpp/llama/grpc-server
 /backend/cpp/llama/llama.cpp
-/backend/cpp/llama-*
-
-*.log

 go-ggml-transformers
 go-gpt2

@@ -52,4 +49,4 @@ prepare
 .scannerwork

 # backend virtual environments
-**/venv
+**/venv
Dockerfile — 21 changes

@@ -1,7 +1,6 @@
 ARG IMAGE_TYPE=extras
 ARG BASE_IMAGE=ubuntu:22.04
 ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
-ARG INTEL_BASE_IMAGE=${BASE_IMAGE}

 # The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
 FROM ${BASE_IMAGE} AS requirements-core

@@ -13,13 +12,13 @@ ARG TARGETARCH
 ARG TARGETVARIANT

 ENV DEBIAN_FRONTEND=noninteractive
-ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,petals:/build/backend/python/petals/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,exllama:/build/backend/python/exllama/run.sh,openvoice:/build/backend/python/openvoice/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"
+ENV EXTERNAL_GRPC_BACKENDS="coqui:/build/backend/python/coqui/run.sh,huggingface-embeddings:/build/backend/python/sentencetransformers/run.sh,petals:/build/backend/python/petals/run.sh,transformers:/build/backend/python/transformers/run.sh,sentencetransformers:/build/backend/python/sentencetransformers/run.sh,rerankers:/build/backend/python/rerankers/run.sh,autogptq:/build/backend/python/autogptq/run.sh,bark:/build/backend/python/bark/run.sh,diffusers:/build/backend/python/diffusers/run.sh,exllama:/build/backend/python/exllama/run.sh,vall-e-x:/build/backend/python/vall-e-x/run.sh,vllm:/build/backend/python/vllm/run.sh,mamba:/build/backend/python/mamba/run.sh,exllama2:/build/backend/python/exllama2/run.sh,transformers-musicgen:/build/backend/python/transformers-musicgen/run.sh,parler-tts:/build/backend/python/parler-tts/run.sh"

 ARG GO_TAGS="stablediffusion tinydream tts"

 RUN apt-get update && \
     apt-get install -y --no-install-recommends \
         build-essential \
-        ccache \
         ca-certificates \
         cmake \
         curl \

@@ -146,17 +145,6 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
 ###################################
 ###################################

-# Temporary workaround for Intel's repository to work correctly
-# https://community.intel.com/t5/Intel-oneAPI-Math-Kernel-Library/APT-Repository-not-working-signatures-invalid/m-p/1599436/highlight/true#M36143
-# This is a temporary workaround until Intel fixes their repository
-FROM ${INTEL_BASE_IMAGE} AS intel
-RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
-    gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
-RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
-
-###################################
-###################################
-
 # The grpc target does one thing, it builds and installs GRPC. This is in it's own layer so that it can be effectively cached by CI.
 # You probably don't need to change anything here, and if you do, make sure that CI is adjusted so that the cache continues to work.
 FROM ${GRPC_BASE_IMAGE} AS grpc

@@ -196,7 +184,7 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
 # Adjustments to the build process should likely be made here.
 FROM requirements-drivers AS builder

-ARG GO_TAGS="stablediffusion tts p2p"
+ARG GO_TAGS="stablediffusion tts"
 ARG GRPC_BACKENDS
 ARG MAKEFLAGS

@@ -317,9 +305,6 @@ RUN if [[ ( "${EXTRA_BACKENDS}" =~ "coqui" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
 RUN if [[ ( "${EXTRA_BACKENDS}" =~ "vall-e-x" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/vall-e-x \
     ; fi && \
-    if [[ ( "${EXTRA_BACKENDS}" =~ "openvoice" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
-        make -C backend/python/openvoice \
-    ; fi && \
     if [[ ( "${EXTRA_BACKENDS}" =~ "petals" || -z "${EXTRA_BACKENDS}" ) && "$IMAGE_TYPE" == "extras" ]]; then \
         make -C backend/python/petals \
     ; fi && \
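Given the ARGs and EXTRA_BACKENDS conditionals above, a trimmed extras image could be built roughly like this — the tag and backend selection are illustrative, not project defaults:

```bash
# Sketch: build an extras image containing only the vall-e-x Python backend
docker build \
  --build-arg IMAGE_TYPE=extras \
  --build-arg EXTRA_BACKENDS="vall-e-x" \
  -t localai-custom:dev .
```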
Makefile — 40 changes

@@ -5,7 +5,7 @@ BINARY_NAME=local-ai

 # llama.cpp versions
 GOLLAMA_STABLE_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=74f33adf5f8b20b08fc5a6aa17ce081abe86ef2f
+CPPLLAMA_VERSION?=dc685be46622a8fabfd57cfa804237c8f15679b8

 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all

@@ -16,10 +16,10 @@ RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
 RWKV_VERSION?=661e7ae26d442f5cfebd2a0881b44e8c55949ec6

 # whisper.cpp version
-WHISPER_CPP_VERSION?=22d46b7ba4620e2db1281e210d0186863cffcec0
+WHISPER_CPP_VERSION?=4ef8d9f44eb402c528ab6d990ab50a9f4f666347

 # bert.cpp version
-BERT_VERSION?=710044b124545415f555e4260d16b146c725a6e4
+BERT_VERSION?=6abe312cded14042f6b7c3cd8edf082713334a4d

 # go-piper version
 PIPER_VERSION?=9d0100873a7dbb0824dfea40e8cec70a1b110759

@@ -158,8 +158,6 @@ ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx
 ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-avx2
 ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-fallback
 ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-ggml
-ALL_GRPC_BACKENDS+=backend-assets/grpc/llama-cpp-grpc
-ALL_GRPC_BACKENDS+=backend-assets/util/llama-cpp-rpc-server
 ALL_GRPC_BACKENDS+=backend-assets/grpc/gpt4all
 ALL_GRPC_BACKENDS+=backend-assets/grpc/rwkv
 ALL_GRPC_BACKENDS+=backend-assets/grpc/whisper

@@ -316,7 +314,7 @@ build: prepare backend-assets grpcs ## Build the project
	CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./

 build-minimal:
-	BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS="backend-assets/grpc/llama-cpp-avx2" GO_TAGS=none $(MAKE) build
+	BUILD_GRPC_FOR_BACKEND_LLAMA=true GRPC_BACKENDS="backend-assets/grpc/llama-cpp" GO_TAGS=none $(MAKE) build

 build-api:
	BUILD_GRPC_FOR_BACKEND_LLAMA=true BUILD_API_ONLY=true GO_TAGS=none $(MAKE) build

@@ -333,10 +331,8 @@ endif
 # if BUILD_ID is empty, then we don't append it to the binary name
 ifeq ($(BUILD_ID),)
	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(OS)-$(ARCH)
-	shasum -a 256 release/$(BINARY_NAME)-$(OS)-$(ARCH) > release/$(BINARY_NAME)-$(OS)-$(ARCH).sha256
 else
	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)
-	shasum -a 256 release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH) > release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH).sha256
 endif

 osx-signed: build

@@ -456,10 +452,10 @@ protogen-go-clean:
	$(RM) bin/*

 .PHONY: protogen-python
-protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama-protogen exllama2-protogen mamba-protogen petals-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen openvoice-protogen
+protogen-python: autogptq-protogen bark-protogen coqui-protogen diffusers-protogen exllama-protogen exllama2-protogen mamba-protogen petals-protogen rerankers-protogen sentencetransformers-protogen transformers-protogen parler-tts-protogen transformers-musicgen-protogen vall-e-x-protogen vllm-protogen

 .PHONY: protogen-python-clean
-protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama-protogen-clean exllama2-protogen-clean mamba-protogen-clean petals-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean openvoice-protogen-clean
+protogen-python-clean: autogptq-protogen-clean bark-protogen-clean coqui-protogen-clean diffusers-protogen-clean exllama-protogen-clean exllama2-protogen-clean mamba-protogen-clean petals-protogen-clean sentencetransformers-protogen-clean rerankers-protogen-clean transformers-protogen-clean transformers-musicgen-protogen-clean parler-tts-protogen-clean vall-e-x-protogen-clean vllm-protogen-clean

 .PHONY: autogptq-protogen
 autogptq-protogen:

@@ -573,14 +569,6 @@ vall-e-x-protogen:
 vall-e-x-protogen-clean:
	$(MAKE) -C backend/python/vall-e-x protogen-clean

-.PHONY: openvoice-protogen
-openvoice-protogen:
-	$(MAKE) -C backend/python/openvoice protogen
-
-.PHONY: openvoice-protogen-clean
-openvoice-protogen-clean:
-	$(MAKE) -C backend/python/openvoice protogen-clean
-
 .PHONY: vllm-protogen
 vllm-protogen:
	$(MAKE) -C backend/python/vllm protogen

@@ -604,7 +592,6 @@ prepare-extra-conda-environments: protogen-python
	$(MAKE) -C backend/python/transformers-musicgen
	$(MAKE) -C backend/python/parler-tts
	$(MAKE) -C backend/python/vall-e-x
-	$(MAKE) -C backend/python/openvoice
	$(MAKE) -C backend/python/exllama
	$(MAKE) -C backend/python/petals
	$(MAKE) -C backend/python/exllama2

@@ -704,17 +691,6 @@ backend-assets/grpc/llama-cpp-cuda: backend-assets/grpc
	CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_CUDA=ON" $(MAKE) VARIANT="llama-cuda" build-llama-cpp-grpc-server
	cp -rfv backend/cpp/llama-cuda/grpc-server backend-assets/grpc/llama-cpp-cuda

-backend-assets/grpc/llama-cpp-grpc: backend-assets/grpc
-	cp -rf backend/cpp/llama backend/cpp/llama-grpc
-	$(MAKE) -C backend/cpp/llama-grpc purge
-	$(info ${GREEN}I llama-cpp build info:grpc${RESET})
-	CMAKE_ARGS="$(CMAKE_ARGS) -DLLAMA_RPC=ON -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off" $(MAKE) VARIANT="llama-grpc" build-llama-cpp-grpc-server
-	cp -rfv backend/cpp/llama-grpc/grpc-server backend-assets/grpc/llama-cpp-grpc
-
-backend-assets/util/llama-cpp-rpc-server: backend-assets/grpc/llama-cpp-grpc
-	mkdir -p backend-assets/util/
-	cp -rf backend/cpp/llama-grpc/llama.cpp/build/bin/rpc-server backend-assets/util/llama-cpp-rpc-server
-
 backend-assets/grpc/llama-ggml: sources/go-llama.cpp sources/go-llama.cpp/libbinding.a backend-assets/grpc
	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=$(CURDIR)/sources/go-llama.cpp LIBRARY_PATH=$(CURDIR)/sources/go-llama.cpp \
	$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o backend-assets/grpc/llama-ggml ./backend/go/llm/llama-ggml/

@@ -788,7 +764,3 @@ docker-image-intel-xpu:
 .PHONY: swagger
 swagger:
	swag init -g core/http/app.go --output swagger
-
-.PHONY: gen-assets
-gen-assets:
-	$(GOCMD) run core/dependencies_manager/manager.go embedded/webui_static.yaml core/http/static/assets
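Based on the targets above, typical invocations might look like the following (illustrative only):

```bash
# Minimal build: a single llama.cpp backend, no extra Go tags
make build-minimal

# Full build with explicit tags, mirroring the Dockerfile's GO_TAGS
GO_TAGS="stablediffusion tts" make build
```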
README.md — 39 changes

@@ -46,31 +46,18 @@
 **LocalAI** is the free, Open Source OpenAI alternative. LocalAI act as a drop-in replacement REST API that’s compatible with OpenAI (Elevenlabs, Anthropic... ) API specifications for local AI inferencing. It allows you to run LLMs, generate images, audio (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families. Does not require GPU. It is created and maintained by [Ettore Di Giacinto](https://github.com/mudler).

-```bash
-docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
-# Alternative images:
-# - if you have an Nvidia GPU:
-# docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
-# - without preconfigured models
-# docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
-# - without preconfigured models for Nvidia GPUs
-# docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
-```
-
-[💻 Getting started](https://localai.io/basics/getting_started/index.html)
-
 ## 🔥🔥 Hot topics / Roadmap

 [Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)

 - 🔥🔥 Decentralized llama.cpp: https://github.com/mudler/LocalAI/pull/2343 (peer2peer llama.cpp!)
 - 🔥🔥 Openvoice: https://github.com/mudler/LocalAI/pull/2334
 - 🆕 Function calls without grammars and mixed mode: https://github.com/mudler/LocalAI/pull/2328
 - 🔥🔥 Distributed inferencing: https://github.com/mudler/LocalAI/pull/2324
 - Chat, TTS, and Image generation in the WebUI: https://github.com/mudler/LocalAI/pull/2222
 - Reranker API: https://github.com/mudler/LocalAI/pull/2121
 - Gallery WebUI: https://github.com/mudler/LocalAI/pull/2104
 - llama3: https://github.com/mudler/LocalAI/discussions/2076
 - Parler-TTS: https://github.com/mudler/LocalAI/pull/2027
 - Openvino support: https://github.com/mudler/LocalAI/pull/1892
 - Vector store: https://github.com/mudler/LocalAI/pull/1795
 - All-in-one container image: https://github.com/mudler/LocalAI/issues/1855

 Hot topics (looking for contributors):

@@ -83,6 +70,18 @@ Hot topics (looking for contributors):

 If you want to help and contribute, issues up for grabs: https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3A%22up+for+grabs%22

+## 💻 [Getting started](https://localai.io/basics/getting_started/index.html)
+
+For a detailed step-by-step introduction, refer to the [Getting Started](https://localai.io/basics/getting_started/index.html) guide.
+
+For those in a hurry, here's a straightforward one-liner to launch a LocalAI AIO(All-in-one) Image using `docker`:
+
+```bash
+docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
+# or, if you have an Nvidia GPU:
+# docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
+```
+
 ## 🚀 [Features](https://localai.io/features/)

 - 📖 [Text generation with GPTs](https://localai.io/features/text-generation/) (`llama.cpp`, `gpt4all.cpp`, ... [:book: and more](https://localai.io/model-compatibility/index.html#model-compatibility-table))

@@ -117,7 +116,7 @@ Other:
 - VSCode extension https://github.com/badgooooor/localai-vscode-plugin
 - Terminal utility https://github.com/djcopley/ShellOracle
 - Local Smart assistant https://github.com/mudler/LocalAGI
-- Home Assistant https://github.com/sammcj/homeassistant-localai / https://github.com/drndos/hass-openai-custom-conversation / https://github.com/valentinfrlch/ha-gpt4vision
+- Home Assistant https://github.com/sammcj/homeassistant-localai / https://github.com/drndos/hass-openai-custom-conversation
 - Discord bot https://github.com/mudler/LocalAGI/tree/main/examples/discord
 - Slack bot https://github.com/mudler/LocalAGI/tree/main/examples/slack
 - Telegram bot https://github.com/mudler/LocalAI/tree/master/examples/telegram-bot
(unnamed file — a `gpt-4` model configuration; file name not captured)

@@ -2,63 +2,8 @@ name: gpt-4
 mmap: true
 parameters:
   model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf
-context_size: 8192
-
-stopwords:
-- "<|im_end|>"
-- "<dummy32000>"
-- "</tool_call>"
-- "<|eot_id|>"
-- "<|end_of_text|>"
-
-function:
-  # disable injecting the "answer" tool
-  disable_no_action: true
-
-  grammar:
-    # This allows the grammar to also return messages
-    mixed_mode: true
-    # Suffix to add to the grammar
-    #prefix: '<tool_call>\n'
-    # Force parallel calls in the grammar
-    # parallel_calls: true
-
-  return_name_in_function_response: true
-  # Without grammar uncomment the lines below
-  # Warning: this is relying only on the capability of the
-  # LLM model to generate the correct function call.
-  json_regex_match:
-  - "(?s)<tool_call>(.*?)</tool_call>"
-  - "(?s)<tool_call>(.*?)"
-  replace_llm_results:
-  # Drop the scratchpad content from responses
-  - key: "(?s)<scratchpad>.*</scratchpad>"
-    value: ""
-  replace_function_results:
-  # Replace everything that is not JSON array or object
-  #
-  - key: '(?s)^[^{\[]*'
-    value: ""
-  - key: '(?s)[^}\]]*$'
-    value: ""
-  - key: "'([^']*?)'"
-    value: "_DQUOTE_${1}_DQUOTE_"
-  - key: '\\"'
-    value: "__TEMP_QUOTE__"
-  - key: "\'"
-    value: "'"
-  - key: "_DQUOTE_"
-    value: '"'
-  - key: "__TEMP_QUOTE__"
-    value: '"'
-  # Drop the scratchpad content from responses
-  - key: "(?s)<scratchpad>.*</scratchpad>"
-    value: ""
-
 template:
-  chat: |
-    {{.Input -}}
-    <|im_start|>assistant
   chat_message: |
     <|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
     {{- if .FunctionCall }}

@@ -77,25 +22,38 @@ template:
     {{- else if eq .RoleName "tool" }}
     </tool_response>
     {{- end }}<|im_end|>
-  completion: |
-    {{.Input}}
-  function: |-
+  # https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF#prompt-format-for-function-calling
+  function: |
     <|im_start|>system
-    You are a function calling AI model.
-    Here are the available tools:
+    You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
     <tools>
     {{range .Functions}}
     {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
     {{end}}
     </tools>
-    You should call the tools provided to you sequentially
-    Please use <scratchpad> XML tags to record your reasoning and planning before you call the functions as follows:
-    <scratchpad>
-    {step-by-step reasoning and plan in bullet points}
-    </scratchpad>
-    For each function call return a json object with function name and arguments within <tool_call> XML tags as follows:
+    Use the following pydantic model json schema for each tool call you will make:
+    {'title': 'FunctionCall', 'type': 'object', 'properties': {'arguments': {'title': 'Arguments', 'type': 'object'}, 'name': {'title': 'Name', 'type': 'string'}}, 'required': ['arguments', 'name']}
+    For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
     <tool_call>
-    {"arguments": <args-dict>, "name": <function-name>}
+    {'arguments': <args-dict>, 'name': <function-name>}
     </tool_call><|im_end|>
     {{.Input -}}
     <|im_start|>assistant
+    <tool_call>
+  chat: |
+    {{.Input -}}
+    <|im_start|>assistant
+  completion: |
+    {{.Input}}
+context_size: 4096
+f16: true
+stopwords:
+- <|im_end|>
+- <dummy32000>
+- "\n</tool_call>"
+- "\n\n\n"
+usage: |
+  curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
+    "model": "gpt-4",
+    "messages": [{"role": "user", "content": "How are you doing?", "temperature": 0.1}]
+  }'
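Beyond the usage snippet in the config above, a request that actually exercises the function-calling template could look like this — the `get_weather` tool is a made-up example, not part of the configuration:

```bash
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "gpt-4",
  "messages": [{"role": "user", "content": "What is the weather in Rome?"}],
  "tools": [{"type": "function", "function": {"name": "get_weather",
    "parameters": {"type": "object", "properties": {"location": {"type": "string"}}}}}]
}'
```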
(unnamed file — a second `gpt-4` model configuration; file name not captured)

@@ -2,63 +2,8 @@ name: gpt-4
@@ -77,25 +22,38 @@ template:

The diff for this file is identical to the previous file's diff (the capture repeats the same content verbatim): the stopwords/function/grammar configuration and the scratchpad-style function template are removed, and the pydantic-schema function template, trailing `chat:`/`completion:` templates, `context_size: 4096`, `f16: true`, stopwords, and `usage:` block are restored in their place.
(unnamed file — a third `gpt-4` model configuration; file name not captured)

@@ -1,66 +1,10 @@
 name: gpt-4
 mmap: false
-context_size: 8192
-
 f16: false
 parameters:
   model: huggingface://NousResearch/Hermes-2-Pro-Llama-3-8B-GGUF/Hermes-2-Pro-Llama-3-8B-Q4_K_M.gguf

-stopwords:
-- "<|im_end|>"
-- "<dummy32000>"
-- "</tool_call>"
-- "<|eot_id|>"
-- "<|end_of_text|>"
-
-function:
-  # disable injecting the "answer" tool
-  disable_no_action: true
-
-  grammar:
-    # This allows the grammar to also return messages
-    mixed_mode: true
-    # Suffix to add to the grammar
-    #prefix: '<tool_call>\n'
-    # Force parallel calls in the grammar
-    # parallel_calls: true
-
-  return_name_in_function_response: true
-  # Without grammar uncomment the lines below
-  # Warning: this is relying only on the capability of the
-  # LLM model to generate the correct function call.
-  json_regex_match:
-  - "(?s)<tool_call>(.*?)</tool_call>"
-  - "(?s)<tool_call>(.*?)"
-  replace_llm_results:
-  # Drop the scratchpad content from responses
-  - key: "(?s)<scratchpad>.*</scratchpad>"
-    value: ""
-  replace_function_results:
-  # Replace everything that is not JSON array or object
-  #
-  - key: '(?s)^[^{\[]*'
-    value: ""
-  - key: '(?s)[^}\]]*$'
-    value: ""
-  - key: "'([^']*?)'"
-    value: "_DQUOTE_${1}_DQUOTE_"
-  - key: '\\"'
-    value: "__TEMP_QUOTE__"
-  - key: "\'"
-    value: "'"
-  - key: "_DQUOTE_"
-    value: '"'
-  - key: "__TEMP_QUOTE__"
-    value: '"'
-  # Drop the scratchpad content from responses
-  - key: "(?s)<scratchpad>.*</scratchpad>"
-    value: ""
-
 template:
-  chat: |
-    {{.Input -}}
-    <|im_start|>assistant
   chat_message: |
     <|im_start|>{{if eq .RoleName "assistant"}}assistant{{else if eq .RoleName "system"}}system{{else if eq .RoleName "tool"}}tool{{else if eq .RoleName "user"}}user{{end}}
     {{- if .FunctionCall }}

@@ -79,25 +23,37 @@ template:
     {{- else if eq .RoleName "tool" }}
     </tool_response>
     {{- end }}<|im_end|>
-  completion: |
-    {{.Input}}
-  function: |-
+  # https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B-GGUF#prompt-format-for-function-calling
+  function: |
     <|im_start|>system
-    You are a function calling AI model.
-    Here are the available tools:
+    You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
     <tools>
     {{range .Functions}}
     {'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
     {{end}}
     </tools>
-    You should call the tools provided to you sequentially
-    Please use <scratchpad> XML tags to record your reasoning and planning before you call the functions as follows:
-    <scratchpad>
-    {step-by-step reasoning and plan in bullet points}
-    </scratchpad>
-    For each function call return a json object with function name and arguments within <tool_call> XML tags as follows:
+    Use the following pydantic model json schema for each tool call you will make:
+    {'title': 'FunctionCall', 'type': 'object', 'properties': {'arguments': {'title': 'Arguments', 'type': 'object'}, 'name': {'title': 'Name', 'type': 'string'}}, 'required': ['arguments', 'name']}
+    For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
     <tool_call>
-    {"arguments": <args-dict>, "name": <function-name>}
+    {'arguments': <args-dict>, 'name': <function-name>}
     </tool_call><|im_end|>
     {{.Input -}}
     <|im_start|>assistant
+    <tool_call>
+  chat: |
+    {{.Input -}}
+    <|im_start|>assistant
+  completion: |
+    {{.Input}}
+context_size: 4096
+stopwords:
+- <|im_end|>
+- "\n</tool_call>"
+- <dummy32000>
+- "\n\n\n"
+usage: |
+  curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
+    "model": "gpt-4",
+    "messages": [{"role": "user", "content": "How are you doing?", "temperature": 0.1}]
+  }'
(unnamed file — llama.cpp gRPC server source; file name not captured)

@@ -791,7 +791,7 @@ struct llama_server_context
                    sampler_names.emplace_back(sampler_name);
                }
            }
-            slot->sparams.samplers_sequence = llama_sampling_types_from_names(sampler_names, false);
+            slot->sparams.samplers_sequence = sampler_types_from_names(sampler_names, false);
        }
        else
        {

@@ -1146,7 +1146,7 @@ struct llama_server_context
        std::vector<std::string> samplers_sequence;
        for (const auto &sampler_type : slot.sparams.samplers_sequence)
        {
-            samplers_sequence.emplace_back(llama_sampling_type_to_str(sampler_type));
+            samplers_sequence.emplace_back(sampler_type_to_name_string(sampler_type));
        }

        return json {

@@ -2217,12 +2217,6 @@ static void params_parse(const backend::ModelOptions* request,
    } else {
        params.n_parallel = 1;
    }
-
-    const char *llama_grpc_servers = std::getenv("LLAMACPP_GRPC_SERVERS");
-    if (llama_grpc_servers != NULL) {
-        params.rpc_servers = std::string(llama_grpc_servers);
-    }
-
    // TODO: Add yarn

    if (!request->tensorsplit().empty()) {
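The removed block reads LLAMACPP_GRPC_SERVERS and hands it to llama.cpp's RPC support. A sketch of wiring this up by hand, per llama.cpp's rpc example README (host names are illustrative):

```bash
# On each worker machine: start a llama.cpp RPC worker
./rpc-server -p 50052

# On the LocalAI host: point the llama.cpp backend at the workers
LLAMACPP_GRPC_SERVERS="worker1:50052,worker2:50052" ./local-ai
```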
(unnamed file — Python backend requirements; file name not captured)

@@ -1,5 +1,4 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
 intel-extension-for-pytorch
 torch
-optimum[openvino]
-setuptools
+optimum[openvino]
(unnamed file — Python backend requirements; file name not captured)

@@ -1,6 +1,6 @@
 accelerate
 auto-gptq==0.7.1
-grpcio==1.64.0
+grpcio==1.63.0
 protobuf
 torch
 certifi
(unnamed file — Python backend requirements; file name not captured)

@@ -2,5 +2,4 @@
 intel-extension-for-pytorch
 torch
 torchaudio
-optimum[openvino]
-setuptools
+optimum[openvino]
(unnamed file — Python backend requirements; file name not captured)

@@ -1,6 +1,6 @@
 accelerate
 bark==0.1.5
-grpcio==1.64.0
+grpcio==1.63.0
 protobuf
 certifi
 transformers
(unnamed file — Python backend requirements; file name not captured)

@@ -1,2 +1,2 @@
-grpcio==1.64.0
+grpcio==1.63.0
 protobuf
(unnamed file — Python backend requirements; file name not captured)

@@ -2,5 +2,4 @@
 intel-extension-for-pytorch
 torch
 torchaudio
-optimum[openvino]
-setuptools
+optimum[openvino]
(unnamed file — Python backend requirements; file name not captured)

@@ -1,6 +1,6 @@
 accelerate
 TTS==0.22.0
-grpcio==1.64.0
+grpcio==1.63.0
 protobuf
 certifi
 transformers
(unnamed file — Python backend requirements; file name not captured)

@@ -2,5 +2,4 @@
 intel-extension-for-pytorch
 torch
 torchvision
-optimum[openvino]
-setuptools
+optimum[openvino]
(unnamed file — Python backend requirements; file name not captured)

@@ -1,7 +1,7 @@
 accelerate
 compel
 diffusers
-grpcio==1.64.0
+grpcio==1.63.0
 opencv-python
 pillow
 protobuf
(unnamed file — Python backend requirements; file name not captured)

@@ -1,4 +1,4 @@
-grpcio==1.64.0
+grpcio==1.63.0
 protobuf
 torch
 transformers
(unnamed file — Python backend requirements; file name not captured)

@@ -1,5 +1,5 @@
 accelerate
-grpcio==1.64.0
+grpcio==1.63.0
 protobuf
 certifi
 torch
(unnamed file — Python backend requirements; file name not captured)

@@ -1,6 +1,6 @@
 causal-conv1d==1.2.0.post2
 mamba-ssm==1.2.0.post1
-grpcio==1.64.0
+grpcio==1.63.0
 protobuf
 certifi
 transformers
(deleted file — OpenVoice backend Makefile by content; file name not captured)

@@ -1,25 +0,0 @@
-.DEFAULT_GOAL := install
-
-.PHONY: install
-install: protogen
-	bash install.sh
-
-.PHONY: protogen
-protogen: backend_pb2_grpc.py backend_pb2.py
-
-.PHONY: protogen-clean
-protogen-clean:
-	$(RM) backend_pb2_grpc.py backend_pb2.py
-
-backend_pb2_grpc.py backend_pb2.py:
-	python3 -m grpc_tools.protoc -I../.. --python_out=. --grpc_python_out=. backend.proto
-
-.PHONY: clean
-clean: protogen-clean
-	rm -rf venv __pycache__
-
-.PHONY: test
-test: protogen
-	@echo "Testing openvoice..."
-	bash test.sh
-	@echo "openvoice tested."
(deleted file — OpenVoice gRPC server; the test file below launches it as "backend.py")

@@ -1,158 +0,0 @@
-#!/usr/bin/env python3
-"""
-Extra gRPC server for OpenVoice models.
-"""
-from concurrent import futures
-
-import argparse
-import signal
-import sys
-import os
-import torch
-from openvoice import se_extractor
-from openvoice.api import ToneColorConverter
-from melo.api import TTS
-
-import time
-import backend_pb2
-import backend_pb2_grpc
-
-import grpc
-
-
-_ONE_DAY_IN_SECONDS = 60 * 60 * 24
-
-# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
-MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
-
-# Implement the BackendServicer class with the service methods
-class BackendServicer(backend_pb2_grpc.BackendServicer):
-    """
-    A gRPC servicer for the backend service.
-
-    This class implements the gRPC methods for the backend service, including Health, LoadModel, and Embedding.
-    """
-    def Health(self, request, context):
-        """
-        A gRPC method that returns the health status of the backend service.
-
-        Args:
-            request: A HealthRequest object that contains the request parameters.
-            context: A grpc.ServicerContext object that provides information about the RPC.
-
-        Returns:
-            A Reply object that contains the health status of the backend service.
-        """
-        return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
-
-    def LoadModel(self, request, context):
-        """
-        A gRPC method that loads a model into memory.
-
-        Args:
-            request: A LoadModelRequest object that contains the request parameters.
-            context: A grpc.ServicerContext object that provides information about the RPC.
-
-        Returns:
-            A Result object that contains the result of the LoadModel operation.
-        """
-        model_name = request.Model
-        try:
-            self.clonedVoice = False
-            # Assume directory from request.ModelFile.
-            # Only if request.LoraAdapter it's not an absolute path
-            if request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
-                # get base path of modelFile
-                modelFileBase = os.path.dirname(request.ModelFile)
-                request.AudioPath = os.path.join(modelFileBase, request.AudioPath)
-            if request.AudioPath != "":
-                self.clonedVoice = True
-
-            self.modelpath = request.ModelFile
-            self.speaker = request.Type
-            self.ClonedVoicePath = request.AudioPath
-
-            ckpt_converter = request.Model+'/converter'
-            device = "cuda:0" if torch.cuda.is_available() else "cpu"
-            self.device = device
-            self.tone_color_converter = None
-            if self.clonedVoice:
-                self.tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
-                self.tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')
-
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
-
-        return backend_pb2.Result(message="Model loaded successfully", success=True)
-
-    def TTS(self, request, context):
-        model_name = request.model
-        if model_name == "":
-            return backend_pb2.Result(success=False, message="request.model is required")
-        try:
-            # Speed is adjustable
-            speed = 1.0
-            voice = "EN"
-            if request.voice:
-                voice = request.voice
-            model = TTS(language=voice, device=self.device)
-            speaker_ids = model.hps.data.spk2id
-            speaker_key = self.speaker
-            modelpath = self.modelpath
-            for s in speaker_ids.keys():
-                print(f"Speaker: {s} - ID: {speaker_ids[s]}")
-            speaker_id = speaker_ids[speaker_key]
-            speaker_key = speaker_key.lower().replace('_', '-')
-            source_se = torch.load(f'{modelpath}/base_speakers/ses/{speaker_key}.pth', map_location=self.device)
-            model.tts_to_file(request.text, speaker_id, request.dst, speed=speed)
-            if self.clonedVoice:
-                reference_speaker = self.ClonedVoicePath
-                target_se, audio_name = se_extractor.get_se(reference_speaker, self.tone_color_converter, vad=False)
-                # Run the tone color converter
-                encode_message = "@MyShell"
-                self.tone_color_converter.convert(
-                    audio_src_path=request.dst,
-                    src_se=source_se,
-                    tgt_se=target_se,
-                    output_path=request.dst,
-                    message=encode_message)
-
-            print("[OpenVoice] TTS generated!", file=sys.stderr)
-            print("[OpenVoice] TTS saved to", request.dst, file=sys.stderr)
-            print(request, file=sys.stderr)
-        except Exception as err:
-            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
-        return backend_pb2.Result(success=True)
-
-def serve(address):
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
-    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
-    server.add_insecure_port(address)
-    server.start()
-    print("[OpenVoice] Server started. Listening on: " + address, file=sys.stderr)
-
-    # Define the signal handler function
-    def signal_handler(sig, frame):
-        print("[OpenVoice] Received termination signal. Shutting down...")
-        server.stop(0)
-        sys.exit(0)
-
-    # Set the signal handlers for SIGINT and SIGTERM
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-
-    try:
-        while True:
-            time.sleep(_ONE_DAY_IN_SECONDS)
-    except KeyboardInterrupt:
-        server.stop(0)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run the gRPC server.")
-    parser.add_argument(
-        "--addr", default="localhost:50051", help="The address to bind the server to."
-    )
-    args = parser.parse_args()
-    print(f"[OpenVoice] startup: {args}", file=sys.stderr)
-    serve(args.addr)
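Once this backend was registered (see the openvoice entry removed from EXTERNAL_GRPC_BACKENDS in the Dockerfile above), a TTS request would flow through LocalAI's HTTP API. A hedged sketch — the model name is taken from the test below, and the exact request fields may differ:

```bash
curl http://localhost:8080/tts -H "Content-Type: application/json" \
  -d '{"model": "checkpoints_v2", "input": "Hello from OpenVoice"}'
```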
(deleted file — OpenVoice backend install script by content; file name not captured)

@@ -1,16 +0,0 @@
-#!/bin/bash
-set -e
-
-source $(dirname $0)/../common/libbackend.sh
-
-# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
-# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
-# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
-# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
-if [ "x${BUILD_PROFILE}" == "xintel" ]; then
-    EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
-fi
-
-installRequirements
-
-python -m unidic download
@@ -1,23 +0,0 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch
torch
optimum[openvino]
grpcio==1.64.0
protobuf
librosa==0.9.1
faster-whisper==0.9.0
pydub==0.25.1
wavmark==0.0.3
numpy==1.22.0
eng_to_ipa==0.0.2
inflect==7.0.0
unidecode==1.3.7
whisper-timestamped==1.14.2
openai
python-dotenv
pypinyin==0.50.0
cn2an==0.5.22
jieba==0.42.1
gradio==3.48.0
langid==1.1.6
git+https://github.com/myshell-ai/MeloTTS.git
@@ -1,20 +0,0 @@
grpcio==1.64.0
protobuf
librosa==0.9.1
faster-whisper==0.9.0
pydub==0.25.1
wavmark==0.0.3
numpy==1.22.0
eng_to_ipa==0.0.2
inflect==7.0.0
unidecode==1.3.7
whisper-timestamped==1.14.2
openai
python-dotenv
pypinyin==0.50.0
cn2an==0.5.22
jieba==0.42.1
gradio==3.48.0
langid==1.1.6
git+https://github.com/myshell-ai/MeloTTS.git
git+https://github.com/myshell-ai/OpenVoice.git
@@ -1,4 +0,0 @@
#!/bin/bash
source $(dirname $0)/../common/libbackend.sh

startBackend $@
@@ -1,82 +0,0 @@
"""
A test script to test the gRPC service
"""
import unittest
import subprocess
import time
import backend_pb2
import backend_pb2_grpc

import grpc


class TestBackendServicer(unittest.TestCase):
    """
    TestBackendServicer is the class that tests the gRPC service
    """
    def setUp(self):
        """
        This method sets up the gRPC service by starting the server
        """
        self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
        time.sleep(10)

    def tearDown(self) -> None:
        """
        This method tears down the gRPC service by terminating the server
        """
        self.service.terminate()
        self.service.wait()

    def test_server_startup(self):
        """
        This method tests if the server starts up successfully
        """
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.Health(backend_pb2.HealthMessage())
                self.assertEqual(response.message, b'OK')
        except Exception as err:
            print(err)
            self.fail("Server failed to start")
        finally:
            self.tearDown()

    def test_load_model(self):
        """
        This method tests if the model is loaded successfully
        """
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.LoadModel(backend_pb2.ModelOptions(Model="checkpoints_v2",
                                                                   Type="en-us"))
                self.assertTrue(response.success)
                self.assertEqual(response.message, "Model loaded successfully")
        except Exception as err:
            print(err)
            self.fail("LoadModel service failed")
        finally:
            self.tearDown()

    def test_tts(self):
        """
        This method tests if TTS audio is generated successfully
        """
        try:
            self.setUp()
            with grpc.insecure_channel("localhost:50051") as channel:
                stub = backend_pb2_grpc.BackendStub(channel)
                response = stub.LoadModel(backend_pb2.ModelOptions(Model="dingzhen"))
                self.assertTrue(response.success)
                tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story", voice="EN")
                tts_response = stub.TTS(tts_request)
                self.assertIsNotNone(tts_response)
        except Exception as err:
            print(err)
            self.fail("TTS service failed")
        finally:
            self.tearDown()
@@ -1,12 +0,0 @@
#!/bin/bash
set -e

source $(dirname $0)/../common/libbackend.sh

# Download checkpoints if not present
if [ ! -d "checkpoints_v2" ]; then
    wget https://myshell-public-repo-hosting.s3.amazonaws.com/openvoice/checkpoints_v2_0417.zip -O checkpoints_v2.zip
    unzip checkpoints_v2.zip
fi

runUnittests
@@ -2,5 +2,4 @@
intel-extension-for-pytorch
torch
torchaudio
optimum[openvino]
setuptools
optimum[openvino]

@@ -1,5 +1,5 @@
accelerate
grpcio==1.64.0
grpcio==1.63.0
protobuf
torch
git+https://github.com/huggingface/parler-tts.git@10016fb0300c0dc31a0fb70e26f3affee7b62f16

@@ -1,5 +1,4 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch
torch
optimum[openvino]
setuptools
optimum[openvino]

@@ -1,5 +1,4 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch
torch
optimum[openvino]
setuptools
optimum[openvino]

@@ -1,6 +1,6 @@
accelerate
rerankers[transformers]
grpcio==1.64.0
grpcio==1.63.0
protobuf
certifi
transformers

@@ -1,5 +1,4 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch
torch
optimum[openvino]
setuptools
optimum[openvino]

@@ -1,6 +1,6 @@
accelerate
sentence-transformers==2.5.1
transformers
grpcio==1.64.0
grpcio==1.63.0
protobuf
certifi

@@ -1,5 +1,4 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch
torch
optimum[openvino]
setuptools
optimum[openvino]

@@ -1,6 +1,6 @@
accelerate
transformers
grpcio==1.64.0
grpcio==1.63.0
protobuf
torch
scipy==1.13.0

@@ -1,6 +1,6 @@
accelerate
transformers
grpcio==1.64.0
grpcio==1.63.0
protobuf
torch
certifi

@@ -2,5 +2,4 @@
intel-extension-for-pytorch
torch
torchaudio
optimum[openvino]
setuptools
optimum[openvino]

@@ -1,4 +1,4 @@
accelerate
grpcio==1.64.0
grpcio==1.63.0
protobuf
certifi

@@ -1,5 +1,4 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch
torch
optimum[openvino]
setuptools
optimum[openvino]

@@ -1,6 +1,6 @@
accelerate
vllm
grpcio==1.64.0
grpcio==1.63.0
protobuf
certifi
transformers
@@ -1,16 +1,20 @@
package cli

import (
    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/core/cli/worker"
)
import "embed"

type Context struct {
    Debug    bool    `env:"LOCALAI_DEBUG,DEBUG" default:"false" hidden:"" help:"DEPRECATED, use --log-level=debug instead. Enable debug logging"`
    LogLevel *string `env:"LOCALAI_LOG_LEVEL" enum:"error,warn,info,debug,trace" help:"Set the level of logs to output [${enum}]"`

    // This field is not a command line argument/flag, the struct tag excludes it from the parsed CLI
    BackendAssets embed.FS `kong:"-"`
}

var CLI struct {
    cliContext.Context `embed:""`
    Context `embed:""`

    Run        RunCMD        `cmd:"" help:"Run LocalAI, this the default command if no other command is specified. Run 'local-ai run --help' for more information" default:"withargs"`
    Models     ModelsCMD     `cmd:"" help:"Manage LocalAI models and definitions"`
    TTS        TTSCMD        `cmd:"" help:"Convert text to speech"`
    Transcript TranscriptCMD `cmd:"" help:"Convert audio to text"`
    Worker     worker.Worker `cmd:"" help:"Run workers to distribute workload (llama.cpp-only)"`
}
@@ -1,11 +0,0 @@
package cliContext

import "embed"

type Context struct {
    Debug    bool    `env:"LOCALAI_DEBUG,DEBUG" default:"false" hidden:"" help:"DEPRECATED, use --log-level=debug instead. Enable debug logging"`
    LogLevel *string `env:"LOCALAI_LOG_LEVEL" enum:"error,warn,info,debug,trace" help:"Set the level of logs to output [${enum}]"`

    // This field is not a command line argument/flag, the struct tag excludes it from the parsed CLI
    BackendAssets embed.FS `kong:"-"`
}
@@ -4,8 +4,6 @@ import (
    "encoding/json"
    "fmt"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"

    "github.com/go-skynet/LocalAI/pkg/gallery"
    "github.com/rs/zerolog/log"
    "github.com/schollz/progressbar/v3"
@@ -31,7 +29,7 @@ type ModelsCMD struct {
    Install ModelsInstall `cmd:"" help:"Install a model from the gallery"`
}

func (ml *ModelsList) Run(ctx *cliContext.Context) error {
func (ml *ModelsList) Run(ctx *Context) error {
    var galleries []gallery.Gallery
    if err := json.Unmarshal([]byte(ml.Galleries), &galleries); err != nil {
        log.Error().Err(err).Msg("unable to load galleries")
@@ -51,7 +49,7 @@ func (ml *ModelsList) Run(ctx *cliContext.Context) error {
    return nil
}

func (mi *ModelsInstall) Run(ctx *cliContext.Context) error {
func (mi *ModelsInstall) Run(ctx *Context) error {
    modelName := mi.ModelArgs[0]

    var galleries []gallery.Gallery
@@ -1,15 +1,12 @@
package cli

import (
    "context"
    "fmt"
    "strings"
    "time"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/core/config"
    "github.com/go-skynet/LocalAI/core/http"
    "github.com/go-skynet/LocalAI/core/p2p"
    "github.com/go-skynet/LocalAI/core/startup"
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
@@ -40,14 +37,13 @@ type RunCMD struct {
    Threads     int `env:"LOCALAI_THREADS,THREADS" short:"t" default:"4" help:"Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested" group:"performance"`
    ContextSize int `env:"LOCALAI_CONTEXT_SIZE,CONTEXT_SIZE" default:"512" help:"Default context size for models" group:"performance"`

    Address          string   `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"`
    CORS             bool     `env:"LOCALAI_CORS,CORS" help:"" group:"api"`
    CORSAllowOrigins string   `env:"LOCALAI_CORS_ALLOW_ORIGINS,CORS_ALLOW_ORIGINS" group:"api"`
    UploadLimit      int      `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"`
    APIKeys          []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"`
    DisableWebUI     bool     `env:"LOCALAI_DISABLE_WEBUI,DISABLE_WEBUI" default:"false" help:"Disable webui" group:"api"`
    Peer2Peer        bool     `env:"LOCALAI_P2P,P2P" name:"p2p" default:"false" help:"Enable P2P mode" group:"p2p"`
    Peer2PeerToken   string   `env:"LOCALAI_P2P_TOKEN,P2P_TOKEN" name:"p2ptoken" help:"Token for P2P mode (optional)" group:"p2p"`
    Address          string   `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"`
    CORS             bool     `env:"LOCALAI_CORS,CORS" help:"" group:"api"`
    CORSAllowOrigins string   `env:"LOCALAI_CORS_ALLOW_ORIGINS,CORS_ALLOW_ORIGINS" group:"api"`
    UploadLimit      int      `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"`
    APIKeys          []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"`
    DisableWebUI     bool     `env:"LOCALAI_DISABLE_WEBUI,DISABLE_WEBUI" default:"false" help:"Disable webui" group:"api"`

    ParallelRequests    bool `env:"LOCALAI_PARALLEL_REQUESTS,PARALLEL_REQUESTS" help:"Enable backends to handle multiple requests in parallel if they support it (e.g.: llama.cpp or vllm)" group:"backends"`
    SingleActiveBackend bool `env:"LOCALAI_SINGLE_ACTIVE_BACKEND,SINGLE_ACTIVE_BACKEND" help:"Allow only one backend to be run at a time" group:"backends"`
    PreloadBackendOnly  bool `env:"LOCALAI_PRELOAD_BACKEND_ONLY,PRELOAD_BACKEND_ONLY" default:"false" help:"Do not launch the API services, only the preloaded models / backends are started (useful for multi-node setups)" group:"backends"`
@@ -58,7 +54,7 @@ type RunCMD struct {
    WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"`
}

func (r *RunCMD) Run(ctx *cliContext.Context) error {
func (r *RunCMD) Run(ctx *Context) error {
    opts := []config.AppOption{
        config.WithConfigFile(r.ModelsConfigFile),
        config.WithJSONStringPreload(r.PreloadModels),
@@ -85,31 +81,6 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
        config.WithModelsURL(append(r.Models, r.ModelArgs...)...),
    }

    if r.Peer2Peer || r.Peer2PeerToken != "" {
        log.Info().Msg("P2P mode enabled")
        token := r.Peer2PeerToken
        if token == "" {
            // IF no token is provided, and p2p is enabled,
            // we generate one and wait for the user to pick up the token (this is for interactive)
            log.Info().Msg("No token provided, generating one")
            token = p2p.GenerateToken()
            log.Info().Msg("Generated Token:")
            fmt.Println(token)

            log.Info().Msg("To use the token, you can run the following command in another node or terminal:")
            fmt.Printf("export TOKEN=\"%s\"\nlocal-ai worker p2p-llama-cpp-rpc\n", token)

            // Ask for user confirmation
            log.Info().Msg("Press a button to proceed")
            var input string
            fmt.Scanln(&input)
        }
        log.Info().Msg("Starting P2P server discovery...")
        if err := p2p.LLamaCPPRPCServerDiscoverer(context.Background(), token); err != nil {
            return err
        }
    }

    idleWatchDog := r.EnableWatchdogIdle
    busyWatchDog := r.EnableWatchdogBusy
@@ -6,7 +6,6 @@ import (
    "fmt"

    "github.com/go-skynet/LocalAI/core/backend"
    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/core/config"
    "github.com/go-skynet/LocalAI/pkg/model"
    "github.com/rs/zerolog/log"
@@ -23,7 +22,7 @@ type TranscriptCMD struct {
    BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
}

func (t *TranscriptCMD) Run(ctx *cliContext.Context) error {
func (t *TranscriptCMD) Run(ctx *Context) error {
    opts := &config.ApplicationConfig{
        ModelPath: t.ModelsPath,
        Context:   context.Background(),
@@ -8,7 +8,6 @@ import (
    "strings"

    "github.com/go-skynet/LocalAI/core/backend"
    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/core/config"
    "github.com/go-skynet/LocalAI/pkg/model"
    "github.com/rs/zerolog/log"
@@ -25,7 +24,7 @@ type TTSCMD struct {
    BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
}

func (t *TTSCMD) Run(ctx *cliContext.Context) error {
func (t *TTSCMD) Run(ctx *Context) error {
    outputFile := t.OutputFile
    outputDir := t.BackendAssetsPath
    if outputFile != "" {
@@ -1,10 +0,0 @@
package worker

type WorkerFlags struct {
    BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
}

type Worker struct {
    P2P      P2P      `cmd:"" name:"p2p-llama-cpp-rpc" help:"Starts a LocalAI llama.cpp worker in P2P mode (requires a token)"`
    LLamaCPP LLamaCPP `cmd:"" name:"llama-cpp-rpc" help:"Starts a llama.cpp worker in standalone mode"`
}
@@ -1,43 +0,0 @@
package worker

import (
    "fmt"
    "os"
    "syscall"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/pkg/assets"
    "github.com/rs/zerolog/log"
)

type LLamaCPP struct {
    Args        []string `arg:"" optional:"" name:"models" help:"Model configuration URLs to load"`
    WorkerFlags `embed:""`
}

func (r *LLamaCPP) Run(ctx *cliContext.Context) error {
    // Extract files from the embedded FS
    err := assets.ExtractFiles(ctx.BackendAssets, r.BackendAssetsPath)
    log.Debug().Msgf("Extracting backend assets files to %s", r.BackendAssetsPath)
    if err != nil {
        log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
    }

    if len(os.Args) < 4 {
        return fmt.Errorf("usage: local-ai worker llama-cpp-rpc -- <llama-rpc-server-args>")
    }

    return syscall.Exec(
        assets.ResolvePath(
            r.BackendAssetsPath,
            "util",
            "llama-cpp-rpc-server",
        ),
        append([]string{
            assets.ResolvePath(
                r.BackendAssetsPath,
                "util",
                "llama-cpp-rpc-server",
            )}, os.Args[4:]...),
        os.Environ())
}
@@ -1,16 +0,0 @@
//go:build !p2p
// +build !p2p

package worker

import (
    "fmt"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
)

type P2P struct{}

func (r *P2P) Run(ctx *cliContext.Context) error {
    return fmt.Errorf("p2p mode is not enabled in this build")
}
@@ -1,104 +0,0 @@
//go:build p2p
// +build p2p

package worker

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "time"

    cliContext "github.com/go-skynet/LocalAI/core/cli/context"
    "github.com/go-skynet/LocalAI/core/p2p"
    "github.com/go-skynet/LocalAI/pkg/assets"
    "github.com/phayes/freeport"
    "github.com/rs/zerolog/log"
)

type P2P struct {
    WorkerFlags       `embed:""`
    Token             string   `env:"LOCALAI_TOKEN,TOKEN" help:"JSON list of galleries"`
    NoRunner          bool     `env:"LOCALAI_NO_RUNNER,NO_RUNNER" help:"Do not start the llama-cpp-rpc-server"`
    RunnerAddress     string   `env:"LOCALAI_RUNNER_ADDRESS,RUNNER_ADDRESS" help:"Address of the llama-cpp-rpc-server"`
    RunnerPort        string   `env:"LOCALAI_RUNNER_PORT,RUNNER_PORT" help:"Port of the llama-cpp-rpc-server"`
    ExtraLLamaCPPArgs []string `env:"LOCALAI_EXTRA_LLAMA_CPP_ARGS,EXTRA_LLAMA_CPP_ARGS" help:"Extra arguments to pass to llama-cpp-rpc-server"`
}

func (r *P2P) Run(ctx *cliContext.Context) error {
    // Extract files from the embedded FS
    err := assets.ExtractFiles(ctx.BackendAssets, r.BackendAssetsPath)
    log.Debug().Msgf("Extracting backend assets files to %s", r.BackendAssetsPath)
    if err != nil {
        log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
    }

    // Check if the token is set
    // as we always need it.
    if r.Token == "" {
        return fmt.Errorf("Token is required")
    }

    port, err := freeport.GetFreePort()
    if err != nil {
        return err
    }

    address := "127.0.0.1"

    if r.NoRunner {
        // Let the user override which port and address to bind if they
        // configure the llama-cpp service on their own
        p := fmt.Sprint(port)
        if r.RunnerAddress != "" {
            address = r.RunnerAddress
        }
        if r.RunnerPort != "" {
            p = r.RunnerPort
        }

        err = p2p.BindLLamaCPPWorker(context.Background(), address, p, r.Token)
        if err != nil {
            return err
        }
        log.Info().Msgf("You need to start llama-cpp-rpc-server on '%s:%s'", address, p)

        return nil
    }

    // Start llama.cpp directly from the version we have pre-packaged
    go func() {
        for {
            log.Info().Msgf("Starting llama-cpp-rpc-server on '%s:%d'", address, port)
            cmd := exec.Command(
                assets.ResolvePath(
                    r.BackendAssetsPath,
                    "util",
                    "llama-cpp-rpc-server",
                ),
                append([]string{"--host", address, "--port", fmt.Sprint(port)}, r.ExtraLLamaCPPArgs...)...,
            )

            cmd.Env = os.Environ()

            cmd.Stderr = os.Stdout
            cmd.Stdout = os.Stdout

            if err := cmd.Start(); err != nil {
                log.Error().Err(err).Msg("Failed to start llama-cpp-rpc-server")
            }

            cmd.Wait()
        }
    }()

    err = p2p.BindLLamaCPPWorker(context.Background(), address, fmt.Sprint(port), r.Token)
    if err != nil {
        return err
    }

    for {
        time.Sleep(1 * time.Second)
    }
}
@@ -2,8 +2,6 @@ package config

import (
    "os"
    "regexp"
    "strings"

    "github.com/go-skynet/LocalAI/core/schema"
    "github.com/go-skynet/LocalAI/pkg/downloader"
@@ -95,8 +93,6 @@ type Diffusers struct {
    ControlNet string `yaml:"control_net"`
}

// LLMConfig is a struct that holds the configuration that are
// generic for most of the LLM backends.
type LLMConfig struct {
    SystemPrompt string `yaml:"system_prompt"`
    TensorSplit  string `yaml:"tensor_split"`
@@ -148,7 +144,6 @@ type LLMConfig struct {
    YarnBetaSlow float32 `yaml:"yarn_beta_slow"`
}

// AutoGPTQ is a struct that holds the configuration specific to the AutoGPTQ backend
type AutoGPTQ struct {
    ModelBaseName    string `yaml:"model_base_name"`
    Device           string `yaml:"device"`
@@ -156,31 +151,13 @@ type AutoGPTQ struct {
    UseFastTokenizer bool `yaml:"use_fast_tokenizer"`
}

// TemplateConfig is a struct that holds the configuration of the templating system
type TemplateConfig struct {
    // Chat is the template used in the chat completion endpoint
    Chat string `yaml:"chat"`

    // ChatMessage is the template used for chat messages
    ChatMessage string `yaml:"chat_message"`

    // Completion is the template used for completion requests
    Completion string `yaml:"completion"`

    // Edit is the template used for edit completion requests
    Edit string `yaml:"edit"`

    // Functions is the template used when tools are present in the client requests
    Functions string `yaml:"function"`

    // UseTokenizerTemplate is a flag that indicates if the tokenizer template should be used.
    // Note: this is mostly consumed for backends such as vllm and transformers
    // that can use the tokenizers specified in the JSON config files of the models
    UseTokenizerTemplate bool `yaml:"use_tokenizer_template"`

    // JoinChatMessagesByCharacter is a string that will be used to join chat messages together.
    // It defaults to \n
    JoinChatMessagesByCharacter *string `yaml:"join_chat_messages_by_character"`
    Chat string `yaml:"chat"`
    ChatMessage string `yaml:"chat_message"`
    Completion string `yaml:"completion"`
    Edit string `yaml:"edit"`
    Functions string `yaml:"function"`
    UseTokenizerTemplate bool `yaml:"use_tokenizer_template"`
}

func (c *BackendConfig) SetFunctionCallString(s string) {
@@ -358,34 +335,3 @@ func (cfg *BackendConfig) SetDefaults(opts ...ConfigLoaderOption) {
        cfg.Debug = &trueV
    }
}

func (c *BackendConfig) Validate() bool {
    downloadedFileNames := []string{}
    for _, f := range c.DownloadFiles {
        downloadedFileNames = append(downloadedFileNames, f.Filename)
    }
    validationTargets := []string{c.Backend, c.Model, c.MMProj}
    validationTargets = append(validationTargets, downloadedFileNames...)
    // Simple validation to make sure the model can be correctly loaded
    for _, n := range validationTargets {
        if n == "" {
            continue
        }
        if strings.HasPrefix(n, string(os.PathSeparator)) ||
            strings.Contains(n, "..") {
            return false
        }
    }

    if c.Name == "" {
        return false
    }

    if c.Backend != "" {
        // a regex that checks that is a string name with no special characters, except '-' and '_'
        re := regexp.MustCompile(`^[a-zA-Z0-9-_]+$`)
        return re.MatchString(c.Backend)
    }

    return true
}
@@ -23,12 +23,6 @@ type BackendConfigLoader struct {
    sync.Mutex
}

func NewBackendConfigLoader() *BackendConfigLoader {
    return &BackendConfigLoader{
        configs: make(map[string]BackendConfig),
    }
}

type LoadOptions struct {
    debug            bool
    threads, ctxSize int
@@ -67,8 +61,46 @@ func (lo *LoadOptions) Apply(options ...ConfigLoaderOption) {
    }
}

// TODO: either in the next PR or the next commit, I want to merge these down into a single function that looks at the first few characters of the file to determine if we need to deserialize to []BackendConfig or BackendConfig
func readMultipleBackendConfigsFromFile(file string, opts ...ConfigLoaderOption) ([]*BackendConfig, error) {
// Load a config file for a model
func (cl *BackendConfigLoader) LoadBackendConfigFileByName(modelName, modelPath string, opts ...ConfigLoaderOption) (*BackendConfig, error) {

    // Load a config file if present after the model name
    cfg := &BackendConfig{
        PredictionOptions: schema.PredictionOptions{
            Model: modelName,
        },
    }

    cfgExisting, exists := cl.GetBackendConfig(modelName)
    if exists {
        cfg = &cfgExisting
    } else {
        // Try loading a model config file
        modelConfig := filepath.Join(modelPath, modelName+".yaml")
        if _, err := os.Stat(modelConfig); err == nil {
            if err := cl.LoadBackendConfig(
                modelConfig, opts...,
            ); err != nil {
                return nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error())
            }
            cfgExisting, exists = cl.GetBackendConfig(modelName)
            if exists {
                cfg = &cfgExisting
            }
        }
    }

    cfg.SetDefaults(opts...)

    return cfg, nil
}

func NewBackendConfigLoader() *BackendConfigLoader {
    return &BackendConfigLoader{
        configs: make(map[string]BackendConfig),
    }
}
func ReadBackendConfigFile(file string, opts ...ConfigLoaderOption) ([]*BackendConfig, error) {
    c := &[]*BackendConfig{}
    f, err := os.ReadFile(file)
    if err != nil {
@@ -85,7 +117,7 @@ func readMultipleBackendConfigsFromFile(file string, opts ...ConfigLoaderOption)
    return *c, nil
}

func readBackendConfigFromFile(file string, opts ...ConfigLoaderOption) (*BackendConfig, error) {
func ReadBackendConfig(file string, opts ...ConfigLoaderOption) (*BackendConfig, error) {
    lo := &LoadOptions{}
    lo.Apply(opts...)

@@ -102,86 +134,44 @@ func readBackendConfigFromFile(file string, opts ...ConfigLoaderOption) (*Backen
    return c, nil
}

// Load a config file for a model
func (bcl *BackendConfigLoader) LoadBackendConfigFileByName(modelName, modelPath string, opts ...ConfigLoaderOption) (*BackendConfig, error) {

    // Load a config file if present after the model name
    cfg := &BackendConfig{
        PredictionOptions: schema.PredictionOptions{
            Model: modelName,
        },
    }

    cfgExisting, exists := bcl.GetBackendConfig(modelName)
    if exists {
        cfg = &cfgExisting
    } else {
        // Try loading a model config file
        modelConfig := filepath.Join(modelPath, modelName+".yaml")
        if _, err := os.Stat(modelConfig); err == nil {
            if err := bcl.LoadBackendConfig(
                modelConfig, opts...,
            ); err != nil {
                return nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error())
            }
            cfgExisting, exists = bcl.GetBackendConfig(modelName)
            if exists {
                cfg = &cfgExisting
            }
        }
    }

    cfg.SetDefaults(opts...)

    return cfg, nil
}

// This format is currently only used when reading a single file at startup, passed in via ApplicationConfig.ConfigFile
func (bcl *BackendConfigLoader) LoadMultipleBackendConfigsSingleFile(file string, opts ...ConfigLoaderOption) error {
    bcl.Lock()
    defer bcl.Unlock()
    c, err := readMultipleBackendConfigsFromFile(file, opts...)
func (cm *BackendConfigLoader) LoadBackendConfigFile(file string, opts ...ConfigLoaderOption) error {
    cm.Lock()
    defer cm.Unlock()
    c, err := ReadBackendConfigFile(file, opts...)
    if err != nil {
        return fmt.Errorf("cannot load config file: %w", err)
    }

    for _, cc := range c {
        if cc.Validate() {
            bcl.configs[cc.Name] = *cc
        }
        cm.configs[cc.Name] = *cc
    }
    return nil
}

func (bcl *BackendConfigLoader) LoadBackendConfig(file string, opts ...ConfigLoaderOption) error {
    bcl.Lock()
    defer bcl.Unlock()
    c, err := readBackendConfigFromFile(file, opts...)
func (cl *BackendConfigLoader) LoadBackendConfig(file string, opts ...ConfigLoaderOption) error {
    cl.Lock()
    defer cl.Unlock()
    c, err := ReadBackendConfig(file, opts...)
    if err != nil {
        return fmt.Errorf("cannot read config file: %w", err)
    }

    if c.Validate() {
        bcl.configs[c.Name] = *c
    } else {
        return fmt.Errorf("config is not valid")
    }

    cl.configs[c.Name] = *c
    return nil
}

func (bcl *BackendConfigLoader) GetBackendConfig(m string) (BackendConfig, bool) {
    bcl.Lock()
    defer bcl.Unlock()
    v, exists := bcl.configs[m]
func (cl *BackendConfigLoader) GetBackendConfig(m string) (BackendConfig, bool) {
    cl.Lock()
    defer cl.Unlock()
    v, exists := cl.configs[m]
    return v, exists
}

func (bcl *BackendConfigLoader) GetAllBackendConfigs() []BackendConfig {
    bcl.Lock()
    defer bcl.Unlock()
func (cl *BackendConfigLoader) GetAllBackendConfigs() []BackendConfig {
    cl.Lock()
    defer cl.Unlock()
    var res []BackendConfig
    for _, v := range bcl.configs {
    for _, v := range cl.configs {
        res = append(res, v)
    }

@@ -192,16 +182,26 @@ func (bcl *BackendConfigLoader) GetAllBackendConfigs() []BackendConfig {
    return res
}

func (bcl *BackendConfigLoader) RemoveBackendConfig(m string) {
    bcl.Lock()
    defer bcl.Unlock()
    delete(bcl.configs, m)
func (cl *BackendConfigLoader) RemoveBackendConfig(m string) {
    cl.Lock()
    defer cl.Unlock()
    delete(cl.configs, m)
}

func (cl *BackendConfigLoader) ListBackendConfigs() []string {
    cl.Lock()
    defer cl.Unlock()
    var res []string
    for k := range cl.configs {
        res = append(res, k)
    }
    return res
}

// Preload prepare models if they are not local but url or huggingface repositories
func (bcl *BackendConfigLoader) Preload(modelPath string) error {
    bcl.Lock()
    defer bcl.Unlock()
func (cl *BackendConfigLoader) Preload(modelPath string) error {
    cl.Lock()
    defer cl.Unlock()

    status := func(fileName, current, total string, percent float64) {
        utils.DisplayDownloadFunction(fileName, current, total, percent)
@@ -223,7 +223,7 @@ func (bcl *BackendConfigLoader) Preload(modelPath string) error {
        }
    }

    for i, config := range bcl.configs {
    for i, config := range cl.configs {

        // Download files and verify their SHA
        for i, file := range config.DownloadFiles {
@@ -252,10 +252,10 @@ func (bcl *BackendConfigLoader) Preload(modelPath string) error {
            }
        }

        cc := bcl.configs[i]
        cc := cl.configs[i]
        c := &cc
        c.PredictionOptions.Model = modelFileName
        bcl.configs[i] = *c
        cl.configs[i] = *c
    }

    if config.IsMMProjURL() {
@@ -269,22 +269,22 @@ func (bcl *BackendConfigLoader) Preload(modelPath string) error {
        }
    }

        cc := bcl.configs[i]
        cc := cl.configs[i]
        c := &cc
        c.MMProj = modelFileName
        bcl.configs[i] = *c
        cl.configs[i] = *c
    }

    if bcl.configs[i].Name != "" {
        glamText(fmt.Sprintf("**Model name**: _%s_", bcl.configs[i].Name))
    if cl.configs[i].Name != "" {
        glamText(fmt.Sprintf("**Model name**: _%s_", cl.configs[i].Name))
    }
    if bcl.configs[i].Description != "" {
    if cl.configs[i].Description != "" {
        //glamText("**Description**")
        glamText(bcl.configs[i].Description)
        glamText(cl.configs[i].Description)
    }
    if bcl.configs[i].Usage != "" {
    if cl.configs[i].Usage != "" {
        //glamText("**Usage**")
        glamText(bcl.configs[i].Usage)
        glamText(cl.configs[i].Usage)
    }
}
return nil
@@ -292,12 +292,12 @@ func (bcl *BackendConfigLoader) Preload(modelPath string) error {

// LoadBackendConfigsFromPath reads all the configurations of the models from a path
// (non-recursive)
func (bcl *BackendConfigLoader) LoadBackendConfigsFromPath(path string, opts ...ConfigLoaderOption) error {
    bcl.Lock()
    defer bcl.Unlock()
func (cm *BackendConfigLoader) LoadBackendConfigsFromPath(path string, opts ...ConfigLoaderOption) error {
    cm.Lock()
    defer cm.Unlock()
    entries, err := os.ReadDir(path)
    if err != nil {
        return fmt.Errorf("cannot read directory '%s': %w", path, err)
        return err
    }
    files := make([]fs.FileInfo, 0, len(entries))
    for _, entry := range entries {
@@ -313,15 +313,9 @@ func (bcl *BackendConfigLoader) LoadBackendConfigsFromPath(path string, opts ...
            strings.HasPrefix(file.Name(), ".") {
            continue
        }
        c, err := readBackendConfigFromFile(filepath.Join(path, file.Name()), opts...)
        if err != nil {
            log.Error().Err(err).Msgf("cannot read config file: %s", file.Name())
            continue
        }
        if c.Validate() {
            bcl.configs[c.Name] = *c
        } else {
            log.Error().Err(err).Msgf("config is not valid")
        c, err := ReadBackendConfig(filepath.Join(path, file.Name()), opts...)
        if err == nil {
            cm.configs[c.Name] = *c
        }
    }
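The TODO in the loader above suggests collapsing the two readers into one function that inspects the file's shape before deserializing. As a rough illustration of that idea only (a Python sketch under that assumption, not the project's code):

```python
# Hypothetical sketch of the TODO's idea: decide whether a YAML config file
# holds a single backend config or a list of them from the parsed shape,
# instead of keeping two separate reader functions.
import yaml

def read_backend_configs(path):
    with open(path) as fh:
        data = yaml.safe_load(fh)
    # A multi-config file parses to a list; a single config parses to a mapping.
    if isinstance(data, list):
        return data
    return [data]
```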
@@ -1,63 +0,0 @@
package config

import (
    "io"
    "net/http"
    "os"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = Describe("Test cases for config related functions", func() {
    Context("Test Read configuration functions", func() {
        It("Test Validate", func() {
            tmp, err := os.CreateTemp("", "config.yaml")
            Expect(err).To(BeNil())
            defer os.Remove(tmp.Name())
            _, err = tmp.WriteString(
                `backend: "foo-bar"
parameters:
  model: "foo-bar"`)
            Expect(err).ToNot(HaveOccurred())
            config, err := readBackendConfigFromFile(tmp.Name())
            Expect(err).To(BeNil())
            Expect(config).ToNot(BeNil())
            Expect(config.Validate()).To(BeFalse())
        })
        It("Test Validate", func() {
            tmp, err := os.CreateTemp("", "config.yaml")
            Expect(err).To(BeNil())
            defer os.Remove(tmp.Name())
            _, err = tmp.WriteString(
                `name: bar-baz
backend: "foo-bar"
parameters:
  model: "foo-bar"`)
            Expect(err).ToNot(HaveOccurred())
            config, err := readBackendConfigFromFile(tmp.Name())
            Expect(err).To(BeNil())
            Expect(config).ToNot(BeNil())
            // two configs in config.yaml
            Expect(config.Name).To(Equal("bar-baz"))
            Expect(config.Validate()).To(BeTrue())

            // download https://raw.githubusercontent.com/mudler/LocalAI/master/embedded/models/hermes-2-pro-mistral.yaml
            httpClient := http.Client{}
            resp, err := httpClient.Get("https://raw.githubusercontent.com/mudler/LocalAI/master/embedded/models/hermes-2-pro-mistral.yaml")
            Expect(err).To(BeNil())
            defer resp.Body.Close()
            tmp, err = os.CreateTemp("", "config.yaml")
            Expect(err).To(BeNil())
            defer os.Remove(tmp.Name())
            _, err = io.Copy(tmp, resp.Body)
            Expect(err).To(BeNil())
            config, err = readBackendConfigFromFile(tmp.Name())
            Expect(err).To(BeNil())
            Expect(config).ToNot(BeNil())
            // two configs in config.yaml
            Expect(config.Name).To(Equal("hermes-2-pro-mistral"))
            Expect(config.Validate()).To(BeTrue())
        })
    })
})
@@ -1,13 +0,0 @@
package config_test

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

func TestConfig(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Config test suite")
}
@@ -1,8 +1,10 @@
package config
package config_test

import (
    "os"

    . "github.com/go-skynet/LocalAI/core/config"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)
@@ -15,8 +17,8 @@ var _ = Describe("Test cases for config related functions", func() {

    Context("Test Read configuration functions", func() {
        configFile = os.Getenv("CONFIG_FILE")
        It("Test readConfigFile", func() {
            config, err := readMultipleBackendConfigsFromFile(configFile)
        It("Test ReadConfigFile", func() {
            config, err := ReadBackendConfigFile(configFile)
            Expect(err).To(BeNil())
            Expect(config).ToNot(BeNil())
            // two configs in config.yaml
@@ -25,28 +27,26 @@ var _ = Describe("Test cases for config related functions", func() {
        })

        It("Test LoadConfigs", func() {

            bcl := NewBackendConfigLoader()
            err := bcl.LoadBackendConfigsFromPath(os.Getenv("MODELS_PATH"))

            cm := NewBackendConfigLoader()
            opts := NewApplicationConfig()
            err := cm.LoadBackendConfigsFromPath(opts.ModelPath)
            Expect(err).To(BeNil())
            configs := bcl.GetAllBackendConfigs()
            loadedModelNames := []string{}
            for _, v := range configs {
                loadedModelNames = append(loadedModelNames, v.Name)
            }
            Expect(configs).ToNot(BeNil())
            Expect(cm.ListBackendConfigs()).ToNot(BeNil())

            Expect(loadedModelNames).To(ContainElements("code-search-ada-code-001"))
            // config should include the gpt4all model's api.config
            Expect(cm.ListBackendConfigs()).To(ContainElements("gpt4all"))

            // config should include the gpt2 model's api.config
            Expect(cm.ListBackendConfigs()).To(ContainElements("gpt4all-2"))

            // config should include the text-embedding-ada-002 model's api.config
            Expect(loadedModelNames).To(ContainElements("text-embedding-ada-002"))
            Expect(cm.ListBackendConfigs()).To(ContainElements("text-embedding-ada-002"))

            // config should include the rwkv_test model's api.config
            Expect(loadedModelNames).To(ContainElements("rwkv_test"))
            Expect(cm.ListBackendConfigs()).To(ContainElements("rwkv_test"))

            // config should include the whisper-1 model's api.config
            Expect(loadedModelNames).To(ContainElements("whisper-1"))
            Expect(cm.ListBackendConfigs()).To(ContainElements("whisper-1"))
        })
    })
})
@@ -1,46 +0,0 @@
package main

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/go-skynet/LocalAI/pkg/downloader"
    "github.com/go-skynet/LocalAI/pkg/utils"
    "gopkg.in/yaml.v3"
)

type Asset struct {
    FileName string `yaml:"filename"`
    URL      string `yaml:"url"`
    SHA      string `yaml:"sha"`
}

func main() {

    // read the YAML file which contains a list of assets
    // and download them in the asset path
    assets := []Asset{}

    assetFile := os.Args[1]
    destPath := os.Args[2]

    // read the YAML file
    f, err := os.ReadFile(assetFile)
    if err != nil {
        panic(err)
    }
    // unmarshal the YAML data into a struct
    if err := yaml.Unmarshal(f, &assets); err != nil {
        panic(err)
    }

    // download the assets
    for _, asset := range assets {
        if err := downloader.DownloadFile(asset.URL, filepath.Join(destPath, asset.FileName), asset.SHA, 1, 1, utils.DisplayDownloadFunction); err != nil {
            panic(err)
        }
    }

    fmt.Println("Finished downloading assets")
}
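The struct tags above imply the shape of the asset file this helper consumes. A hypothetical input (filename, URL, and SHA are placeholders, not real entries) might look like:

```yaml
# Hypothetical asset list; the field names come from the struct tags above.
- filename: example-asset.bin
  url: https://example.com/example-asset.bin
  sha: <sha256 of the file>
```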
@@ -67,7 +67,6 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
        return true
    })

    result = functions.CleanupLLMResult(result, config.FunctionsConfig)
    results := functions.ParseFunctionCall(result, config.FunctionsConfig)
    noActionToRun := len(results) > 0 && results[0].Name == noAction || len(results) == 0

@@ -193,7 +192,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
    }

    switch {
    case !config.FunctionsConfig.GrammarConfig.NoGrammar && shouldUseFn:
    case !config.FunctionsConfig.NoGrammar && shouldUseFn:
        noActionGrammar := functions.Function{
            Name:        noActionName,
            Description: noActionDescription,
@@ -220,15 +219,15 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
        // Handle if we should return "name" instead of "functions"
        if config.FunctionsConfig.FunctionName {
            jsStruct := funcs.ToJSONNameStructure()
            config.Grammar = jsStruct.Grammar(config.FunctionsConfig.GrammarConfig.Options()...)
            config.Grammar = jsStruct.Grammar("", config.FunctionsConfig.ParallelCalls, config.FunctionsConfig.GrammarMessage)
        } else {
            jsStruct := funcs.ToJSONFunctionStructure()
            config.Grammar = jsStruct.Grammar(config.FunctionsConfig.GrammarConfig.Options()...)
            config.Grammar = jsStruct.Grammar("", config.FunctionsConfig.ParallelCalls, config.FunctionsConfig.GrammarMessage)
        }
    case input.JSONFunctionGrammarObject != nil:
        config.Grammar = input.JSONFunctionGrammarObject.Grammar(config.FunctionsConfig.GrammarConfig.Options()...)
        config.Grammar = input.JSONFunctionGrammarObject.Grammar("", config.FunctionsConfig.ParallelCalls, config.FunctionsConfig.GrammarMessage)
    case input.JSONFunctionGrammarObjectName != nil:
        config.Grammar = input.JSONFunctionGrammarObjectName.Grammar(config.FunctionsConfig.GrammarConfig.Options()...)
        config.Grammar = input.JSONFunctionGrammarObjectName.Grammar("", config.FunctionsConfig.ParallelCalls, config.FunctionsConfig.GrammarMessage)
    default:
        // Force picking one of the functions by the request
        if config.FunctionToCall() != "" {
@@ -350,12 +349,7 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
        mess = append(mess, content)
    }

    joinCharacter := "\n"
    if config.TemplateConfig.JoinChatMessagesByCharacter != nil {
        joinCharacter = *config.TemplateConfig.JoinChatMessagesByCharacter
    }

    predInput = strings.Join(mess, joinCharacter)
    predInput = strings.Join(mess, "\n")
    log.Debug().Msgf("Prompt (before templating): %s", predInput)

    templateFile := ""
@@ -471,7 +465,6 @@ func ChatEndpoint(cl *config.BackendConfigLoader, ml *model.ModelLoader, startup
        return
    }

    s = functions.CleanupLLMResult(s, config.FunctionsConfig)
    results := functions.ParseFunctionCall(s, config.FunctionsConfig)
    noActionsToRun := len(results) > 0 && results[0].Name == noActionName || len(results) == 0
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -1,39 +0,0 @@
/*
https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700&family=Roboto:wght@400;500&display=swap
*/

@font-face {
  font-family: 'Inter';
  font-style: normal;
  font-weight: 400;
  font-display: swap;
  src: url(/static/assets/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuLyfMZg.ttf) format('truetype');
}
@font-face {
  font-family: 'Inter';
  font-style: normal;
  font-weight: 600;
  font-display: swap;
  src: url(/static/assets/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuGKYMZg.ttf) format('truetype');
}
@font-face {
  font-family: 'Inter';
  font-style: normal;
  font-weight: 700;
  font-display: swap;
  src: url(/static/assets/UcCO3FwrK3iLTeHuS_fvQtMwCp50KnMw2boKoduKmMEVuFuYMZg.ttf) format('truetype');
}
@font-face {
  font-family: 'Roboto';
  font-style: normal;
  font-weight: 400;
  font-display: swap;
  src: url(/static/assets/KFOmCnqEu92Fr1Me5Q.ttf) format('truetype');
}
@font-face {
  font-family: 'Roboto';
  font-style: normal;
  font-weight: 500;
  font-display: swap;
  src: url(/static/assets/KFOlCnqEu92Fr1MmEU9vAw.ttf) format('truetype');
}
@@ -1,39 +0,0 @@
/*
https://fonts.googleapis.com/css?family=Roboto:300,400,500,700,900&display=swap
*/

@font-face {
  font-family: 'Roboto';
  font-style: normal;
  font-weight: 300;
  font-display: swap;
  src: url(/static/assets//KFOlCnqEu92Fr1MmSU5fBBc9.ttf) format('truetype');
}
@font-face {
  font-family: 'Roboto';
  font-style: normal;
  font-weight: 400;
  font-display: swap;
  src: url(/static/assets//KFOmCnqEu92Fr1Mu4mxP.ttf) format('truetype');
}
@font-face {
  font-family: 'Roboto';
  font-style: normal;
  font-weight: 500;
  font-display: swap;
  src: url(/static/assets//KFOlCnqEu92Fr1MmEU9fBBc9.ttf) format('truetype');
}
@font-face {
  font-family: 'Roboto';
  font-style: normal;
  font-weight: 700;
  font-display: swap;
  src: url(/static/assets//KFOlCnqEu92Fr1MmWUlfBBc9.ttf) format('truetype');
}
@font-face {
  font-family: 'Roboto';
  font-style: normal;
  font-weight: 900;
  font-display: swap;
  src: url(/static/assets//KFOlCnqEu92Fr1MmYUtfBBc9.ttf) format('truetype');
}
core/http/static/assets/fontawesome.css (vendored, 6 changes): file diff suppressed because one or more lines are too long
core/http/static/assets/fontawesome/css/all.css (vendored, 8030 changes): file diff suppressed because it is too large
File diff suppressed because one or more lines are too long
core/http/static/assets/fontawesome/css/brands.css (vendored, 1594 changes): file diff suppressed because it is too large
File diff suppressed because one or more lines are too long
core/http/static/assets/fontawesome/css/fontawesome.css (vendored, 6375 changes): file diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -1,19 +0,0 @@
|
||||
/*!
|
||||
* Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
|
||||
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||
* Copyright 2024 Fonticons, Inc.
|
||||
*/
|
||||
:root, :host {
|
||||
--fa-style-family-classic: 'Font Awesome 6 Free';
|
||||
--fa-font-regular: normal 400 1em/1 'Font Awesome 6 Free'; }
|
||||
|
||||
@font-face {
|
||||
font-family: 'Font Awesome 6 Free';
|
||||
font-style: normal;
|
||||
font-weight: 400;
|
||||
font-display: block;
|
||||
src: url("../webfonts/fa-regular-400.woff2") format("woff2"), url("../webfonts/fa-regular-400.ttf") format("truetype"); }
|
||||
|
||||
.far,
|
||||
.fa-regular {
|
||||
font-weight: 400; }
|
||||
@@ -1,6 +0,0 @@
|
||||
/*!
|
||||
* Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
|
||||
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||
* Copyright 2024 Fonticons, Inc.
|
||||
*/
|
||||
:host,:root{--fa-style-family-classic:"Font Awesome 6 Free";--fa-font-regular:normal 400 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype")}.fa-regular,.far{font-weight:400}
|
||||
@@ -1,19 +0,0 @@
|
||||
/*!
|
||||
* Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
|
||||
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||
* Copyright 2024 Fonticons, Inc.
|
||||
*/
|
||||
:root, :host {
|
||||
--fa-style-family-classic: 'Font Awesome 6 Free';
|
||||
--fa-font-solid: normal 900 1em/1 'Font Awesome 6 Free'; }
|
||||
|
||||
@font-face {
|
||||
font-family: 'Font Awesome 6 Free';
|
||||
font-style: normal;
|
||||
font-weight: 900;
|
||||
font-display: block;
|
||||
src: url("../webfonts/fa-solid-900.woff2") format("woff2"), url("../webfonts/fa-solid-900.ttf") format("truetype"); }
|
||||
|
||||
.fas,
|
||||
.fa-solid {
|
||||
font-weight: 900; }
|
||||
@@ -1,6 +0,0 @@
|
||||
/*!
|
||||
* Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
|
||||
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||
* Copyright 2024 Fonticons, Inc.
|
||||
*/
|
||||
:host,:root{--fa-style-family-classic:"Font Awesome 6 Free";--fa-font-solid:normal 900 1em/1 "Font Awesome 6 Free"}@font-face{font-family:"Font Awesome 6 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}.fa-solid,.fas{font-weight:900}
|
||||
@@ -1,640 +0,0 @@
|
||||
/*!
|
||||
* Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
|
||||
* License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
|
||||
* Copyright 2024 Fonticons, Inc.
|
||||
*/
|
||||
:root, :host {
|
||||
--fa-font-solid: normal 900 1em/1 'Font Awesome 6 Solid';
|
||||
--fa-font-regular: normal 400 1em/1 'Font Awesome 6 Regular';
|
||||
--fa-font-light: normal 300 1em/1 'Font Awesome 6 Light';
|
||||
--fa-font-thin: normal 100 1em/1 'Font Awesome 6 Thin';
|
||||
--fa-font-duotone: normal 900 1em/1 'Font Awesome 6 Duotone';
|
||||
--fa-font-sharp-solid: normal 900 1em/1 'Font Awesome 6 Sharp';
|
||||
--fa-font-sharp-regular: normal 400 1em/1 'Font Awesome 6 Sharp';
|
||||
--fa-font-sharp-light: normal 300 1em/1 'Font Awesome 6 Sharp';
|
||||
--fa-font-sharp-thin: normal 100 1em/1 'Font Awesome 6 Sharp';
|
||||
--fa-font-brands: normal 400 1em/1 'Font Awesome 6 Brands'; }

svg:not(:root).svg-inline--fa, svg:not(:host).svg-inline--fa {
  overflow: visible;
  box-sizing: content-box; }

.svg-inline--fa {
  display: var(--fa-display, inline-block);
  height: 1em;
  overflow: visible;
  vertical-align: -.125em; }
  .svg-inline--fa.fa-2xs {
    vertical-align: 0.1em; }
  .svg-inline--fa.fa-xs {
    vertical-align: 0em; }
  .svg-inline--fa.fa-sm {
    vertical-align: -0.07143em; }
  .svg-inline--fa.fa-lg {
    vertical-align: -0.2em; }
  .svg-inline--fa.fa-xl {
    vertical-align: -0.25em; }
  .svg-inline--fa.fa-2xl {
    vertical-align: -0.3125em; }
  .svg-inline--fa.fa-pull-left {
    margin-right: var(--fa-pull-margin, 0.3em);
    width: auto; }
  .svg-inline--fa.fa-pull-right {
    margin-left: var(--fa-pull-margin, 0.3em);
    width: auto; }
  .svg-inline--fa.fa-li {
    width: var(--fa-li-width, 2em);
    top: 0.25em; }
  .svg-inline--fa.fa-fw {
    width: var(--fa-fw-width, 1.25em); }

.fa-layers svg.svg-inline--fa {
  bottom: 0;
  left: 0;
  margin: auto;
  position: absolute;
  right: 0;
  top: 0; }

.fa-layers-text, .fa-layers-counter {
  display: inline-block;
  position: absolute;
  text-align: center; }

.fa-layers {
  display: inline-block;
  height: 1em;
  position: relative;
  text-align: center;
  vertical-align: -.125em;
  width: 1em; }
  .fa-layers svg.svg-inline--fa {
    -webkit-transform-origin: center center;
    transform-origin: center center; }

.fa-layers-text {
  left: 50%;
  top: 50%;
  -webkit-transform: translate(-50%, -50%);
  transform: translate(-50%, -50%);
  -webkit-transform-origin: center center;
  transform-origin: center center; }

.fa-layers-counter {
  background-color: var(--fa-counter-background-color, #ff253a);
  border-radius: var(--fa-counter-border-radius, 1em);
  box-sizing: border-box;
  color: var(--fa-inverse, #fff);
  line-height: var(--fa-counter-line-height, 1);
  max-width: var(--fa-counter-max-width, 5em);
  min-width: var(--fa-counter-min-width, 1.5em);
  overflow: hidden;
  padding: var(--fa-counter-padding, 0.25em 0.5em);
  right: var(--fa-right, 0);
  text-overflow: ellipsis;
  top: var(--fa-top, 0);
  -webkit-transform: scale(var(--fa-counter-scale, 0.25));
  transform: scale(var(--fa-counter-scale, 0.25));
  -webkit-transform-origin: top right;
  transform-origin: top right; }

.fa-layers-bottom-right {
  bottom: var(--fa-bottom, 0);
  right: var(--fa-right, 0);
  top: auto;
  -webkit-transform: scale(var(--fa-layers-scale, 0.25));
  transform: scale(var(--fa-layers-scale, 0.25));
  -webkit-transform-origin: bottom right;
  transform-origin: bottom right; }

.fa-layers-bottom-left {
  bottom: var(--fa-bottom, 0);
  left: var(--fa-left, 0);
  right: auto;
  top: auto;
  -webkit-transform: scale(var(--fa-layers-scale, 0.25));
  transform: scale(var(--fa-layers-scale, 0.25));
  -webkit-transform-origin: bottom left;
  transform-origin: bottom left; }

.fa-layers-top-right {
  top: var(--fa-top, 0);
  right: var(--fa-right, 0);
  -webkit-transform: scale(var(--fa-layers-scale, 0.25));
  transform: scale(var(--fa-layers-scale, 0.25));
  -webkit-transform-origin: top right;
  transform-origin: top right; }

.fa-layers-top-left {
  left: var(--fa-left, 0);
  right: auto;
  top: var(--fa-top, 0);
  -webkit-transform: scale(var(--fa-layers-scale, 0.25));
  transform: scale(var(--fa-layers-scale, 0.25));
  -webkit-transform-origin: top left;
  transform-origin: top left; }

.fa-1x {
  font-size: 1em; }

.fa-2x {
  font-size: 2em; }

.fa-3x {
  font-size: 3em; }

.fa-4x {
  font-size: 4em; }

.fa-5x {
  font-size: 5em; }

.fa-6x {
  font-size: 6em; }

.fa-7x {
  font-size: 7em; }

.fa-8x {
  font-size: 8em; }

.fa-9x {
  font-size: 9em; }

.fa-10x {
  font-size: 10em; }

.fa-2xs {
  font-size: 0.625em;
  line-height: 0.1em;
  vertical-align: 0.225em; }

.fa-xs {
  font-size: 0.75em;
  line-height: 0.08333em;
  vertical-align: 0.125em; }

.fa-sm {
  font-size: 0.875em;
  line-height: 0.07143em;
  vertical-align: 0.05357em; }

.fa-lg {
  font-size: 1.25em;
  line-height: 0.05em;
  vertical-align: -0.075em; }

.fa-xl {
  font-size: 1.5em;
  line-height: 0.04167em;
  vertical-align: -0.125em; }

.fa-2xl {
  font-size: 2em;
  line-height: 0.03125em;
  vertical-align: -0.1875em; }

.fa-fw {
  text-align: center;
  width: 1.25em; }

.fa-ul {
  list-style-type: none;
  margin-left: var(--fa-li-margin, 2.5em);
  padding-left: 0; }
  .fa-ul > li {
    position: relative; }

.fa-li {
  left: calc(var(--fa-li-width, 2em) * -1);
  position: absolute;
  text-align: center;
  width: var(--fa-li-width, 2em);
  line-height: inherit; }

.fa-border {
  border-color: var(--fa-border-color, #eee);
  border-radius: var(--fa-border-radius, 0.1em);
  border-style: var(--fa-border-style, solid);
  border-width: var(--fa-border-width, 0.08em);
  padding: var(--fa-border-padding, 0.2em 0.25em 0.15em); }

.fa-pull-left {
  float: left;
  margin-right: var(--fa-pull-margin, 0.3em); }

.fa-pull-right {
  float: right;
  margin-left: var(--fa-pull-margin, 0.3em); }

.fa-beat {
  -webkit-animation-name: fa-beat;
  animation-name: fa-beat;
  -webkit-animation-delay: var(--fa-animation-delay, 0s);
  animation-delay: var(--fa-animation-delay, 0s);
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 1s);
  animation-duration: var(--fa-animation-duration, 1s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, ease-in-out);
  animation-timing-function: var(--fa-animation-timing, ease-in-out); }

.fa-bounce {
  -webkit-animation-name: fa-bounce;
  animation-name: fa-bounce;
  -webkit-animation-delay: var(--fa-animation-delay, 0s);
  animation-delay: var(--fa-animation-delay, 0s);
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 1s);
  animation-duration: var(--fa-animation-duration, 1s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.28, 0.84, 0.42, 1));
  animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.28, 0.84, 0.42, 1)); }

.fa-fade {
  -webkit-animation-name: fa-fade;
  animation-name: fa-fade;
  -webkit-animation-delay: var(--fa-animation-delay, 0s);
  animation-delay: var(--fa-animation-delay, 0s);
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 1s);
  animation-duration: var(--fa-animation-duration, 1s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1));
  animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); }

.fa-beat-fade {
  -webkit-animation-name: fa-beat-fade;
  animation-name: fa-beat-fade;
  -webkit-animation-delay: var(--fa-animation-delay, 0s);
  animation-delay: var(--fa-animation-delay, 0s);
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 1s);
  animation-duration: var(--fa-animation-duration, 1s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1));
  animation-timing-function: var(--fa-animation-timing, cubic-bezier(0.4, 0, 0.6, 1)); }

.fa-flip {
  -webkit-animation-name: fa-flip;
  animation-name: fa-flip;
  -webkit-animation-delay: var(--fa-animation-delay, 0s);
  animation-delay: var(--fa-animation-delay, 0s);
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 1s);
  animation-duration: var(--fa-animation-duration, 1s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, ease-in-out);
  animation-timing-function: var(--fa-animation-timing, ease-in-out); }

.fa-shake {
  -webkit-animation-name: fa-shake;
  animation-name: fa-shake;
  -webkit-animation-delay: var(--fa-animation-delay, 0s);
  animation-delay: var(--fa-animation-delay, 0s);
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 1s);
  animation-duration: var(--fa-animation-duration, 1s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, linear);
  animation-timing-function: var(--fa-animation-timing, linear); }

.fa-spin {
  -webkit-animation-name: fa-spin;
  animation-name: fa-spin;
  -webkit-animation-delay: var(--fa-animation-delay, 0s);
  animation-delay: var(--fa-animation-delay, 0s);
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 2s);
  animation-duration: var(--fa-animation-duration, 2s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, linear);
  animation-timing-function: var(--fa-animation-timing, linear); }

.fa-spin-reverse {
  --fa-animation-direction: reverse; }

.fa-pulse,
.fa-spin-pulse {
  -webkit-animation-name: fa-spin;
  animation-name: fa-spin;
  -webkit-animation-direction: var(--fa-animation-direction, normal);
  animation-direction: var(--fa-animation-direction, normal);
  -webkit-animation-duration: var(--fa-animation-duration, 1s);
  animation-duration: var(--fa-animation-duration, 1s);
  -webkit-animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  animation-iteration-count: var(--fa-animation-iteration-count, infinite);
  -webkit-animation-timing-function: var(--fa-animation-timing, steps(8));
  animation-timing-function: var(--fa-animation-timing, steps(8)); }

@media (prefers-reduced-motion: reduce) {
  .fa-beat,
  .fa-bounce,
  .fa-fade,
  .fa-beat-fade,
  .fa-flip,
  .fa-pulse,
  .fa-shake,
  .fa-spin,
  .fa-spin-pulse {
    -webkit-animation-delay: -1ms;
    animation-delay: -1ms;
    -webkit-animation-duration: 1ms;
    animation-duration: 1ms;
    -webkit-animation-iteration-count: 1;
    animation-iteration-count: 1;
    -webkit-transition-delay: 0s;
    transition-delay: 0s;
    -webkit-transition-duration: 0s;
    transition-duration: 0s; } }
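/* Usage sketch (hypothetical markup, not part of this stylesheet): the
   animation helpers are additive classes on an icon element, tunable
   per element through the custom properties above, e.g.

     <i class="fa-solid fa-spinner fa-spin"
        style="--fa-animation-duration: 3s;"></i>

   The prefers-reduced-motion block above collapses every one of these
   animations to a single 1 ms pass for users who opt out of motion. */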

@-webkit-keyframes fa-beat {
  0%, 90% {
    -webkit-transform: scale(1);
    transform: scale(1); }
  45% {
    -webkit-transform: scale(var(--fa-beat-scale, 1.25));
    transform: scale(var(--fa-beat-scale, 1.25)); } }

@keyframes fa-beat {
  0%, 90% {
    -webkit-transform: scale(1);
    transform: scale(1); }
  45% {
    -webkit-transform: scale(var(--fa-beat-scale, 1.25));
    transform: scale(var(--fa-beat-scale, 1.25)); } }

@-webkit-keyframes fa-bounce {
  0% {
    -webkit-transform: scale(1, 1) translateY(0);
    transform: scale(1, 1) translateY(0); }
  10% {
    -webkit-transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0);
    transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0); }
  30% {
    -webkit-transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em));
    transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em)); }
  50% {
    -webkit-transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0);
    transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0); }
  57% {
    -webkit-transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em));
    transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em)); }
  64% {
    -webkit-transform: scale(1, 1) translateY(0);
    transform: scale(1, 1) translateY(0); }
  100% {
    -webkit-transform: scale(1, 1) translateY(0);
    transform: scale(1, 1) translateY(0); } }

@keyframes fa-bounce {
  0% {
    -webkit-transform: scale(1, 1) translateY(0);
    transform: scale(1, 1) translateY(0); }
  10% {
    -webkit-transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0);
    transform: scale(var(--fa-bounce-start-scale-x, 1.1), var(--fa-bounce-start-scale-y, 0.9)) translateY(0); }
  30% {
    -webkit-transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em));
    transform: scale(var(--fa-bounce-jump-scale-x, 0.9), var(--fa-bounce-jump-scale-y, 1.1)) translateY(var(--fa-bounce-height, -0.5em)); }
  50% {
    -webkit-transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0);
    transform: scale(var(--fa-bounce-land-scale-x, 1.05), var(--fa-bounce-land-scale-y, 0.95)) translateY(0); }
  57% {
    -webkit-transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em));
    transform: scale(1, 1) translateY(var(--fa-bounce-rebound, -0.125em)); }
  64% {
    -webkit-transform: scale(1, 1) translateY(0);
    transform: scale(1, 1) translateY(0); }
  100% {
    -webkit-transform: scale(1, 1) translateY(0);
    transform: scale(1, 1) translateY(0); } }

@-webkit-keyframes fa-fade {
  50% {
    opacity: var(--fa-fade-opacity, 0.4); } }

@keyframes fa-fade {
  50% {
    opacity: var(--fa-fade-opacity, 0.4); } }

@-webkit-keyframes fa-beat-fade {
  0%, 100% {
    opacity: var(--fa-beat-fade-opacity, 0.4);
    -webkit-transform: scale(1);
    transform: scale(1); }
  50% {
    opacity: 1;
    -webkit-transform: scale(var(--fa-beat-fade-scale, 1.125));
    transform: scale(var(--fa-beat-fade-scale, 1.125)); } }

@keyframes fa-beat-fade {
  0%, 100% {
    opacity: var(--fa-beat-fade-opacity, 0.4);
    -webkit-transform: scale(1);
    transform: scale(1); }
  50% {
    opacity: 1;
    -webkit-transform: scale(var(--fa-beat-fade-scale, 1.125));
    transform: scale(var(--fa-beat-fade-scale, 1.125)); } }

@-webkit-keyframes fa-flip {
  50% {
    -webkit-transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg));
    transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg)); } }

@keyframes fa-flip {
  50% {
    -webkit-transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg));
    transform: rotate3d(var(--fa-flip-x, 0), var(--fa-flip-y, 1), var(--fa-flip-z, 0), var(--fa-flip-angle, -180deg)); } }

@-webkit-keyframes fa-shake {
  0% {
    -webkit-transform: rotate(-15deg);
    transform: rotate(-15deg); }
  4% {
    -webkit-transform: rotate(15deg);
    transform: rotate(15deg); }
  8%, 24% {
    -webkit-transform: rotate(-18deg);
    transform: rotate(-18deg); }
  12%, 28% {
    -webkit-transform: rotate(18deg);
    transform: rotate(18deg); }
  16% {
    -webkit-transform: rotate(-22deg);
    transform: rotate(-22deg); }
  20% {
    -webkit-transform: rotate(22deg);
    transform: rotate(22deg); }
  32% {
    -webkit-transform: rotate(-12deg);
    transform: rotate(-12deg); }
  36% {
    -webkit-transform: rotate(12deg);
    transform: rotate(12deg); }
  40%, 100% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg); } }

@keyframes fa-shake {
  0% {
    -webkit-transform: rotate(-15deg);
    transform: rotate(-15deg); }
  4% {
    -webkit-transform: rotate(15deg);
    transform: rotate(15deg); }
  8%, 24% {
    -webkit-transform: rotate(-18deg);
    transform: rotate(-18deg); }
  12%, 28% {
    -webkit-transform: rotate(18deg);
    transform: rotate(18deg); }
  16% {
    -webkit-transform: rotate(-22deg);
    transform: rotate(-22deg); }
  20% {
    -webkit-transform: rotate(22deg);
    transform: rotate(22deg); }
  32% {
    -webkit-transform: rotate(-12deg);
    transform: rotate(-12deg); }
  36% {
    -webkit-transform: rotate(12deg);
    transform: rotate(12deg); }
  40%, 100% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg); } }

@-webkit-keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg); }
  100% {
    -webkit-transform: rotate(360deg);
    transform: rotate(360deg); } }

@keyframes fa-spin {
  0% {
    -webkit-transform: rotate(0deg);
    transform: rotate(0deg); }
  100% {
    -webkit-transform: rotate(360deg);
    transform: rotate(360deg); } }

.fa-rotate-90 {
  -webkit-transform: rotate(90deg);
  transform: rotate(90deg); }

.fa-rotate-180 {
  -webkit-transform: rotate(180deg);
  transform: rotate(180deg); }

.fa-rotate-270 {
  -webkit-transform: rotate(270deg);
  transform: rotate(270deg); }

.fa-flip-horizontal {
  -webkit-transform: scale(-1, 1);
  transform: scale(-1, 1); }

.fa-flip-vertical {
  -webkit-transform: scale(1, -1);
  transform: scale(1, -1); }

.fa-flip-both,
.fa-flip-horizontal.fa-flip-vertical {
  -webkit-transform: scale(-1, -1);
  transform: scale(-1, -1); }

.fa-rotate-by {
  -webkit-transform: rotate(var(--fa-rotate-angle, 0));
  transform: rotate(var(--fa-rotate-angle, 0)); }

.fa-stack {
  display: inline-block;
  vertical-align: middle;
  height: 2em;
  position: relative;
  width: 2.5em; }

.fa-stack-1x,
.fa-stack-2x {
  bottom: 0;
  left: 0;
  margin: auto;
  position: absolute;
  right: 0;
  top: 0;
  z-index: var(--fa-stack-z-index, auto); }

.svg-inline--fa.fa-stack-1x {
  height: 1em;
  width: 1.25em; }

.svg-inline--fa.fa-stack-2x {
  height: 2em;
  width: 2.5em; }
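/* Usage sketch (hypothetical markup): the stacking rules above overlay
   a 1x icon on a 2x icon inside a fixed-size wrapper; icon names here
   are illustrative, e.g.

     <span class="fa-stack">
       <i class="fa-solid fa-square fa-stack-2x"></i>
       <i class="fa-solid fa-star fa-stack-1x fa-inverse"></i>
     </span>
*/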

.fa-inverse {
  color: var(--fa-inverse, #fff); }

.sr-only,
.fa-sr-only {
  position: absolute;
  width: 1px;
  height: 1px;
  padding: 0;
  margin: -1px;
  overflow: hidden;
  clip: rect(0, 0, 0, 0);
  white-space: nowrap;
  border-width: 0; }

.sr-only-focusable:not(:focus),
.fa-sr-only-focusable:not(:focus) {
  position: absolute;
  width: 1px;
  height: 1px;
  padding: 0;
  margin: -1px;
  overflow: hidden;
  clip: rect(0, 0, 0, 0);
  white-space: nowrap;
  border-width: 0; }

.svg-inline--fa .fa-primary {
  fill: var(--fa-primary-color, currentColor);
  opacity: var(--fa-primary-opacity, 1); }

.svg-inline--fa .fa-secondary {
  fill: var(--fa-secondary-color, currentColor);
  opacity: var(--fa-secondary-opacity, 0.4); }

.svg-inline--fa.fa-swap-opacity .fa-primary {
  opacity: var(--fa-secondary-opacity, 0.4); }

.svg-inline--fa.fa-swap-opacity .fa-secondary {
  opacity: var(--fa-primary-opacity, 1); }

.svg-inline--fa mask .fa-primary,
.svg-inline--fa mask .fa-secondary {
  fill: black; }

.fad.fa-inverse,
.fa-duotone.fa-inverse {
  color: var(--fa-inverse, #fff); }
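/* Usage sketch (hypothetical selector and colors): the duotone layer
   rules above expose per-icon color and opacity knobs through custom
   properties, e.g.

     .fa-duotone.brand-icon {
       --fa-primary-color: #005bbb;
       --fa-secondary-color: #ffd500;
       --fa-secondary-opacity: 0.8;
     }
*/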
File diff suppressed because one or more lines are too long
@@ -1,26 +0,0 @@
/*!
 * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
 * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
 * Copyright 2024 Fonticons, Inc.
 */
@font-face {
  font-family: 'FontAwesome';
  font-display: block;
  src: url("../webfonts/fa-solid-900.woff2") format("woff2"), url("../webfonts/fa-solid-900.ttf") format("truetype"); }

@font-face {
  font-family: 'FontAwesome';
  font-display: block;
  src: url("../webfonts/fa-brands-400.woff2") format("woff2"), url("../webfonts/fa-brands-400.ttf") format("truetype"); }

@font-face {
  font-family: 'FontAwesome';
  font-display: block;
  src: url("../webfonts/fa-regular-400.woff2") format("woff2"), url("../webfonts/fa-regular-400.ttf") format("truetype");
  unicode-range: U+F003,U+F006,U+F014,U+F016-F017,U+F01A-F01B,U+F01D,U+F022,U+F03E,U+F044,U+F046,U+F05C-F05D,U+F06E,U+F070,U+F087-F088,U+F08A,U+F094,U+F096-F097,U+F09D,U+F0A0,U+F0A2,U+F0A4-F0A7,U+F0C5,U+F0C7,U+F0E5-F0E6,U+F0EB,U+F0F6-F0F8,U+F10C,U+F114-F115,U+F118-F11A,U+F11C-F11D,U+F133,U+F147,U+F14E,U+F150-F152,U+F185-F186,U+F18E,U+F190-F192,U+F196,U+F1C1-F1C9,U+F1D9,U+F1DB,U+F1E3,U+F1EA,U+F1F7,U+F1F9,U+F20A,U+F247-F248,U+F24A,U+F24D,U+F255-F25B,U+F25D,U+F271-F274,U+F278,U+F27B,U+F28C,U+F28E,U+F29C,U+F2B5,U+F2B7,U+F2BA,U+F2BC,U+F2BE,U+F2C0-F2C1,U+F2C3,U+F2D0,U+F2D2,U+F2D4,U+F2DC; }

@font-face {
  font-family: 'FontAwesome';
  font-display: block;
  src: url("../webfonts/fa-v4compatibility.woff2") format("woff2"), url("../webfonts/fa-v4compatibility.ttf") format("truetype");
  unicode-range: U+F041,U+F047,U+F065-F066,U+F07D-F07E,U+F080,U+F08B,U+F08E,U+F090,U+F09A,U+F0AC,U+F0AE,U+F0B2,U+F0D0,U+F0D6,U+F0E4,U+F0EC,U+F10A-F10B,U+F123,U+F13E,U+F148-F149,U+F14C,U+F156,U+F15E,U+F160-F161,U+F163,U+F175-F178,U+F195,U+F1F8,U+F219,U+F27A; }
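/* Note (illustrative): the unicode-range descriptors partition the
   single 'FontAwesome' alias family across several webfont files, so
   the browser fetches only the file whose range covers a glyph the
   page actually uses. For example, a page containing only U+F041
   (map marker in the v4 naming) would pull just
   fa-v4compatibility.woff2:

     .pin::before { font-family: 'FontAwesome'; content: "\f041"; }
*/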
@@ -1,6 +0,0 @@
/*!
 * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
 * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
 * Copyright 2024 Fonticons, Inc.
 */
@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.ttf) format("truetype")}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.ttf) format("truetype");unicode-range:u+f003,u+f006,u+f014,u+f016-f017,u+f01a-f01b,u+f01d,u+f022,u+f03e,u+f044,u+f046,u+f05c-f05d,u+f06e,u+f070,u+f087-f088,u+f08a,u+f094,u+f096-f097,u+f09d,u+f0a0,u+f0a2,u+f0a4-f0a7,u+f0c5,u+f0c7,u+f0e5-f0e6,u+f0eb,u+f0f6-f0f8,u+f10c,u+f114-f115,u+f118-f11a,u+f11c-f11d,u+f133,u+f147,u+f14e,u+f150-f152,u+f185-f186,u+f18e,u+f190-f192,u+f196,u+f1c1-f1c9,u+f1d9,u+f1db,u+f1e3,u+f1ea,u+f1f7,u+f1f9,u+f20a,u+f247-f248,u+f24a,u+f24d,u+f255-f25b,u+f25d,u+f271-f274,u+f278,u+f27b,u+f28c,u+f28e,u+f29c,u+f2b5,u+f2b7,u+f2ba,u+f2bc,u+f2be,u+f2c0-f2c1,u+f2c3,u+f2d0,u+f2d2,u+f2d4,u+f2dc}@font-face{font-family:"FontAwesome";font-display:block;src:url(../webfonts/fa-v4compatibility.woff2) format("woff2"),url(../webfonts/fa-v4compatibility.ttf) format("truetype");unicode-range:u+f041,u+f047,u+f065-f066,u+f07d-f07e,u+f080,u+f08b,u+f08e,u+f090,u+f09a,u+f0ac,u+f0ae,u+f0b2,u+f0d0,u+f0d6,u+f0e4,u+f0ec,u+f10a-f10b,u+f123,u+f13e,u+f148-f149,u+f14c,u+f156,u+f15e,u+f160-f161,u+f163,u+f175-f178,u+f195,u+f1f8,u+f219,u+f27a}
2194
core/http/static/assets/fontawesome/css/v4-shims.css
vendored
2194
core/http/static/assets/fontawesome/css/v4-shims.css
vendored
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
@@ -1,22 +0,0 @@
/*!
 * Font Awesome Free 6.5.2 by @fontawesome - https://fontawesome.com
 * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
 * Copyright 2024 Fonticons, Inc.
 */
@font-face {
  font-family: 'Font Awesome 5 Brands';
  font-display: block;
  font-weight: 400;
  src: url("../webfonts/fa-brands-400.woff2") format("woff2"), url("../webfonts/fa-brands-400.ttf") format("truetype"); }

@font-face {
  font-family: 'Font Awesome 5 Free';
  font-display: block;
  font-weight: 900;
  src: url("../webfonts/fa-solid-900.woff2") format("woff2"), url("../webfonts/fa-solid-900.ttf") format("truetype"); }

@font-face {
  font-family: 'Font Awesome 5 Free';
  font-display: block;
  font-weight: 400;
  src: url("../webfonts/fa-regular-400.woff2") format("woff2"), url("../webfonts/fa-regular-400.ttf") format("truetype"); }
Some files were not shown because too many files have changed in this diff