Compare commits


69 Commits

Author SHA1 Message Date
renovate[bot]
77613169da fix(deps): update github.com/go-skynet/go-llama.cpp digest to 37ef81d (#523)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-06 20:54:55 +02:00
ci-robbot [bot]
2630e251ce ⬆️ Update ggerganov/whisper.cpp (#520)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-06 19:16:42 +02:00
ci-robbot [bot]
0909a0637e feat: update llama.cpp to support k-quants (#521)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-06 18:15:17 +02:00
Ettore Di Giacinto
d62aef2016 feat: add experimental support for falcon-7b (#516)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-06-06 17:23:19 +02:00
ci-robbot [bot]
25e9483add ⬆️ Update donomii/go-rwkv.cpp (#511)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-06 16:02:09 +02:00
renovate[bot]
c1be2bdeeb fix(deps): update github.com/donomii/go-rwkv.cpp digest to 1e18b24 (#489)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-06 15:34:18 +02:00
renovate[bot]
49a2b30350 fix(deps): update github.com/ggerganov/whisper.cpp/bindings/go digest to 57543c1 (#514)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-06 11:07:13 +02:00
renovate[bot]
472cd0fc2f fix(deps): update github.com/nomic-ai/gpt4all/gpt4all-bindings/golang digest to 266f13a (#500)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-06 10:04:44 +02:00
renovate[bot]
dc9c43b6dd fix(deps): update github.com/go-skynet/go-llama.cpp digest to cca84ed (#513)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-06 10:04:17 +02:00
renovate[bot]
e1e23a6302 fix(deps): update github.com/mudler/go-stable-diffusion digest to d89260f (#506)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-06 00:35:16 +02:00
ci-robbot [bot]
2e916abe15 ⬆️ Update go-skynet/go-llama.cpp (#512)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-06 00:35:01 +02:00
renovate[bot]
3ebdb9b67e fix(deps): update module github.com/sashabaranov/go-openai to v1.10.0 (#510)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-05 21:24:04 +02:00
renovate[bot]
01f5046caf fix(deps): update github.com/tmc/langchaingo digest to 4afed6d (#508)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-05 17:22:21 +02:00
renovate[bot]
ac17d544e0 fix(deps): update github.com/go-skynet/go-llama.cpp digest to b1a4256 (#505)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-05 17:21:57 +02:00
Ettore Di Giacinto
b447a2a719 feat: support upscaled image generation with esrgan (#509) 2023-06-05 17:21:38 +02:00
Ettore Di Giacinto
ec4fd1d219 fix gpt4all, add metal GPU support (#507) 2023-06-05 14:26:20 +02:00
Ettore Di Giacinto
b503725dc7 fix: downgrade gpt4all (#503)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-06-05 09:42:50 +02:00
ci-robbot [bot]
e873fc7b71 ⬆️ Update go-skynet/go-ggml-transformers.cpp (#501)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-05 00:07:48 +02:00
renovate[bot]
3070e9503a fix(deps): update github.com/go-skynet/bloomz.cpp digest to 1834e77 (#414)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-04 22:53:09 +02:00
Ettore Di Giacinto
d9130def39 fix: correctly assign ffmpeg image tag (#499) 2023-06-04 21:07:12 +02:00
renovate[bot]
cdf0a6e766 fix(deps): update github.com/nomic-ai/gpt4all/gpt4all-bindings/golang digest to bbe195e (#497)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-04 19:37:08 +02:00
renovate[bot]
a0e0ac887f fix(deps): update github.com/go-skynet/go-bert.cpp digest to 0548994 (#451)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-04 19:36:56 +02:00
Ettore Di Giacinto
4ddc956462 deps: update rwkv, switch back to upstream (#494) 2023-06-04 17:25:35 +02:00
renovate[bot]
203fd7b2e8 fix(deps): update github.com/go-skynet/go-ggml-transformers.cpp digest to 6fb862c (#490)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-04 14:16:09 +02:00
Ettore Di Giacinto
1bb85377e4 feat: add ffmpeg images (#492)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-06-04 14:00:21 +02:00
renovate[bot]
3892fafc2d fix(deps): update github.com/nomic-ai/gpt4all/gpt4all-bindings/golang digest to bc624f5 (#486)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-04 01:57:20 +02:00
renovate[bot]
8a34679a13 fix(deps): update github.com/go-skynet/go-llama.cpp digest to 3f10005 (#485)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-04 01:57:10 +02:00
ci-robbot [bot]
b64c1d8ac1 ⬆️ Update nomic-ai/gpt4all (#488)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-04 01:56:59 +02:00
Ettore Di Giacinto
8fb86c13bc feat: Enable static builds for Linux binaries (#487)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-06-03 23:46:07 +02:00
ci-robbot [bot]
05edf59c91 ⬆️ Update nomic-ai/gpt4all (#483)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-03 18:30:30 +02:00
ci-robbot [bot]
b9f1f85433 ⬆️ Update go-skynet/go-llama.cpp (#482)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-03 18:30:18 +02:00
renovate[bot]
f8e2e76698 fix(deps): update github.com/nomic-ai/gpt4all/gpt4all-bindings/golang digest to 25ee51e (#478)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-03 14:25:55 +02:00
ci-robbot [bot]
29856f7527 ⬆️ Update nomic-ai/gpt4all (#479)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-03 14:25:42 +02:00
Sébastien Prud'homme
aa6cdf16c8 fix: display help with correct default values (#481)
Signed-off-by: Sébastien Prud'homme <sebastien.prudhomme@gmail.com>
2023-06-03 14:25:30 +02:00
Samuel Maynard
96794851b3 feat: add support for Stream: true to completionEndpoint (#465) 2023-06-03 00:27:03 +02:00
renovate[bot]
51a1a721b3 fix(deps): update github.com/nomic-ai/gpt4all/gpt4all-bindings/golang digest to be9f6ad (#477)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-02 19:31:21 +02:00
renovate[bot]
695f3e5758 fix(deps): update github.com/nomic-ai/gpt4all/gpt4all-bindings/golang digest to 031d714 (#464)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-02 13:32:27 +02:00
Ettore Di Giacinto
e875c1f64a fix: fix the make run target (#476)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-06-02 09:57:01 +02:00
Ettore Di Giacinto
19f92d7d55 fix: Bump and fix rwkv build (#475) 2023-06-02 08:53:57 +02:00
Ettore Di Giacinto
5a8dd40918 feat: Enable stablediffusion by default in container images (#474)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-06-02 08:53:45 +02:00
renovate[bot]
1b766ab89c fix(deps): update module github.com/urfave/cli/v2 to v2.25.5 (#399)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-01 23:44:17 +02:00
ci-robbot [bot]
a63d6f6364 ⬆️ Update ggerganov/whisper.cpp (#473)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-01 23:44:05 +02:00
ci-robbot [bot]
4422ca2235 ⬆️ Update go-skynet/go-ggml-transformers.cpp (#459)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-01 23:43:15 +02:00
Ettore Di Giacinto
78ad4813df feat: Update gpt4all, support multiple implementations in runtime (#472)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-06-01 23:38:52 +02:00
renovate[bot]
42d753846e fix(deps): update github.com/ggerganov/whisper.cpp/bindings/go digest to 5b9e59b (#469)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-01 16:23:34 +02:00
ci-robbot [bot]
5c018c0437 ⬆️ Update ggerganov/whisper.cpp (#468)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-01 16:23:16 +02:00
renovate[bot]
07cee3f6ef fix(deps): update github.com/donomii/go-rwkv.cpp digest to 3b28b09 (#467)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-01 16:21:32 +02:00
ci-robbot [bot]
c5cb2ff268 ⬆️ Update go-skynet/go-bert.cpp (#463)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-06-01 16:21:13 +02:00
Aisuko
c8a4a4f4e9 feat: Add new test cases for LoadConfigs (#447)
Signed-off-by: Aisuko <urakiny@gmail.com>
2023-06-01 16:20:45 +02:00
Pavel Zloi
3ba07a5928 feat: add LangChainGo Huggingface backend (#446)
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2023-06-01 12:00:06 +02:00
renovate[bot]
7282668da1 fix(deps): update github.com/ggerganov/whisper.cpp/bindings/go digest to 3f7436e (#466)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-01 09:59:11 +02:00
renovate[bot]
451e803444 fix(deps): update github.com/go-skynet/go-llama.cpp digest to 10caf37 (#455)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-01 01:05:24 +02:00
Ettore Di Giacinto
d70c55231b docs: Update README with model gallery url 2023-06-01 01:04:07 +02:00
ci-robbot [bot]
275c124701 ⬆️ Update go-skynet/go-llama.cpp (#458)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-05-31 22:59:02 +02:00
ci-robbot [bot]
87a6bbd251 ⬆️ Update ggerganov/whisper.cpp (#462)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-05-31 22:58:44 +02:00
renovate[bot]
8fd4c7afcc fix(deps): update github.com/ggerganov/whisper.cpp/bindings/go digest to ce6f747 (#450)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-31 19:55:46 +02:00
Sébastien Prud'homme
eee3f83d98 ci: build Docker image variants (#456)
Signed-off-by: Sébastien Prud'homme <sebastien.prudhomme@gmail.com>
2023-05-31 19:51:02 +02:00
renovate[bot]
28ee180283 fix(deps): update github.com/go-skynet/go-ggml-transformers.cpp digest to 17b0655 (#454)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-31 14:59:47 +02:00
renovate[bot]
432b0223f1 fix(deps): update github.com/donomii/go-rwkv.cpp digest to c43cdf5 (#453)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-31 14:59:36 +02:00
renovate[bot]
16050a32c7 fix(deps): update github.com/nomic-ai/gpt4all/gpt4all-bindings/golang digest to 5f94020 (#435)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-31 12:00:01 +02:00
renovate[bot]
898ca62b55 fix(deps): update module github.com/onsi/ginkgo/v2 to v2.9.7 (#445)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-31 08:49:47 +02:00
ci-robbot [bot]
5623a7c331 ⬆️ Update go-skynet/go-bert.cpp (#418)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-05-31 00:45:07 +02:00
ci-robbot [bot]
9e3ca6d1a3 ⬆️ Update nomic-ai/gpt4all (#422)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-05-31 00:44:52 +02:00
ci-robbot [bot]
fa58965bbc ⬆️ Update ggerganov/whisper.cpp (#419)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-05-30 23:04:53 +02:00
renovate[bot]
b8ef9028f1 fix(deps): update github.com/go-skynet/go-llama.cpp digest to 62b6c07 (#441)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-30 23:04:34 +02:00
ci-robbot [bot]
f711d35377 ⬆️ Update go-skynet/go-ggml-transformers.cpp (#442)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-05-30 23:04:10 +02:00
ci-robbot [bot]
abd3c62194 ⬆️ Update go-skynet/go-llama.cpp (#443)
Signed-off-by: GitHub <noreply@github.com>
Co-authored-by: mudler <mudler@users.noreply.github.com>
2023-05-30 23:03:48 +02:00
Ettore Di Giacinto
2f3c3b1867 examples: keep old example around (#439) 2023-05-30 18:34:43 +02:00
Ettore Di Giacinto
11af09faf3 examples: use gallery in chatbot-ui, add flowise (#438)
Signed-off-by: mudler <mudler@mocaccino.org>
2023-05-30 18:29:28 +02:00
39 changed files with 938 additions and 142 deletions


@@ -1,3 +1,5 @@
.git
.idea
models
examples/chatbot-ui/models
examples/rwkv/models


@@ -15,34 +15,65 @@ concurrency:
jobs:
docker:
strategy:
matrix:
include:
- build-type: ''
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: ''
ffmpeg: ''
- build-type: 'cublas'
cuda-major-version: 11
cuda-minor-version: 7
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda11'
ffmpeg: ''
- build-type: 'cublas'
cuda-major-version: 12
cuda-minor-version: 1
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12'
ffmpeg: ''
- build-type: ''
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'false'
tag-suffix: '-ffmpeg'
ffmpeg: 'true'
- build-type: 'cublas'
cuda-major-version: 11
cuda-minor-version: 7
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda11-ffmpeg'
ffmpeg: 'true'
- build-type: 'cublas'
cuda-major-version: 12
cuda-minor-version: 1
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-cublas-cuda12-ffmpeg'
ffmpeg: 'true'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=quay.io/go-skynet/local-ai
VERSION=master
SHORTREF=${GITHUB_SHA::8}
# If this is git tag, use the tag name as a docker tag
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
fi
TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"
# If the VERSION looks like a version number, assume that
# this is the most recent version of the image and also
# tag it 'latest'.
if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
TAGS="$TAGS,${DOCKER_IMAGE}:latest"
fi
# Set output parameters.
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Docker meta
id: meta
uses: docker/metadata-action@v4
with:
images: quay.io/go-skynet/local-ai
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=${{ matrix.tag-latest }}
suffix=${{ matrix.tag-suffix }}
- name: Set up QEMU
uses: docker/setup-qemu-action@master
@@ -60,23 +91,19 @@ jobs:
registry: quay.io
username: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
password: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
- name: Build
if: github.event_name != 'pull_request'
- name: Build and push
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
build-args: |
BUILD_TYPE=${{ matrix.build-type }}
CUDA_MAJOR_VERSION=${{ matrix.cuda-major-version }}
CUDA_MINOR_VERSION=${{ matrix.cuda-minor-version }}
FFMPEG=${{ matrix.ffmpeg }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.prep.outputs.tags }}
- name: Build PRs
if: github.event_name == 'pull_request'
uses: docker/build-push-action@v4
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64
push: false
tags: ${{ steps.prep.outputs.tags }}
platforms: ${{ matrix.platforms }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
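The matrix above multiplies the image build into per-variant tags (cublas/CUDA and FFmpeg combinations). A hedged sketch of pulling one resulting variant; the published tag comes from the metadata-action rules plus the `tag-suffix`, so treat this exact name as illustrative:

```bash
# Pull the CUDA 12 + FFmpeg image variant produced by the new build matrix.
# "master" comes from the branch ref; "-cublas-cuda12-ffmpeg" is the
# tag-suffix defined above. The exact tag name is an assumption.
docker pull quay.io/go-skynet/local-ai:master-cublas-cuda12-ffmpeg
```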


@@ -32,7 +32,7 @@ jobs:
CMAKE_ARGS: "${{ matrix.defines }}"
BUILD_ID: "${{ matrix.build }}"
run: |
make dist
STATIC=true make dist
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.build }}

.gitignore (vendored, 4 changes)

@@ -24,3 +24,7 @@ release/
# just in case
.DS_Store
.idea
# Generated during build
backend-assets/


@@ -3,7 +3,7 @@ ARG GO_VERSION=1.20
FROM golang:$GO_VERSION as builder
ARG BUILD_TYPE=
ARG GO_TAGS=
ARG GO_TAGS=stablediffusion
ARG CUDA_MAJOR_VERSION=11
ARG CUDA_MINOR_VERSION=7
@@ -45,9 +45,10 @@ RUN make build
FROM golang:$GO_VERSION
ARG BUILD_TYPE=
ARG GO_TAGS=
ARG GO_TAGS=stablediffusion
ARG CUDA_MAJOR_VERSION=11
ARG CUDA_MINOR_VERSION=7
ARG FFMPEG=
ENV BUILD_TYPE=${BUILD_TYPE}
ENV GO_TAGS=${GO_TAGS}
@@ -73,6 +74,12 @@ RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
apt-get update && \
apt-get install -y cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
; fi
# Add FFmpeg
RUN if [ "${FFMPEG}" = "true" ]; then \
apt-get install -y ffmpeg \
; fi
ENV PATH /usr/local/cuda/bin:${PATH}
# OpenBLAS requirements
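The `FFMPEG` build argument added here is what the ffmpeg CI variants above pass in; it can also be exercised in a local build. A minimal sketch, assuming you build from the repository root (the image tag is illustrative):

```bash
# Build a LocalAI image with FFmpeg installed, mirroring the CI matrix's
# FFMPEG build-arg (the tag "local-ai:ffmpeg" is just an example name).
docker build --build-arg FFMPEG=true -t local-ai:ffmpeg .
```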


@@ -3,19 +3,19 @@ GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet
BINARY_NAME=local-ai
GOLLAMA_VERSION?=4bd3910005a593a6db237bc82c506d6d9fb81b18
GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
GPT4ALL_VERSION?=73db20ba85fbbdc66a56e2619394c0eea40dc72b
GOGGMLTRANSFORMERS_VERSION?=695f97befe14f0107d8da1c11f5b84912e0754b6
GOLLAMA_VERSION?=37ef81d01ae0848575e416e48b41d112ef0d520e
GPT4ALL_REPO?=https://github.com/go-skynet/gpt4all
GPT4ALL_VERSION?=f7498c9
GOGGMLTRANSFORMERS_VERSION?=bd765bb6f3b38a63f915f3725e488aad492eedd4
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
RWKV_VERSION?=ccb05c3e1c6efd098017d114dcb58ab3262b40b2
WHISPER_CPP_VERSION?=9b926844e3ae0ca6a0d13573b2e0349be1a4b573
BERT_VERSION?=cea1ed76a7f48ef386a8e369f6c82c48cdf2d551
RWKV_VERSION?=1e18b2490e7e32f6b00e16f6a9ec0dd3a3d09266
WHISPER_CPP_VERSION?=57543c169e27312e7546d07ed0d8c6eb806ebc36
BERT_VERSION?=0548994371f7081e45fcf8d472f3941a12f179aa
BLOOMZ_VERSION?=1834e77b83faafe912ad4092ccf7f77937349e2f
BUILD_TYPE?=
export BUILD_TYPE?=
CGO_LDFLAGS?=
CUDA_LIBPATH?=/usr/local/cuda/lib64/
STABLEDIFFUSION_VERSION?=c0748eca3642d58bcf9521108bcee46959c647dc
STABLEDIFFUSION_VERSION?=d89260f598afb809279bc72aa0107b4292587632
GO_TAGS?=
BUILD_ID?=git
LD_FLAGS=?=
@@ -41,6 +41,11 @@ ifeq ($(BUILD_TYPE),cublas)
export LLAMA_CUBLAS=1
endif
ifeq ($(BUILD_TYPE),metal)
CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
export LLAMA_METAL=1
endif
ifeq ($(BUILD_TYPE),clblas)
CGO_LDFLAGS+=-lOpenCL -lclblast
endif
@@ -63,22 +68,19 @@ gpt4all:
git clone --recurse-submodules $(GPT4ALL_REPO) gpt4all
cd gpt4all && git checkout -b build $(GPT4ALL_VERSION) && git submodule update --init --recursive --depth 1
# This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
@find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gptj_/g' {} +
@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gptj_/g' {} +
@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/set_console_color/set_gptj_console_color/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/set_console_color/set_gptj_console_color/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
@find ./gpt4all -type f -name "*.go" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
@find ./gpt4all -type f -name "*.txt" -exec sed -i'' -e 's/llama_/gptjllama_/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gptj_/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/void replace/void json_gptj_replace/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/::replace/::json_gptj_replace/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/regex_escape/gpt4allregex_escape/g' {} +
mv ./gpt4all/gpt4all-backend/llama.cpp/llama_util.h ./gpt4all/gpt4all-backend/llama.cpp/gptjllama_util.h
@find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
@find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
@find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
@find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
@find ./gpt4all/gpt4all-backend -type f -name "llama_util.h" -execdir mv {} "llama_gpt4all_util.h" \;
@find ./gpt4all -type f -name "*.cmake" -exec sed -i'' -e 's/llama_util/llama_gpt4all_util/g' {} +
@find ./gpt4all -type f -name "*.txt" -exec sed -i'' -e 's/llama_util/llama_gpt4all_util/g' {} +
@find ./gpt4all/gpt4all-bindings/golang -type f -name "*.cpp" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
@find ./gpt4all/gpt4all-bindings/golang -type f -name "*.go" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
@find ./gpt4all/gpt4all-bindings/golang -type f -name "*.h" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
## BERT embeddings
go-bert:
@@ -105,7 +107,7 @@ go-rwkv:
@find ./go-rwkv -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_rwkv_/g' {} +
go-rwkv/librwkv.a: go-rwkv
cd go-rwkv && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a .. && cp ggml/src/libggml.a ..
cd go-rwkv && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a ..
## bloomz
bloomz:
@@ -124,6 +126,12 @@ bloomz/libbloomz.a: bloomz
go-bert/libgobert.a: go-bert
$(MAKE) -C go-bert libgobert.a
backend-assets/gpt4all: gpt4all/gpt4all-bindings/golang/libgpt4all.a
mkdir -p backend-assets/gpt4all
@cp gpt4all/gpt4all-bindings/golang/buildllm/*.so backend-assets/gpt4all/ || true
@cp gpt4all/gpt4all-bindings/golang/buildllm/*.dylib backend-assets/gpt4all/ || true
@cp gpt4all/gpt4all-bindings/golang/buildllm/*.dll backend-assets/gpt4all/ || true
gpt4all/gpt4all-bindings/golang/libgpt4all.a: gpt4all
$(MAKE) -C gpt4all/gpt4all-bindings/golang/ libgpt4all.a
@@ -188,7 +196,7 @@ rebuild: ## Rebuilds the project
$(MAKE) -C bloomz clean
$(MAKE) build
prepare: prepare-sources gpt4all/gpt4all-bindings/golang/libgpt4all.a $(OPTIONAL_TARGETS) go-llama/libbinding.a go-bert/libgobert.a go-ggml-transformers/libtransformers.a go-rwkv/librwkv.a whisper.cpp/libwhisper.a bloomz/libbloomz.a ## Prepares for building
prepare: prepare-sources backend-assets/gpt4all $(OPTIONAL_TARGETS) go-llama/libbinding.a go-bert/libgobert.a go-ggml-transformers/libtransformers.a go-rwkv/librwkv.a whisper.cpp/libwhisper.a bloomz/libbloomz.a ## Prepares for building
clean: ## Remove build related file
rm -fr ./go-llama
@@ -196,6 +204,7 @@ clean: ## Remove build related file
rm -rf ./go-gpt2
rm -rf ./go-stable-diffusion
rm -rf ./go-ggml-transformers
rm -rf ./backend-assets
rm -rf ./go-rwkv
rm -rf ./go-bert
rm -rf ./bloomz
@@ -220,7 +229,7 @@ generic-build: ## Build the project using generic
## Run
run: prepare ## run local-ai
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) run ./main.go
CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) run ./
test-models/testmodel:
mkdir test-models
@@ -234,8 +243,10 @@ test-models/testmodel:
cp tests/models_fixtures/* test-models
test: prepare test-models/testmodel
cp -r backend-assets api
cp tests/models_fixtures/* test-models
C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flakeAttempts 5 -v -r ./api ./pkg
C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!gpt4all" --flake-attempts 5 -v -r ./api ./pkg
C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="gpt4all" --flake-attempts 5 -v -r ./api ./pkg
## Help:
help: ## Show this help.


@@ -28,7 +28,7 @@ LocalAI was created by [Ettore Di Giacinto](https://github.com/mudler/) and is a
| ![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png) | ![b6441997879](https://github.com/go-skynet/LocalAI/assets/2420543/d50af51c-51b7-4f39-b6c2-bf04c403894c) |
See the [Getting started](https://localai.io/basics/getting_started/index.html) and [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) sections to learn how to use LocalAI. For a list of curated models check out the [model gallery](https://github.com/go-skynet/model-gallery).
See the [Getting started](https://localai.io/basics/getting_started/index.html) and [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) sections to learn how to use LocalAI. For a list of curated models check out the [model gallery](https://localai.io/models/).
## News
@@ -169,6 +169,7 @@ Feel free to open up a PR to get your project listed!
- [Spark](https://github.com/cedriking/spark)
- [autogpt4all](https://github.com/aorumbayev/autogpt4all)
- [Mods](https://github.com/charmbracelet/mods)
- [Flowise](https://github.com/FlowiseAI/Flowise)
## Short-term roadmap


@@ -66,6 +66,13 @@ func App(opts ...AppOption) (*fiber.App, error) {
log.Debug().Msgf("Model: %s (config: %+v)", v, cfg)
}
}
if options.assetsDestination != "" {
if err := PrepareBackendAssets(options.backendAssets, options.assetsDestination); err != nil {
log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
}
}
// Default middleware config
app.Use(recover.New())


@@ -3,6 +3,7 @@ package api_test
import (
"bytes"
"context"
"embed"
"encoding/json"
"fmt"
"io/ioutil"
@@ -95,6 +96,9 @@ func postModelApplyRequest(url string, request modelApplyRequest) (response map[
return
}
//go:embed backend-assets/*
var backendAssets embed.FS
var _ = Describe("API test", func() {
var app *fiber.App
@@ -114,7 +118,7 @@ var _ = Describe("API test", func() {
modelLoader = model.NewModelLoader(tmpdir)
c, cancel = context.WithCancel(context.Background())
app, err = App(WithContext(c), WithModelLoader(modelLoader))
app, err = App(WithContext(c), WithModelLoader(modelLoader), WithBackendAssets(backendAssets), WithBackendAssetsOutput(tmpdir))
Expect(err).ToNot(HaveOccurred())
go app.Listen("127.0.0.1:9090")
@@ -191,6 +195,32 @@ var _ = Describe("API test", func() {
Expect(err).ToNot(HaveOccurred())
Expect(content["backend"]).To(Equal("bert-embeddings"))
})
It("runs gpt4all", Label("gpt4all"), func() {
if runtime.GOOS != "linux" {
Skip("test supported only on linux")
}
response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{
URL: "github:go-skynet/model-gallery/gpt4all-j.yaml",
Name: "gpt4all-j",
Overrides: map[string]string{},
})
Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response))
uuid := response["uuid"].(string)
Eventually(func() bool {
response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
fmt.Println(response)
return response["processed"].(bool)
}, "360s").Should(Equal(true))
resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "gpt4all-j", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "How are you?"}}})
Expect(err).ToNot(HaveOccurred())
Expect(len(resp.Choices)).To(Equal(1))
Expect(resp.Choices[0].Message.Content).To(ContainSubstring("well"))
})
})
})
@@ -257,7 +287,7 @@ var _ = Describe("API test", func() {
It("returns errors", func() {
_, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "foomodel", Prompt: "abcdedfghikl"})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error: 12 errors occurred:"))
Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error: 11 errors occurred:"))
})
It("transcribes audio", func() {
if runtime.GOOS != "linux" {

api/backend_assets.go (new file, 27 lines)

@@ -0,0 +1,27 @@
package api
import (
"embed"
"os"
"path/filepath"
"github.com/go-skynet/LocalAI/pkg/assets"
"github.com/rs/zerolog/log"
)
func PrepareBackendAssets(backendAssets embed.FS, dst string) error {
// Extract files from the embedded FS
err := assets.ExtractFiles(backendAssets, dst)
if err != nil {
return err
}
// Set GPT4ALL libs where we extracted the files
// https://github.com/nomic-ai/gpt4all/commit/27e80e1d10985490c9fd4214e4bf458cfcf70896
gpt4alldir := filepath.Join(dst, "backend-assets", "gpt4all")
os.Setenv("GPT4ALL_IMPLEMENTATIONS_PATH", gpt4alldir)
log.Debug().Msgf("GPT4ALL_IMPLEMENTATIONS_PATH: %s", gpt4alldir)
return nil
}
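Since the loader resolves implementations through `GPT4ALL_IMPLEMENTATIONS_PATH`, the variable can also be pointed at a directory by hand when the extraction flow isn't used. A sketch under that assumption (the path is illustrative; `PrepareBackendAssets` normally sets this for you):

```bash
# Point the gpt4all backend at manually extracted implementation libraries.
# The directory below is an example; PrepareBackendAssets derives it from
# the assets destination at startup.
export GPT4ALL_IMPLEMENTATIONS_PATH=/tmp/localai/backend-assets/gpt4all
./local-ai
```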


@@ -3,7 +3,7 @@ package api
import (
"encoding/json"
"fmt"
"io/ioutil"
"io/fs"
"os"
"path/filepath"
"strings"
@@ -130,11 +130,18 @@ func (cm ConfigMerger) ListConfigs() []string {
func (cm ConfigMerger) LoadConfigs(path string) error {
cm.Lock()
defer cm.Unlock()
files, err := ioutil.ReadDir(path)
entries, err := os.ReadDir(path)
if err != nil {
return err
}
files := make([]fs.FileInfo, 0, len(entries))
for _, entry := range entries {
info, err := entry.Info()
if err != nil {
return err
}
files = append(files, info)
}
for _, file := range files {
// Skip templates, YAML and .keep files
if !strings.Contains(file.Name(), ".yaml") {


@@ -3,6 +3,7 @@ package api
import (
"os"
"github.com/go-skynet/LocalAI/pkg/model"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -20,8 +21,34 @@ var _ = Describe("Test cases for config related functions", func() {
Expect(err).To(BeNil())
Expect(config).ToNot(BeNil())
// two configs in config.yaml
Expect(len(config)).To(Equal(2))
Expect(config[0].Name).To(Equal("list1"))
Expect(config[1].Name).To(Equal("list2"))
})
It("Test LoadConfigs", func() {
cm := NewConfigMerger()
options := newOptions()
modelLoader := model.NewModelLoader(os.Getenv("MODELS_PATH"))
WithModelLoader(modelLoader)(options)
err := cm.LoadConfigs(options.loader.ModelPath)
Expect(err).To(BeNil())
Expect(cm.configs).ToNot(BeNil())
// config should includes gpt4all models's api.config
Expect(cm.configs).To(HaveKey("gpt4all"))
// config should includes gpt2 models's api.config
Expect(cm.configs).To(HaveKey("gpt4all-2"))
// config should includes text-embedding-ada-002 models's api.config
Expect(cm.configs).To(HaveKey("text-embedding-ada-002"))
// config should includes rwkv_test models's api.config
Expect(cm.configs).To(HaveKey("rwkv_test"))
// config should includes whisper-1 models's api.config
Expect(cm.configs).To(HaveKey("whisper-1"))
})
})
})


@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"encoding/base64"
"errors"
"encoding/json"
"fmt"
"io"
@@ -143,13 +144,29 @@ func defaultRequest(modelFile string) OpenAIRequest {
// https://platform.openai.com/docs/api-reference/completions
func completionEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
return func(c *fiber.Ctx) error {
process := func(s string, req *OpenAIRequest, config *Config, loader *model.ModelLoader, responses chan OpenAIResponse) {
ComputeChoices(s, req, config, loader, func(s string, c *[]Choice) {}, func(s string) bool {
resp := OpenAIResponse{
Model: req.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []Choice{{Text: s}},
Object: "text_completion",
}
log.Debug().Msgf("Sending goroutine: %s", s)
responses <- resp
return true
})
close(responses)
}
return func(c *fiber.Ctx) error {
model, input, err := readInput(c, o.loader, true)
if err != nil {
return fmt.Errorf("failed reading parameters from request:%w", err)
}
log.Debug().Msgf("`input`: %+v", input)
config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
if err != nil {
return fmt.Errorf("failed reading parameters from request:%w", err)
@@ -157,12 +174,67 @@ func completionEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
log.Debug().Msgf("Parameter Config: %+v", config)
if input.Stream {
log.Debug().Msgf("Stream request received")
c.Context().SetContentType("text/event-stream")
//c.Response().Header.SetContentType(fiber.MIMETextHTMLCharsetUTF8)
//c.Set("Content-Type", "text/event-stream")
c.Set("Cache-Control", "no-cache")
c.Set("Connection", "keep-alive")
c.Set("Transfer-Encoding", "chunked")
}
templateFile := config.Model
if config.TemplateConfig.Completion != "" {
templateFile = config.TemplateConfig.Completion
}
if input.Stream {
if (len(config.PromptStrings) > 1) {
return errors.New("cannot handle more than 1 `PromptStrings` when `Stream`ing")
}
predInput := config.PromptStrings[0]
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
Input string
}{Input: predInput})
if err == nil {
predInput = templatedInput
log.Debug().Msgf("Template found, input modified to: %s", predInput)
}
responses := make(chan OpenAIResponse)
go process(predInput, input, config, o.loader, responses)
c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
for ev := range responses {
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(ev)
log.Debug().Msgf("Sending chunk: %s", buf.String())
fmt.Fprintf(w, "data: %v\n", buf.String())
w.Flush()
}
resp := &OpenAIResponse{
Model: input.Model, // we have to return what the user sent here, due to OpenAI spec.
Choices: []Choice{{FinishReason: "stop"}},
}
respData, _ := json.Marshal(resp)
w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
w.WriteString("data: [DONE]\n\n")
w.Flush()
}))
return nil
}
var result []Choice
for _, i := range config.PromptStrings {
// A model can have a "file.bin.tmpl" file associated with a prompt template prefix


@@ -2,6 +2,7 @@ package api
import (
"context"
"embed"
model "github.com/go-skynet/LocalAI/pkg/model"
)
@@ -18,6 +19,9 @@ type Option struct {
preloadJSONModels string
preloadModelsFromPath string
corsAllowOrigins string
backendAssets embed.FS
assetsDestination string
}
type AppOption func(*Option)
@@ -49,6 +53,18 @@ func WithCorsAllowOrigins(b string) AppOption {
}
}
func WithBackendAssetsOutput(out string) AppOption {
return func(o *Option) {
o.assetsDestination = out
}
}
func WithBackendAssets(f embed.FS) AppOption {
return func(o *Option) {
o.backendAssets = f
}
}
func WithContext(ctx context.Context) AppOption {
return func(o *Option) {
o.context = ctx


@@ -9,6 +9,7 @@ import (
"sync"
"github.com/donomii/go-rwkv.cpp"
"github.com/go-skynet/LocalAI/pkg/langchain"
model "github.com/go-skynet/LocalAI/pkg/model"
"github.com/go-skynet/LocalAI/pkg/stablediffusion"
"github.com/go-skynet/bloomz.cpp"
@@ -367,6 +368,30 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
predictOptions = append(predictOptions, bloomz.SetSeed(c.Seed))
}
return model.Predict(
s,
predictOptions...,
)
}
case *transformers.Falcon:
fn = func() (string, error) {
// Generate the prediction using the language model
predictOptions := []transformers.PredictOption{
transformers.SetTemperature(c.Temperature),
transformers.SetTopP(c.TopP),
transformers.SetTopK(c.TopK),
transformers.SetTokens(c.Maxtokens),
transformers.SetThreads(c.Threads),
}
if c.Batch != 0 {
predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
}
if c.Seed != 0 {
predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
}
return model.Predict(
s,
predictOptions...,
@@ -494,6 +519,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
model.SetTokenCallback(nil)
return str, er
}
case *langchain.HuggingFace:
fn = func() (string, error) {
// Generate the prediction using the language model
predictOptions := []langchain.PredictOption{
langchain.SetModel(c.Model),
langchain.SetMaxTokens(c.Maxtokens),
langchain.SetTemperature(c.Temperature),
langchain.SetStopWords(c.StopWords),
}
pred, er := model.PredictHuggingFace(s, predictOptions...)
if er != nil {
return "", er
}
return pred.Completion, nil
}
}
return func() (string, error) {

assets.go (new file, 6 lines)

@@ -0,0 +1,6 @@
package main
import "embed"
//go:embed backend-assets/*
var backendAssets embed.FS


This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/)
There is also a separate example to show how to manually set up a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/)
### Flowise
_by [@mudler](https://github.com/mudler)_
This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI.
[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/)
### Discord bot
_by [@mudler](https://github.com/mudler)_


@@ -0,0 +1,48 @@
# chatbot-ui
Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)
## Setup
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/chatbot-ui
# (optional) Checkout a specific LocalAI tag
# git checkout -b build <TAG>
# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
# start with docker-compose
docker-compose up -d --pull always
# or you can build the images with:
# docker-compose up -d --build
```
## Pointing chatbot-ui to a separately managed LocalAI service
If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose` file so that it looks like the below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<<LOCALAI_IP>>` below):
```
version: '3.6'
services:
chatgpt:
image: ghcr.io/mckaywrigley/chatbot-ui:main
ports:
- 3000:3000
environment:
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
- 'OPENAI_API_HOST=http://<<LOCALAI_IP>>:8080'
```
Once you've edited the docker-compose file, you can start it with `docker compose up`, then browse to `http://localhost:3000`.
## Accessing chatbot-ui
Open http://localhost:3000 for the Web UI.
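Before pointing chatbot-ui at a separately managed LocalAI instance, it can help to confirm the API is reachable first. A quick check, assuming LocalAI listens on its default port 8080 (substitute `<<LOCALAI_IP>>` as above):

```bash
# List the models LocalAI currently serves; a JSON model list confirms
# the endpoint chatbot-ui will talk to is reachable.
curl http://<<LOCALAI_IP>>:8080/v1/models
```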


@@ -0,0 +1,24 @@
version: '3.6'
services:
api:
image: quay.io/go-skynet/local-ai:latest
build:
context: ../../
dockerfile: Dockerfile
ports:
- 8080:8080
environment:
- DEBUG=true
- MODELS_PATH=/models
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai" ]
chatgpt:
image: ghcr.io/mckaywrigley/chatbot-ui:main
ports:
- 3000:3000
environment:
- 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
- 'OPENAI_API_HOST=http://api:8080'


@@ -4,22 +4,18 @@ Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).
![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)
## Setup
## Run
In this example, LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`.
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/chatbot-ui
# (optional) Checkout a specific LocalAI tag
# git checkout -b build <TAG>
# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
# start with docker-compose
docker-compose up -d --pull always
docker-compose up --pull always
# or you can build the images with:
# docker-compose up -d --build
```


@@ -3,6 +3,14 @@ version: '3.6'
services:
api:
image: quay.io/go-skynet/local-ai:latest
# As initially LocalAI will download the models defined in PRELOAD_MODELS
# you might need to tweak the healthcheck values here according to your network connection.
# Here we give a timespan of 20m to download all the required files.
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
interval: 1m
timeout: 20m
retries: 20
build:
context: ../../
dockerfile: Dockerfile
@@ -11,11 +19,16 @@ services:
environment:
- DEBUG=true
- MODELS_PATH=/models
# You can preload different models here as well.
# See: https://github.com/go-skynet/model-gallery
- 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai" ]
chatgpt:
depends_on:
api:
condition: service_healthy
image: ghcr.io/mckaywrigley/chatbot-ui:main
ports:
- 3000:3000


@@ -0,0 +1,26 @@
# flowise
Example of integration with [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise).
![Screenshot from 2023-05-30 18-01-03](https://github.com/go-skynet/LocalAI/assets/2420543/02458782-0549-4131-971c-95ee56ec1af8)
You can check a demo video in the Flowise PR: https://github.com/FlowiseAI/Flowise/pull/123
## Run
In this example, LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`.
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/flowise
# start with docker-compose
docker-compose up --pull always
```
## Accessing flowise
Open http://localhost:3000.
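Because LocalAI first downloads the preloaded gpt4all model, Flowise only starts once the compose healthcheck passes (up to 20 minutes is allowed). To follow the warm-up manually, you can poll the same readiness endpoint the healthcheck uses:

```bash
# Poll LocalAI's readiness endpoint; -f makes curl fail until the service
# reports ready, i.e. until the preloaded model has been downloaded.
curl -f http://localhost:8080/readyz
```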


@@ -0,0 +1,37 @@
version: '3.6'
services:
api:
image: quay.io/go-skynet/local-ai:latest
# As initially LocalAI will download the models defined in PRELOAD_MODELS
# you might need to tweak the healthcheck values here according to your network connection.
# Here we give a timespan of 20m to download all the required files.
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
interval: 1m
timeout: 20m
retries: 20
build:
context: ../../
dockerfile: Dockerfile
ports:
- 8080:8080
environment:
- DEBUG=true
- MODELS_PATH=/models
# You can preload different models here as well.
# See: https://github.com/go-skynet/model-gallery
- 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai" ]
flowise:
depends_on:
api:
condition: service_healthy
image: flowiseai/flowise
ports:
- 3000:3000
volumes:
- ~/.flowise:/root/.flowise
command: /bin/sh -c "sleep 3; flowise start"


@@ -0,0 +1,68 @@
# Data query example
Example of integration with the HuggingFace Inference API, with the help of [langchaingo](https://github.com/tmc/langchaingo).
## Setup
Download LocalAI and start the API:
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI
cd LocalAI/examples/langchain-huggingface
docker-compose up -d
```
Note: ensure you've set the `HUGGINGFACEHUB_API_TOKEN` environment variable; you can generate one on the [Settings / Access Tokens](https://huggingface.co/settings/tokens) page of the HuggingFace site.
This is an example `.env` file for LocalAI:
```ini
MODELS_PATH=/models
CONTEXT_SIZE=512
HUGGINGFACEHUB_API_TOKEN=hg_123456
```
## Using remote models
Now you can use any remote model available via the HuggingFace API. For example, let's enable the [gpt2](https://huggingface.co/gpt2) model in the `gpt-3.5-turbo.yaml` config:
```yml
name: gpt-3.5-turbo
parameters:
model: gpt2
top_k: 80
temperature: 0.2
top_p: 0.7
context_size: 1024
backend: "langchain-huggingface"
stopwords:
- "HUMAN:"
- "GPT:"
roles:
user: " "
system: " "
template:
completion: completion
chat: gpt4all
```
Here you can see that the `parameters.model` field is set to `gpt2` and `backend` to `langchain-huggingface`.
## How to use
```shell
# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models
# {"object":"list","data":[{"id":"gpt-3.5-turbo","object":"model"}]}
curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
"model": "gpt-3.5-turbo",
"prompt": "A long time ago in a galaxy far, far away",
"temperature": 0.7
}'
```
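This compare also adds `Stream: true` support to the completion endpoint (#465), so the same request can be issued in streaming mode. A hedged sketch; per the `completionEndpoint` changes above, the server then answers with `data: {...}` SSE chunks terminated by `data: [DONE]`:

```bash
# Request a streamed completion; the response arrives as "data:" chunks
# followed by "data: [DONE]", per the SSE writer added in this compare.
curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
     "model": "gpt-3.5-turbo",
     "prompt": "A long time ago in a galaxy far, far away",
     "stream": true
}'
```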


@@ -0,0 +1,15 @@
version: '3.6'
services:
api:
image: quay.io/go-skynet/local-ai:latest
build:
context: ../../
dockerfile: Dockerfile
ports:
- 8080:8080
env_file:
- ../../.env
volumes:
- ./models:/models:cached
command: ["/usr/bin/local-ai"]


@@ -0,0 +1 @@
{{.Input}}


@@ -0,0 +1,17 @@
name: gpt-3.5-turbo
parameters:
model: gpt2
top_k: 80
temperature: 0.2
top_p: 0.7
context_size: 1024
backend: "langchain-huggingface"
stopwords:
- "HUMAN:"
- "GPT:"
roles:
user: " "
system: " "
template:
completion: completion
chat: gpt4all


@@ -0,0 +1,4 @@
The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
### Prompt:
{{.Input}}
### Response:

go.mod (23 changes)

@@ -3,26 +3,27 @@ module github.com/go-skynet/LocalAI
go 1.19
require (
github.com/donomii/go-rwkv.cpp v0.0.0-20230529074347-ccb05c3e1c6e
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230528233858-d7c936b44a80
github.com/donomii/go-rwkv.cpp v0.0.0-20230604202420-1e18b2490e7e
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230606002726-57543c169e27
github.com/go-audio/wav v1.1.0
github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf
github.com/go-skynet/go-bert.cpp v0.0.0-20230529074307-771b4a085972
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529215936-13ccc22621bb
github.com/go-skynet/go-llama.cpp v0.0.0-20230529221033-4afcaf28f36f
github.com/go-skynet/bloomz.cpp v0.0.0-20230529155654-1834e77b83fa
github.com/go-skynet/go-bert.cpp v0.0.0-20230531070950-0548994371f7
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230604074754-6fb862c72bc0
github.com/go-skynet/go-llama.cpp v0.0.0-20230606152241-37ef81d01ae0
github.com/gofiber/fiber/v2 v2.46.0
github.com/google/uuid v1.3.0
github.com/hashicorp/go-multierror v1.1.1
github.com/imdario/mergo v0.3.16
github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230528235700-9eb81cb54922
github.com/onsi/ginkgo/v2 v2.9.5
github.com/mudler/go-stable-diffusion v0.0.0-20230605122230-d89260f598af
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230605194130-266f13aee9d8
github.com/onsi/ginkgo/v2 v2.9.7
github.com/onsi/gomega v1.27.7
github.com/otiai10/openaigo v1.1.0
github.com/rs/zerolog v1.29.1
github.com/sashabaranov/go-openai v1.9.5
github.com/sashabaranov/go-openai v1.10.0
github.com/swaggo/swag v1.16.1
github.com/urfave/cli/v2 v2.25.3
github.com/tmc/langchaingo v0.0.0-20230605114752-4afed6d7be4a
github.com/urfave/cli/v2 v2.25.5
github.com/valyala/fasthttp v1.47.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1

go.sum (64 changes)

@@ -20,6 +20,12 @@ github.com/donomii/go-rwkv.cpp v0.0.0-20230515123100-6fdd0c338e56 h1:s8/MZdicstK
github.com/donomii/go-rwkv.cpp v0.0.0-20230515123100-6fdd0c338e56/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/donomii/go-rwkv.cpp v0.0.0-20230529074347-ccb05c3e1c6e h1:YbcLoxAwS0r7otEqU/d8bArubmfEJaG7dZPp0Aa52Io=
github.com/donomii/go-rwkv.cpp v0.0.0-20230529074347-ccb05c3e1c6e/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/donomii/go-rwkv.cpp v0.0.0-20230531084548-c43cdf5fc5bf h1:upCz8WYdzMeJg0qywUaVaGndY+niuicj5j6V4pvhNS4=
github.com/donomii/go-rwkv.cpp v0.0.0-20230531084548-c43cdf5fc5bf/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/donomii/go-rwkv.cpp v0.0.0-20230601111443-3b28b09469fc h1:RCGGh/zw+K09sjCIYHUV7lFenxONml+LS02RdN+AkwI=
github.com/donomii/go-rwkv.cpp v0.0.0-20230601111443-3b28b09469fc/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/donomii/go-rwkv.cpp v0.0.0-20230604202420-1e18b2490e7e h1:Qne1BO0ltmyJcsizxZ61SV+uwuD1F8NztsfBDHOd0LI=
github.com/donomii/go-rwkv.cpp v0.0.0-20230604202420-1e18b2490e7e/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230520182345-041be06d5881 h1:dafqVivljYk51VLFnnpTXJnfWDe637EobWZ1l8PyEf8=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230520182345-041be06d5881/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230523110439-77eab3fbfe5e h1:4PMorQuoUGAXmIzCtnNOHaasyLokXdgd8jUWwsraFTo=
@@ -30,6 +36,14 @@ github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230527074028-9b926844e3ae
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230527074028-9b926844e3ae/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230528233858-d7c936b44a80 h1:IeeVcNaQHdcG+GPg+meOPFvtonvO8p/HBzTrZGjpWZk=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230528233858-d7c936b44a80/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230531071314-ce6f7470649f h1:oGTI2SlcA7oGPFsmkS1m8psq3uKNnhhJ/MZ2ZWVZDe0=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230531071314-ce6f7470649f/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601065548-3f7436e8a096 h1:TD7v8FnwWCWlOsrkpnumsbxsflyhTI3rSm2HInqqSAI=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601065548-3f7436e8a096/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601124500-5b9e59bc07dd h1:os3FeYEIB4j5m5QlbFC3HkVcaAmLxNXz48uIfQAexm0=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230601124500-5b9e59bc07dd/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230606002726-57543c169e27 h1:boeMTUUBtnLU8JElZJHXrsUzROJar9/t6vGOFjkrhhI=
github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230606002726-57543c169e27/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4=
github.com/go-audio/audio v1.0.0/go.mod h1:6uAu0+H2lHkwdGsAY+j2wHPNPpPoeg5AaEFh9FlA+Zs=
github.com/go-audio/riff v1.0.0 h1:d8iCGbDvox9BfLagY94fBynxSPHO80LmZCaOsmKxokA=
@@ -50,10 +64,14 @@ github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyr
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf h1:VJfSn8hIDE+K5+h38M3iAyFXrxpRExMKRdTk33UDxsw=
github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf/go.mod h1:wc0fJ9V04yiYTfgKvE5RUUSRQ5Kzi0Bo4I+U3nNOUuA=
github.com/go-skynet/bloomz.cpp v0.0.0-20230529155654-1834e77b83fa h1:gxr68r/6EWroay4iI81jxqGCDbKotY4+CiwdUkBz2NQ=
github.com/go-skynet/bloomz.cpp v0.0.0-20230529155654-1834e77b83fa/go.mod h1:wc0fJ9V04yiYTfgKvE5RUUSRQ5Kzi0Bo4I+U3nNOUuA=
github.com/go-skynet/go-bert.cpp v0.0.0-20230516063724-cea1ed76a7f4 h1:+3KPDf4Wv1VHOkzAfZnlj9qakLSYggTpm80AswhD/FU=
github.com/go-skynet/go-bert.cpp v0.0.0-20230516063724-cea1ed76a7f4/go.mod h1:VY0s5KoAI2jRCvQXKuDeEEe8KG7VaWifSNJSk+E1KtY=
github.com/go-skynet/go-bert.cpp v0.0.0-20230529074307-771b4a085972 h1:eiE1CTqanNjpNWF2xp9GvNZXgKgRzNaUSyFZGMLu8Vo=
github.com/go-skynet/go-bert.cpp v0.0.0-20230529074307-771b4a085972/go.mod h1:IQrVVZiAuWpneNrahrGu3m7VVaKLDIvQGp+Q6B8jw5g=
github.com/go-skynet/go-bert.cpp v0.0.0-20230531070950-0548994371f7 h1:hm5rOxRf2Y8zmQTBgtDabLoprYHHQHmZ8ui8i4KQSgU=
github.com/go-skynet/go-bert.cpp v0.0.0-20230531070950-0548994371f7/go.mod h1:55l02IF2kD+LGEH4yXzmPPygeuWiUIo8Nbh/+ZU9cb0=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230523173010-f89d7c22df6b h1:uKICsAbdRJxMPZ4RXltwOwXPRDO1/d/pdGR3gEEUV9M=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230523173010-f89d7c22df6b/go.mod h1:hjmO5UfipWl6xkPT54acOs9DDto8GPV81IvsBcvRjsA=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230524084634-c4c581f1853c h1:jXUOCh2K4OzRItTtHzdxvkylE9r1szRSleRpXCNvraY=
@@ -64,6 +82,10 @@ github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529072326-695f97befe14
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529072326-695f97befe14/go.mod h1:Rz967+t+aY6S+TBiW/WI8FM/C1WEMM+DamSMtKRxVAM=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529215936-13ccc22621bb h1:slNlMT8xB6w0QaMroTsqkNzNovUOEkpNpCawB7IjBFY=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230529215936-13ccc22621bb/go.mod h1:SI+oF2+THMydq8Vo4+EzKJaQwtfWOy+lr7yWPP6FR2U=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230531065233-17b065584ef8 h1:LK1DAgJsNMRUWaPpFOnE8XSF70UBybr3zGOvzP8Pdok=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230531065233-17b065584ef8/go.mod h1:/JbU8HZU+tUOp+1bQAeXf3AyRXm+p3UwhccoJwCTI9A=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230604074754-6fb862c72bc0 h1:PKwuqqVsvSPY4W9H9r3iHVpsmMWL1MQ7I5qpiY7eh0E=
github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230604074754-6fb862c72bc0/go.mod h1:/JbU8HZU+tUOp+1bQAeXf3AyRXm+p3UwhccoJwCTI9A=
github.com/go-skynet/go-gpt2.cpp v0.0.0-20230523153133-3eb3a32c0874 h1:/6QWh2oarU7iPSpXj/3bLlkKptyxjKTRrNtGUrh8vhI=
github.com/go-skynet/go-gpt2.cpp v0.0.0-20230523153133-3eb3a32c0874/go.mod h1:1Wj/xbkMfwQSOrhNYK178IzqQHstZbRfhx4s8p1M5VM=
github.com/go-skynet/go-llama.cpp v0.0.0-20230520155239-ccf23adfb278 h1:st4ow9JKy3UuhkwutrbWof2vMFU/YxwBCLYZ1IxJ2Po=
@@ -76,6 +98,18 @@ github.com/go-skynet/go-llama.cpp v0.0.0-20230529120000-4bd3910005a5 h1:AbKnkgzk
github.com/go-skynet/go-llama.cpp v0.0.0-20230529120000-4bd3910005a5/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230529221033-4afcaf28f36f h1:HmXiNF9Sy+34aSjaJ2/JN+goDgbT2XyLjdiG2EOMvaE=
github.com/go-skynet/go-llama.cpp v0.0.0-20230529221033-4afcaf28f36f/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230530191504-62b6c079a47d h1:daPcVEptc/6arcS/QV4QDCdYiwMGCiiR5rnzUs63WK0=
github.com/go-skynet/go-llama.cpp v0.0.0-20230530191504-62b6c079a47d/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
github.com/go-skynet/go-llama.cpp v0.0.0-20230531065249-10caf37d8b73 h1:swwsrYpPYOsyGFrX/0nhaYa93aHH6I61HpSJpQkN1tY=
github.com/go-skynet/go-llama.cpp v0.0.0-20230531065249-10caf37d8b73/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-skynet/go-llama.cpp v0.0.0-20230603122627-3f10005b70c6 h1:w+S5j+znKE8ZKogSp0tcdmYO/v94Wym0g9Os+iWEu2w=
github.com/go-skynet/go-llama.cpp v0.0.0-20230603122627-3f10005b70c6/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-skynet/go-llama.cpp v0.0.0-20230604235446-b1a425611fde h1:bnWCcst0K5lgK2MCJbxV81xPSiK4fiob9f4k2RjYN8A=
github.com/go-skynet/go-llama.cpp v0.0.0-20230604235446-b1a425611fde/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-skynet/go-llama.cpp v0.0.0-20230605193043-cca84ed55fb9 h1:PHsmllTb3sHrcdNDAPCJpIX6rijmO7bfMBqEy+ugJZ4=
github.com/go-skynet/go-llama.cpp v0.0.0-20230605193043-cca84ed55fb9/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-skynet/go-llama.cpp v0.0.0-20230606152241-37ef81d01ae0 h1:fX6UZ3u8vtTtTZIG0LSS5Ai/lbyNXCQHqyDX6IgGwbQ=
github.com/go-skynet/go-llama.cpp v0.0.0-20230606152241-37ef81d01ae0/go.mod h1:ddYIvPZyj3Vf4XkfZimVRRehZu2isd0JXfK3EemVQPk=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -121,6 +155,10 @@ github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWV
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642 h1:KTkh3lOUsGqQyP4v+oa38sPFdrZtNnM4HaxTb3epdYs=
github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw=
github.com/mudler/go-stable-diffusion v0.0.0-20230605114250-a6706a426a90 h1:rxKtdI8RCZ41ZNbUh9jyBBy2pi3ukQP88ZzsrSVnpxY=
github.com/mudler/go-stable-diffusion v0.0.0-20230605114250-a6706a426a90/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw=
github.com/mudler/go-stable-diffusion v0.0.0-20230605122230-d89260f598af h1:XFq6OUqsWQam0OrEr05okXsJK/TQur3zoZTHbiZD3Ks=
github.com/mudler/go-stable-diffusion v0.0.0-20230605122230-d89260f598af/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd h1:is/rE0YD8oEWcX3fQ+VxoS3fD0LqFEmTxh8XZegYYsA=
@@ -137,8 +175,26 @@ github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230526132403-a6f3e9
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230526132403-a6f3e94458e2/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230528235700-9eb81cb54922 h1:teYhrXxFY28gyBm6QMcYewA0KvLXqkUsgxJcYelaxbg=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230528235700-9eb81cb54922/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230531011104-5f940208e4f5 h1:99cF+V5wk7IInDAEM9HAlSHdLf/xoJR529Wr8lAG5KQ=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230531011104-5f940208e4f5/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601151908-5175db27813c h1:KXYqUH6bdYbxnF67l8wayctaCZ4BQJQOsUyNke7HC0A=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601151908-5175db27813c/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601213413-031d7149a7fd h1:VTPLKWrmiwYnSHfZh2KHqwSbMeM3D50J6VmDznyY3Ak=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230601213413-031d7149a7fd/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230602151000-be9f6ad54342 h1:Nca3BDITw9yrhMksPL5VKpj+nOUmDXTy7qB7tHJy0R8=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230602151000-be9f6ad54342/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603001950-25ee51e2ca3a h1:ALsGoIFe2IZLMD+y0/ds7Spn8e9qiucQ9hod0zTRmfk=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603001950-25ee51e2ca3a/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603140917-bc624f5389d6 h1:GcwtLT80QuxAC7Dg+EpCQv1k/2Abhw8kvxQn3vuit5Q=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230603140917-bc624f5389d6/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230604125924-bbe195ee0207 h1:3ObPrftXDNkEN5M87IXxRlhA13x/44CuVaHXppsNDUg=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230604125924-bbe195ee0207/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230605194130-266f13aee9d8 h1:7SqRnb44CN9QQtZxdFTTgaSqsWVbtFRrHLbKhrTEXlM=
github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230605194130-266f13aee9d8/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.9.7 h1:06xGQy5www2oN160RtEZoTvnP2sPhEfePYmCDc2szss=
github.com/onsi/ginkgo/v2 v2.9.7/go.mod h1:cxrmXWykAwTwhQsJOPfdIDiJ+l2RYq7U8hFU+M/1uw0=
github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
@@ -162,6 +218,8 @@ github.com/sashabaranov/go-openai v1.9.4 h1:KanoCEoowAI45jVXlenMCckutSRr39qOmSi9
github.com/sashabaranov/go-openai v1.9.4/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/sashabaranov/go-openai v1.9.5 h1:z1VCMXsfnug+U0ceTTIXr/L26AYl9jafqA9lptlSX0c=
github.com/sashabaranov/go-openai v1.9.5/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/sashabaranov/go-openai v1.10.0 h1:uUD3EOKDdGa6geMVbe2Trj9/ckF9sCV5jpQM19f7GM8=
github.com/sashabaranov/go-openai v1.10.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94 h1:rmMl4fXJhKMNWl+K+r/fq4FbbKI+Ia2m9hYBLm2h4G4=
github.com/savsgio/dictpool v0.0.0-20221023140959-7bf2e61cea94/go.mod h1:90zrgN3D/WJsDd1iXHT96alCoN2KJo6/4x1DZC3wZs8=
github.com/savsgio/gotils v0.0.0-20220530130905-52f3993e8d6d/go.mod h1:Gy+0tqhJvgGlqnTF8CVGP0AaGRjwBtXs/a5PA0Y3+A4=
@@ -176,8 +234,14 @@ github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKik
github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
github.com/tmc/langchaingo v0.0.0-20230530193922-fb062652f841 h1:IVlfKPZzq3W1G+CkhZgN5VjmHnAeB3YqEvxyNPPCZXY=
github.com/tmc/langchaingo v0.0.0-20230530193922-fb062652f841/go.mod h1:6l1WoyqVDwkv7cFlY3gfcTv8yVowVyuutKv8PGlQCWI=
github.com/tmc/langchaingo v0.0.0-20230605114752-4afed6d7be4a h1:YtKJTKbM3qu60+ZxLtyeCl0RvdG7LKbyF8TT7nzV6Gg=
github.com/tmc/langchaingo v0.0.0-20230605114752-4afed6d7be4a/go.mod h1:6l1WoyqVDwkv7cFlY3gfcTv8yVowVyuutKv8PGlQCWI=
github.com/urfave/cli/v2 v2.25.3 h1:VJkt6wvEBOoSjPFQvOkv6iWIrsJyCrKGtCtxXWwmGeY=
github.com/urfave/cli/v2 v2.25.3/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/urfave/cli/v2 v2.25.5 h1:d0NIAyhh5shGscroL7ek/Ya9QYQE0KNabJgiUinIQkc=
github.com/urfave/cli/v2 v2.25.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.47.0 h1:y7moDoxYzMooFpT5aHgNgVOQDrS3qlkfiP9mDtGGK9c=

main.go

@@ -42,55 +42,61 @@ func main() {
EnvVars: []string{"CORS_ALLOW_ORIGINS"},
},
&cli.IntFlag{
- Name: "threads",
- DefaultText: "Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested.",
- EnvVars: []string{"THREADS"},
- Value: 4,
+ Name: "threads",
+ Usage: "Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested.",
+ EnvVars: []string{"THREADS"},
+ Value: 4,
},
&cli.StringFlag{
- Name: "models-path",
- DefaultText: "Path containing models used for inferencing",
- EnvVars: []string{"MODELS_PATH"},
- Value: filepath.Join(path, "models"),
+ Name: "models-path",
+ Usage: "Path containing models used for inferencing",
+ EnvVars: []string{"MODELS_PATH"},
+ Value: filepath.Join(path, "models"),
},
&cli.StringFlag{
- Name: "preload-models",
- DefaultText: "A List of models to apply in JSON at start",
- EnvVars: []string{"PRELOAD_MODELS"},
+ Name: "preload-models",
+ Usage: "A List of models to apply in JSON at start",
+ EnvVars: []string{"PRELOAD_MODELS"},
},
&cli.StringFlag{
- Name: "preload-models-config",
- DefaultText: "A List of models to apply at startup. Path to a YAML config file",
- EnvVars: []string{"PRELOAD_MODELS_CONFIG"},
+ Name: "preload-models-config",
+ Usage: "A List of models to apply at startup. Path to a YAML config file",
+ EnvVars: []string{"PRELOAD_MODELS_CONFIG"},
},
&cli.StringFlag{
- Name: "config-file",
- DefaultText: "Config file",
- EnvVars: []string{"CONFIG_FILE"},
+ Name: "config-file",
+ Usage: "Config file",
+ EnvVars: []string{"CONFIG_FILE"},
},
&cli.StringFlag{
- Name: "address",
- DefaultText: "Bind address for the API server.",
- EnvVars: []string{"ADDRESS"},
- Value: ":8080",
+ Name: "address",
+ Usage: "Bind address for the API server.",
+ EnvVars: []string{"ADDRESS"},
+ Value: ":8080",
},
&cli.StringFlag{
- Name: "image-path",
- DefaultText: "Image directory",
- EnvVars: []string{"IMAGE_PATH"},
- Value: "",
+ Name: "image-path",
+ Usage: "Image directory",
+ EnvVars: []string{"IMAGE_PATH"},
+ Value: "",
},
+ &cli.StringFlag{
+ Name: "backend-assets-path",
+ Usage: "Path used to extract libraries that are required by some of the backends in runtime.",
+ EnvVars: []string{"BACKEND_ASSETS_PATH"},
+ Value: "/tmp/localai/backend_data",
+ },
&cli.IntFlag{
- Name: "context-size",
- DefaultText: "Default context size of the model",
- EnvVars: []string{"CONTEXT_SIZE"},
- Value: 512,
+ Name: "context-size",
+ Usage: "Default context size of the model",
+ EnvVars: []string{"CONTEXT_SIZE"},
+ Value: 512,
},
&cli.IntFlag{
- Name: "upload-limit",
- DefaultText: "Default upload-limit. MB",
- EnvVars: []string{"UPLOAD_LIMIT"},
- Value: 15,
+ Name: "upload-limit",
+ Usage: "Default upload-limit. MB",
+ EnvVars: []string{"UPLOAD_LIMIT"},
+ Value: 15,
},
},
Description: `
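Note on the flag changes above: in urfave/cli v2, Usage is the flag's help description, while DefaultText only overrides how the default value is rendered in help output. The old code set DefaultText where Usage was intended, so the descriptions were effectively never displayed. A minimal standalone sketch of the distinction (not taken from this diff):

package main

import (
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.IntFlag{
				Name:        "threads",
				Usage:       "Number of threads used for parallel computation.", // description shown in --help
				DefaultText: "number of physical cores",                         // replaces "4" in the help text only
				Value:       4,
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}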
@@ -124,6 +130,8 @@ It uses llama.cpp, ggml and gpt4all as backend with golang c bindings.
api.WithCors(ctx.Bool("cors")),
api.WithCorsAllowOrigins(ctx.String("cors-allow-origins")),
api.WithThreads(ctx.Int("threads")),
+ api.WithBackendAssets(backendAssets),
+ api.WithBackendAssetsOutput(ctx.String("backend-assets-path")),
api.WithUploadLimitMB(ctx.Int("upload-limit")))
if err != nil {
return err

pkg/assets/extract.go Normal file

@@ -0,0 +1,51 @@
package assets
import (
"embed"
"fmt"
"io/fs"
"os"
"path/filepath"
)
func ExtractFiles(content embed.FS, extractDir string) error {
// Create the target directory if it doesn't exist
err := os.MkdirAll(extractDir, 0755)
if err != nil {
return fmt.Errorf("failed to create directory: %v", err)
}
// Walk through the embedded FS and extract files
err = fs.WalkDir(content, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
// Reconstruct the directory structure in the target directory
targetFile := filepath.Join(extractDir, path)
if d.IsDir() {
// Create the directory in the target directory
err := os.MkdirAll(targetFile, 0755)
if err != nil {
return fmt.Errorf("failed to create directory: %v", err)
}
return nil
}
// Read the file from the embedded FS
fileData, err := content.ReadFile(path)
if err != nil {
return fmt.Errorf("failed to read file: %v", err)
}
// Create the file in the target directory
err = os.WriteFile(targetFile, fileData, 0644)
if err != nil {
return fmt.Errorf("failed to write file: %v", err)
}
return nil
})
return err
}
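A minimal usage sketch for ExtractFiles, assuming an embedded backend-assets tree in the main package; the embed directive and directory name here are illustrative, not part of this diff:

package main

import (
	"embed"
	"log"

	"github.com/go-skynet/LocalAI/pkg/assets"
)

// Hypothetical embed; the real directive lives wherever the backend
// libraries are bundled into the binary.
//go:embed backend-assets/*
var backendAssets embed.FS

func main() {
	// Mirrors the default of the new --backend-assets-path flag.
	if err := assets.ExtractFiles(backendAssets, "/tmp/localai/backend_data"); err != nil {
		log.Fatalf("extracting backend assets: %v", err)
	}
}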


@@ -0,0 +1,47 @@
package langchain
import (
"context"
"github.com/tmc/langchaingo/llms"
"github.com/tmc/langchaingo/llms/huggingface"
)
type HuggingFace struct {
modelPath string
}
func NewHuggingFace(repoId string) (*HuggingFace, error) {
return &HuggingFace{
modelPath: repoId,
}, nil
}
func (s *HuggingFace) PredictHuggingFace(text string, opts ...PredictOption) (*Predict, error) {
po := NewPredictOptions(opts...)
// Init client
llm, err := huggingface.New()
if err != nil {
return nil, err
}
// Convert from LocalAI to LangChainGo format of options
co := []llms.CallOption{
llms.WithModel(po.Model),
llms.WithMaxTokens(po.MaxTokens),
llms.WithTemperature(po.Temperature),
llms.WithStopWords(po.StopWords),
}
// Call Inference API
ctx := context.Background()
completion, err := llm.Call(ctx, text, co...)
if err != nil {
return nil, err
}
return &Predict{
Completion: completion,
}, nil
}
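A hedged usage sketch for the new HuggingFace backend; the model name and prompt are placeholders, and the underlying langchaingo client authenticates to the Hugging Face Inference API through the environment, which this assumes is already configured:

package main

import (
	"fmt"
	"log"

	"github.com/go-skynet/LocalAI/pkg/langchain"
)

func main() {
	hf, err := langchain.NewHuggingFace("gpt2")
	if err != nil {
		log.Fatal(err)
	}
	// The model used for the call comes from the predict options,
	// not from the repo id passed to the constructor.
	res, err := hf.PredictHuggingFace("Q: What is a llama? A:",
		langchain.SetModel("gpt2"),
		langchain.SetMaxTokens(32),
		langchain.SetTemperature(0.2))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Completion)
}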


@@ -0,0 +1,57 @@
package langchain
type PredictOptions struct {
Model string `json:"model"`
// MaxTokens is the maximum number of tokens to generate.
MaxTokens int `json:"max_tokens"`
// Temperature is the temperature for sampling, between 0 and 1.
Temperature float64 `json:"temperature"`
// StopWords is a list of words to stop on.
StopWords []string `json:"stop_words"`
}
type PredictOption func(p *PredictOptions)
var DefaultOptions = PredictOptions{
Model: "gpt2",
MaxTokens: 200,
Temperature: 0.96,
StopWords: nil,
}
type Predict struct {
Completion string
}
func SetModel(model string) PredictOption {
return func(o *PredictOptions) {
o.Model = model
}
}
func SetTemperature(temperature float64) PredictOption {
return func(o *PredictOptions) {
o.Temperature = temperature
}
}
func SetMaxTokens(maxTokens int) PredictOption {
return func(o *PredictOptions) {
o.MaxTokens = maxTokens
}
}
func SetStopWords(stopWords []string) PredictOption {
return func(o *PredictOptions) {
o.StopWords = stopWords
}
}
// NewPredictOptions creates a new PredictOptions object with the given options.
func NewPredictOptions(opts ...PredictOption) PredictOptions {
p := DefaultOptions
for _, opt := range opts {
opt(&p)
}
return p
}
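This is the functional-options pattern: NewPredictOptions starts from DefaultOptions and applies the caller's overrides in order, so unspecified fields keep their defaults. A small self-contained illustration:

package main

import (
	"fmt"

	"github.com/go-skynet/LocalAI/pkg/langchain"
)

func main() {
	po := langchain.NewPredictOptions(
		langchain.SetMaxTokens(64),
		langchain.SetStopWords([]string{"\n"}),
	)
	// Model ("gpt2") and Temperature (0.96) keep their default values.
	fmt.Printf("%+v\n", po)
}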


@@ -7,6 +7,7 @@ import (
rwkv "github.com/donomii/go-rwkv.cpp"
whisper "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
"github.com/go-skynet/LocalAI/pkg/langchain"
"github.com/go-skynet/LocalAI/pkg/stablediffusion"
bloomz "github.com/go-skynet/bloomz.cpp"
bert "github.com/go-skynet/go-bert.cpp"
@@ -32,17 +33,18 @@ const (
Gpt4AllLlamaBackend = "gpt4all-llama"
Gpt4AllMptBackend = "gpt4all-mpt"
Gpt4AllJBackend = "gpt4all-j"
Gpt4All = "gpt4all"
FalconBackend = "falcon"
BertEmbeddingsBackend = "bert-embeddings"
RwkvBackend = "rwkv"
WhisperBackend = "whisper"
StableDiffusionBackend = "stablediffusion"
+ LCHuggingFaceBackend = "langchain-huggingface"
)
- var backends []string = []string{
+ var autoLoadBackends []string = []string{
LlamaBackend,
- Gpt4AllLlamaBackend,
- Gpt4AllMptBackend,
- Gpt4AllJBackend,
+ Gpt4All,
RwkvBackend,
GPTNeoXBackend,
WhisperBackend,
@@ -50,6 +52,7 @@ var backends []string = []string{
GPTJBackend,
Gpt2Backend,
DollyBackend,
+ FalconBackend,
MPTBackend,
ReplitBackend,
StarcoderBackend,
@@ -80,6 +83,10 @@ var gptJ = func(modelFile string) (interface{}, error) {
return transformers.NewGPTJ(modelFile)
}
+ var falcon = func(modelFile string) (interface{}, error) {
+ return transformers.NewFalcon(modelFile)
+ }
var bertEmbeddings = func(modelFile string) (interface{}, error) {
return bert.New(modelFile)
}
@@ -100,6 +107,10 @@ var whisperModel = func(modelFile string) (interface{}, error) {
return whisper.New(modelFile)
}
+ var lcHuggingFace = func(repoId string) (interface{}, error) {
+ return langchain.NewHuggingFace(repoId)
+ }
func llamaLM(opts ...llama.ModelOption) func(string) (interface{}, error) {
return func(s string) (interface{}, error) {
return llama.New(s, opts...)
@@ -139,6 +150,8 @@ func (ml *ModelLoader) BackendLoader(backendString string, modelFile string, lla
return ml.LoadModel(modelFile, mpt)
case Gpt2Backend:
return ml.LoadModel(modelFile, transformersLM)
+ case FalconBackend:
+ return ml.LoadModel(modelFile, falcon)
case GPTNeoXBackend:
return ml.LoadModel(modelFile, gptNeoX)
case ReplitBackend:
@@ -147,18 +160,16 @@ func (ml *ModelLoader) BackendLoader(backendString string, modelFile string, lla
return ml.LoadModel(modelFile, stableDiffusion)
case StarcoderBackend:
return ml.LoadModel(modelFile, starCoder)
- case Gpt4AllLlamaBackend:
- return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads)), gpt4all.SetModelType(gpt4all.LLaMAType)))
- case Gpt4AllMptBackend:
- return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads)), gpt4all.SetModelType(gpt4all.MPTType)))
- case Gpt4AllJBackend:
- return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads)), gpt4all.SetModelType(gpt4all.GPTJType)))
+ case Gpt4AllLlamaBackend, Gpt4AllMptBackend, Gpt4AllJBackend, Gpt4All:
+ return ml.LoadModel(modelFile, gpt4allLM(gpt4all.SetThreads(int(threads))))
case BertEmbeddingsBackend:
return ml.LoadModel(modelFile, bertEmbeddings)
case RwkvBackend:
return ml.LoadModel(modelFile, rwkvLM(filepath.Join(ml.ModelPath, modelFile+tokenizerSuffix), threads))
case WhisperBackend:
return ml.LoadModel(modelFile, whisperModel)
+ case LCHuggingFaceBackend:
+ return ml.LoadModel(modelFile, lcHuggingFace)
default:
return nil, fmt.Errorf("backend unsupported: %s", backendString)
}
@@ -177,7 +188,7 @@ func (ml *ModelLoader) GreedyLoader(modelFile string, llamaOpts []llama.ModelOpt
ml.mu.Unlock()
var err error
- for _, b := range backends {
+ for _, b := range autoLoadBackends {
if b == BloomzBackend || b == WhisperBackend || b == RwkvBackend { // do not autoload bloomz/whisper/rwkv
continue
}
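The consolidation above is a multi-value switch case: all four gpt4all backend strings now share one loader, and the binding detects the model type itself instead of the caller pinning LLaMAType/MPTType/GPTJType. An abstract, self-contained illustration of the same shape (names are illustrative, not LocalAI's API):

package main

import "fmt"

func load(backend string) (string, error) {
	switch backend {
	case "gpt4all-llama", "gpt4all-mpt", "gpt4all-j", "gpt4all":
		// One constructor; the model type is auto-detected from the file.
		return "gpt4all(auto)", nil
	case "falcon":
		return "falcon", nil
	default:
		return "", fmt.Errorf("backend unsupported: %s", backend)
	}
}

func main() {
	for _, b := range []string{"gpt4all-j", "gpt4all", "falcon", "unknown"} {
		m, err := load(b)
		fmt.Println(b, "->", m, err)
	}
}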


@@ -8,6 +8,18 @@ import (
)
func GenerateImage(height, width, mode, step, seed int, positive_prompt, negative_prompt, dst, asset_dir string) error {
+ if height > 512 || width > 512 {
+ return stableDiffusion.GenerateImageUpscaled(
+ height,
+ width,
+ step,
+ seed,
+ positive_prompt,
+ negative_prompt,
+ dst,
+ asset_dir,
+ )
+ }
return stableDiffusion.GenerateImage(
height,
width,