mirror of https://github.com/mudler/LocalAI.git
synced 2026-02-03 11:13:31 -05:00

Compare commits: 355 commits, from 2f5feb4841 to c974dad799.
.devcontainer/Dockerfile (3 lines, deleted)
@@ -1,3 +0,0 @@
ARG GO_VERSION=1.20
FROM mcr.microsoft.com/devcontainers/go:0-$GO_VERSION-bullseye
RUN apt-get update && apt-get install -y cmake
.devcontainer/devcontainer.json (46 lines, deleted)
@@ -1,46 +0,0 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
{
  "name": "Existing Docker Compose (Extend)",

  // Update the 'dockerComposeFile' list if you have more compose files or use different names.
  // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
  "dockerComposeFile": [
    "../docker-compose.yaml",
    "docker-compose.yml"
  ],

  // The 'service' property is the name of the service for the container that VS Code should
  // use. Update this value and .devcontainer/docker-compose.yml to the real service name.
  "service": "api",

  // The optional 'workspaceFolder' property is the path VS Code should open by default when
  // connected. This is typically a file mount in .devcontainer/docker-compose.yml
  "workspaceFolder": "/workspace",

  "features": {
    "ghcr.io/devcontainers/features/go:1": {},
    "ghcr.io/azutake/devcontainer-features/go-packages-install:0": {}
  },

  // Features to add to the dev container. More info: https://containers.dev/features.
  // "features": {},

  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  // "forwardPorts": [],

  // Uncomment the next line if you want start specific services in your Docker Compose config.
  // "runServices": [],

  // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
  // "shutdownAction": "none",

  // Uncomment the next line to run commands after the container is created.
  "postCreateCommand": "make prepare"

  // Configure tool-specific properties.
  // "customizations": {},

  // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
  // "remoteUser": "devcontainer"
}
.devcontainer/docker-compose.yml (26 lines, deleted)
@@ -1,26 +0,0 @@
version: '3.6'
services:
  # Update this to the name of the service you want to work with in your docker-compose.yml file
  api:
    # Uncomment if you want to override the service's Dockerfile to one in the .devcontainer
    # folder. Note that the path of the Dockerfile and context is relative to the *primary*
    # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile"
    # array). The sample below assumes your primary file is in the root of your project.
    #
    build:
      context: .
      dockerfile: .devcontainer/Dockerfile

    volumes:
      # Update this to wherever you want VS Code to mount the folder of your project
      - .:/workspace:cached

    # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust.
    # cap_add:
    #   - SYS_PTRACE
    # security_opt:
    #   - seccomp:unconfined

    # Overrides default command so things don't shut down after the process ends.
    command: /bin/sh -c "while sleep 1000; do :; done"
.dockerignore (changed)
@@ -1,2 +1,6 @@
.git
.idea
models
examples/chatbot-ui/models
examples/rwkv/models
examples/**/models
.env (27 lines changed)
@@ -1,5 +1,30 @@
## Set number of threads.
## Note: prefer the number of physical cores. Overbooking the CPU degrades performance notably.
# THREADS=14

## Specify a different bind address (defaults to ":8080")
# ADDRESS=127.0.0.1:8080

## Default models context size
# CONTEXT_SIZE=512

## Default path for models
MODELS_PATH=/models

## Enable debug mode
# DEBUG=true
# BUILD_TYPE=generic

## Specify a build type. Available: cublas, openblas.
# BUILD_TYPE=openblas

## Uncomment and set to false to disable rebuilding from source
# REBUILD=false

## Enable image generation with stablediffusion (requires REBUILD=true)
# GO_TAGS=stablediffusion

## Path where to store generated images
# IMAGE_PATH=/tmp

## Specify a default upload limit in MB (whisper)
# UPLOAD_LIMIT
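For illustration, a filled-in `.env` enabling a CUDA rebuild with image generation might look like the sketch below; the values are examples, not project defaults:

```bash
THREADS=8                 # prefer the number of physical cores
MODELS_PATH=/models
BUILD_TYPE=cublas         # or: openblas
REBUILD=true              # rebuild from source inside the container on start
GO_TAGS=stablediffusion   # image generation; requires REBUILD=true
IMAGE_PATH=/tmp
```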
.github/ISSUE_TEMPLATE/bug_report.md (31 lines, new file, vendored)
@@ -0,0 +1,31 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: mudler

---

<!-- Thanks for helping us to improve LocalAI! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**LocalAI version:**
<!-- Container Image or LocalAI tag/commit -->

**Environment, CPU architecture, OS, and Version:**
<!-- Provide the output from "uname -a", HW specs, if it's a VM -->

**Describe the bug**
<!-- A clear and concise description of what the bug is. -->

**To Reproduce**
<!-- Steps to reproduce the behavior, including the LocalAI command used, if any -->

**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->

**Logs**
<!-- If applicable, add logs while running LocalAI in debug mode (`--debug` or `DEBUG=true`) to help explain your problem. -->

**Additional context**
<!-- Add any other context about the problem here. -->
.github/ISSUE_TEMPLATE/config.yml (8 lines, new file, vendored)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Community Support
    url: https://github.com/go-skynet/LocalAI/discussions
    about: Please ask and answer questions here.
  - name: Discord
    url: https://discord.gg/uJAeKSAGDy
    about: Join our community on Discord!
.github/ISSUE_TEMPLATE/feature_request.md (22 lines, new file, vendored)
@@ -0,0 +1,22 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: mudler

---

<!-- Thanks for helping us to improve LocalAI! We welcome all feature requests. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->

**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->

**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->

**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->
.github/PULL_REQUEST_TEMPLATE.md (23 lines, new file, vendored)
@@ -0,0 +1,23 @@
**Description**

This PR fixes #

**Notes for Reviewers**


**[Signed commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
- [ ] Yes, I signed my commits.


<!--
Thank you for contributing to LocalAI!

Contributing Conventions:

1. Include descriptive PR titles with [<component-name>] prepended.
2. Build and test your changes before submitting a PR.
3. Sign your commits

By following the community's contribution conventions upfront, the review process will
be accelerated and your PR merged more quickly.
-->
.github/bump_deps.sh (9 lines, new executable file, vendored)
@@ -0,0 +1,9 @@
#!/bin/bash
set -xe
REPO=$1
BRANCH=$2
VAR=$3

LAST_COMMIT=$(curl -s -H "Accept: application/vnd.github.VERSION.sha" "https://api.github.com/repos/$REPO/commits/$BRANCH")

sed -i Makefile -e "s/$VAR?=.*/$VAR?=$LAST_COMMIT/"
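The script resolves `$BRANCH` of `$REPO` to its latest commit SHA via the GitHub API and pins the matching `?=` variable in the Makefile. The bump workflow below invokes it once per matrix entry, for example:

```bash
# Pin GOLLAMA_VERSION in the Makefile to the tip of go-llama.cpp's master branch,
# exactly as the bump_deps workflow does for that matrix entry:
bash .github/bump_deps.sh go-skynet/go-llama.cpp master GOLLAMA_VERSION
```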
.github/release.yml (24 lines, new file, vendored)
@@ -0,0 +1,24 @@
# .github/release.yml

changelog:
  exclude:
    labels:
      - ignore-for-release
  categories:
    - title: Breaking Changes 🛠
      labels:
        - Semver-Major
        - breaking-change
    - title: "Bug fixes :bug:"
      labels:
        - bug
    - title: Exciting New Features 🎉
      labels:
        - Semver-Minor
        - enhancement
    - title: 👒 Dependencies
      labels:
        - dependencies
    - title: Other Changes
      labels:
        - "*"
.github/stale.yml (18 lines, new file, vendored)
@@ -0,0 +1,18 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 45
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 10
# Issues with these labels will never be considered stale
exemptLabels:
  - issue/willfix
# Label to use when marking an issue as stale
staleLabel: issue/stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
  This issue is being automatically closed due to inactivity.
  However, you may choose to reopen this issue.
.github/workflows/bump_deps.yaml (51 lines, new file, vendored)
@@ -0,0 +1,51 @@
name: Bump dependencies
on:
  schedule:
    - cron: 0 20 * * *
  workflow_dispatch:
jobs:
  bump:
    strategy:
      fail-fast: false
      matrix:
        include:
          - repository: "go-skynet/go-llama.cpp"
            variable: "GOLLAMA_VERSION"
            branch: "master"
          - repository: "go-skynet/go-ggml-transformers.cpp"
            variable: "GOGGMLTRANSFORMERS_VERSION"
            branch: "master"
          - repository: "donomii/go-rwkv.cpp"
            variable: "RWKV_VERSION"
            branch: "main"
          - repository: "ggerganov/whisper.cpp"
            variable: "WHISPER_CPP_VERSION"
            branch: "master"
          - repository: "go-skynet/go-bert.cpp"
            variable: "BERT_VERSION"
            branch: "master"
          - repository: "go-skynet/bloomz.cpp"
            variable: "BLOOMZ_VERSION"
            branch: "main"
          - repository: "nomic-ai/gpt4all"
            variable: "GPT4ALL_VERSION"
            branch: "main"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Bump dependencies 🔧
        run: |
          bash .github/bump_deps.sh ${{ matrix.repository }} ${{ matrix.branch }} ${{ matrix.variable }}
      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ secrets.UPDATE_BOT_TOKEN }}
          push-to-fork: ci-forks/LocalAI
          commit-message: ':arrow_up: Update ${{ matrix.repository }}'
          title: ':arrow_up: Update ${{ matrix.repository }}'
          branch: "update/${{ matrix.variable }}"
          body: Bump of ${{ matrix.repository }} version
          signoff: true
.github/workflows/image.yml (107 lines changed, vendored)
@@ -9,36 +9,71 @@ on:
  tags:
    - '*'

concurrency:
  group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
  cancel-in-progress: true

jobs:
  docker:
    strategy:
      matrix:
        include:
          - build-type: ''
            platforms: 'linux/amd64,linux/arm64'
            tag-latest: 'auto'
            tag-suffix: ''
            ffmpeg: ''
          - build-type: 'cublas'
            cuda-major-version: 11
            cuda-minor-version: 7
            platforms: 'linux/amd64'
            tag-latest: 'false'
            tag-suffix: '-cublas-cuda11'
            ffmpeg: ''
          - build-type: 'cublas'
            cuda-major-version: 12
            cuda-minor-version: 1
            platforms: 'linux/amd64'
            tag-latest: 'false'
            tag-suffix: '-cublas-cuda12'
            ffmpeg: ''
          - build-type: ''
            platforms: 'linux/amd64,linux/arm64'
            tag-latest: 'false'
            tag-suffix: '-ffmpeg'
            ffmpeg: 'true'
          - build-type: 'cublas'
            cuda-major-version: 11
            cuda-minor-version: 7
            platforms: 'linux/amd64'
            tag-latest: 'false'
            tag-suffix: '-cublas-cuda11-ffmpeg'
            ffmpeg: 'true'
          - build-type: 'cublas'
            cuda-major-version: 12
            cuda-minor-version: 1
            platforms: 'linux/amd64'
            tag-latest: 'false'
            tag-suffix: '-cublas-cuda12-ffmpeg'
            ffmpeg: 'true'

    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=quay.io/go-skynet/local-ai
          VERSION=master
          SHORTREF=${GITHUB_SHA::8}

          # If this is git tag, use the tag name as a docker tag
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"

          # If the VERSION looks like a version number, assume that
          # this is the most recent version of the image and also
          # tag it 'latest'.
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            TAGS="$TAGS,${DOCKER_IMAGE}:latest"
          fi

          # Set output parameters.
          echo ::set-output name=tags::${TAGS}
          echo ::set-output name=docker_image::${DOCKER_IMAGE}
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: quay.io/go-skynet/local-ai
          tags: |
            type=ref,event=branch
            type=semver,pattern={{raw}}
            type=sha
          flavor: |
            latest=${{ matrix.tag-latest }}
            suffix=${{ matrix.tag-suffix }}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
@@ -56,23 +91,19 @@ jobs:
          registry: quay.io
          username: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
          password: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
      - name: Build
        if: github.event_name != 'pull_request'

      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          builder: ${{ steps.buildx.outputs.name }}
          build-args: |
            BUILD_TYPE=${{ matrix.build-type }}
            CUDA_MAJOR_VERSION=${{ matrix.cuda-major-version }}
            CUDA_MINOR_VERSION=${{ matrix.cuda-minor-version }}
            FFMPEG=${{ matrix.ffmpeg }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.prep.outputs.tags }}
      - name: Build PRs
        if: github.event_name == 'pull_request'
        uses: docker/build-push-action@v4
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: .
          file: ./Dockerfile
          platforms: linux/amd64
          push: false
          tags: ${{ steps.prep.outputs.tags }}
          platforms: ${{ matrix.platforms }}
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
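With the `docker/metadata-action` configuration above, image tags are derived from the git ref plus the per-variant `tag-suffix`. A hedged illustration (the version and suffix below are examples; actual tags depend on the ref being built):

```bash
# Pull one of the variant images the matrix above publishes to quay.io
# (illustrative tag: semver ref "v1.18.0" + tag-suffix "-cublas-cuda12-ffmpeg"):
docker pull quay.io/go-skynet/local-ai:v1.18.0-cublas-cuda12-ffmpeg
```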
.github/workflows/release.yaml (79 lines, new file, vendored)
@@ -0,0 +1,79 @@
name: Build and Release

on: push

permissions:
  contents: write

jobs:
  build-linux:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: ubuntu-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install build-essential ffmpeg
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.defines }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          STATIC=true make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*

  build-macOS:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: macOS-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.defines }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*
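Each matrix leg uploads whatever `make dist` drops into `release/`. Based on the Makefile's `dist` rule further down (`release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)`), a single leg can be reproduced locally; a sketch, assuming a Linux x86_64 host with the build toolchain installed:

```bash
# Reproduce the 'avx' Linux leg of the release matrix by hand;
# BUILD_ID and CMAKE_ARGS mirror the workflow's env block above.
CMAKE_ARGS="-DLLAMA_AVX2=OFF" BUILD_ID=avx STATIC=true make dist
ls release/   # e.g. local-ai-avx-Linux-x86_64
```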
.github/workflows/release.yml.disabled (26 lines, deleted, vendored)
@@ -1,26 +0,0 @@
name: goreleaser

on:
  push:
    tags:
      - 'v*'

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v4
        with:
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/test.yml (12 lines changed, vendored)
@@ -9,6 +9,10 @@ on:
  tags:
    - '*'

concurrency:
  group: ci-tests-${{ github.head_ref || github.ref }}-${{ github.repository }}
  cancel-in-progress: true

jobs:
  ubuntu-latest:
    runs-on: ubuntu-latest
@@ -21,7 +25,7 @@ jobs:
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install build-essential
          sudo apt-get install build-essential ffmpeg
      - name: Test
        run: |
          make test
@@ -35,10 +39,6 @@ jobs:
        with:
          submodules: true

      - name: Dependencies
        run: |
          brew update
          brew install sdl2
      - name: Test
        run: |
          make test
          CMAKE_ARGS="-DLLAMA_F16C=OFF -DLLAMA_AVX512=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF" make test
.gitignore (22 lines changed, vendored)
@@ -1,7 +1,13 @@
# go-llama build artifacts
go-llama
go-gpt4all-j
gpt4all
go-stable-diffusion
go-ggml-transformers
go-gpt2
go-rwkv
whisper.cpp
bloomz
go-bert

# LocalAI build binary
LocalAI
@@ -11,4 +17,16 @@ local-ai

# Ignore models
models/*
test-models/
test-dir/

release/

# just in case
.DS_Store
.idea

# Generated during build
backend-assets/

/ggml-metal.metal
.goreleaser.yaml (15 lines, deleted)
@@ -1,15 +0,0 @@
# Make sure to check the documentation at http://goreleaser.com
project_name: local-ai
builds:
  - ldflags:
      - -w -s
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
      - windows
    goarch:
      - amd64
      - arm64
    binary: '{{ .ProjectName }}'
.vscode/launch.json (19 lines changed, vendored)
@@ -2,7 +2,20 @@
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Launch Go",
      "name": "Python: Current File",
      "type": "python",
      "request": "launch",
      "program": "${file}",
      "console": "integratedTerminal",
      "justMyCode": false,
      "cwd": "${workspaceFolder}/examples/langchain-chroma",
      "env": {
        "OPENAI_API_BASE": "http://localhost:8080/v1",
        "OPENAI_API_KEY": "abc"
      }
    },
    {
      "name": "Launch LocalAI API",
      "type": "go",
      "request": "launch",
      "mode": "debug",
@@ -11,8 +24,8 @@
        "api"
      ],
      "env": {
        "C_INCLUDE_PATH": "/workspace/go-llama:/workspace/go-gpt4all-j:/workspace/go-gpt2",
        "LIBRARY_PATH": "/workspace/go-llama:/workspace/go-gpt4all-j:/workspace/go-gpt2",
        "C_INCLUDE_PATH": "${workspaceFolder}/go-llama:${workspaceFolder}/go-stable-diffusion/:${workspaceFolder}/gpt4all/gpt4all-bindings/golang/:${workspaceFolder}/go-gpt2:${workspaceFolder}/go-rwkv:${workspaceFolder}/whisper.cpp:${workspaceFolder}/go-bert:${workspaceFolder}/bloomz",
        "LIBRARY_PATH": "${workspaceFolder}/go-llama:${workspaceFolder}/go-stable-diffusion/:${workspaceFolder}/gpt4all/gpt4all-bindings/golang/:${workspaceFolder}/go-gpt2:${workspaceFolder}/go-rwkv:${workspaceFolder}/whisper.cpp:${workspaceFolder}/go-bert:${workspaceFolder}/bloomz",
        "DEBUG": "true"
      }
    }
Dockerfile (71 lines changed)
@@ -1,9 +1,70 @@
ARG GO_VERSION=1.20
ARG BUILD_TYPE=
FROM golang:$GO_VERSION
ARG GO_VERSION=1.20-bullseye

FROM golang:$GO_VERSION as requirements

ARG BUILD_TYPE
ARG CUDA_MAJOR_VERSION=11
ARG CUDA_MINOR_VERSION=7

ENV BUILD_TYPE=${BUILD_TYPE}

RUN apt-get update && \
    apt-get install -y ca-certificates cmake curl patch

# CuBLAS requirements
RUN if [ "${BUILD_TYPE}" = "cublas" ]; then \
    apt-get install -y software-properties-common && \
    apt-add-repository contrib && \
    curl -O https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.0-1_all.deb && \
    dpkg -i cuda-keyring_1.0-1_all.deb && \
    rm -f cuda-keyring_1.0-1_all.deb && \
    apt-get update && \
    apt-get install -y cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
    ; fi
ENV PATH /usr/local/cuda/bin:${PATH}

# OpenBLAS requirements
RUN apt-get install -y libopenblas-dev

# Stable Diffusion requirements
RUN apt-get install -y libopencv-dev && \
    ln -s /usr/include/opencv4/opencv2 /usr/include/opencv2

FROM requirements as builder

ARG GO_TAGS=stablediffusion

ENV GO_TAGS=${GO_TAGS}
ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
ENV NVIDIA_REQUIRE_CUDA="cuda>=${CUDA_MAJOR_VERSION}.0"
ENV NVIDIA_VISIBLE_DEVICES=all

WORKDIR /build
RUN apt-get update && apt-get install -y cmake

COPY . .
RUN make build

FROM requirements

ARG FFMPEG

ENV REBUILD=true
ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz

# Add FFmpeg
RUN if [ "${FFMPEG}" = "true" ]; then \
    apt-get install -y ffmpeg \
    ; fi

WORKDIR /build

COPY . .
RUN make prepare-sources
COPY --from=builder /build/local-ai ./

# Define the health check command
HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
    CMD curl -f $HEALTHCHECK_ENDPOINT || exit 1

EXPOSE 8080
ENTRYPOINT [ "/build/entrypoint.sh" ]
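The runtime stage points its health check at the readiness endpoint via `HEALTHCHECK_ENDPOINT`, and the `HEALTHCHECK` instruction simply curls it. The same probe can be run by hand against a container started from this image:

```bash
# Manual equivalent of the image's HEALTHCHECK (default endpoint from the ENV above):
curl -f http://localhost:8080/readyz && echo "LocalAI is ready"
```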
(deleted file — name not captured, 14 lines)
@@ -1,14 +0,0 @@
ARG GO_VERSION=1.20
ARG DEBIAN_VERSION=11
ARG BUILD_TYPE=

FROM golang:$GO_VERSION as builder
WORKDIR /build
RUN apt-get update && apt-get install -y cmake
COPY . .
RUN make build

FROM debian:$DEBIAN_VERSION
COPY --from=builder /build/local-ai /usr/bin/local-ai
EXPOSE 8080
ENTRYPOINT [ "/usr/bin/local-ai" ]
LICENSE (2 lines changed)
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2023 go-skynet authors
Copyright (c) 2023 Ettore Di Giacinto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Makefile (236 lines changed)
@@ -2,134 +2,256 @@ GOCMD=go
GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet
BINARY_NAME=local-ai
# renovate: datasource=github-tags depName=go-skynet/go-llama.cpp
GOLLAMA_VERSION?=llama.cpp-f4cef87
# renovate: datasource=git-refs packageNameTemplate=https://github.com/go-skynet/go-gpt4all-j.cpp currentValueTemplate=master depNameTemplate=go-gpt4all-j.cpp
GOGPT4ALLJ_VERSION?=1f7bff57f66cb7062e40d0ac3abd2217815e5109
# renovate: datasource=git-refs packageNameTemplate=https://github.com/go-skynet/go-gpt2.cpp currentValueTemplate=master depNameTemplate=go-gpt2.cpp
GOGPT2_VERSION?=245a5bfe6708ab80dc5c733dcdbfbe3cfd2acdaa

GOLLAMA_VERSION?=7ad833b67070fd3ec46d838f5e38d21111013f98
GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
GPT4ALL_VERSION?=2b6cc99a31a124f1f27f2dc6515b94b84d35b254
GOGGMLTRANSFORMERS_VERSION?=661669258dd0a752f3f3607358b168bc1d928135
RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
RWKV_VERSION?=af62fcc432be2847acb6e0688b2c2491d6588d58
RWKV_VERSION?=930a774fa0152426ed2279cb1005b3490bb0eba6
WHISPER_CPP_VERSION?=57543c169e27312e7546d07ed0d8c6eb806ebc36
BERT_VERSION?=6069103f54b9969c02e789d0fb12a23bd614285f
BLOOMZ_VERSION?=1834e77b83faafe912ad4092ccf7f77937349e2f
export BUILD_TYPE?=
CGO_LDFLAGS?=
CUDA_LIBPATH?=/usr/local/cuda/lib64/
STABLEDIFFUSION_VERSION?=d89260f598afb809279bc72aa0107b4292587632
GO_TAGS?=
BUILD_ID?=git
LD_FLAGS=?=
OPTIONAL_TARGETS?=

OS := $(shell uname -s)
ARCH := $(shell uname -m)
GREEN  := $(shell tput -Txterm setaf 2)
YELLOW := $(shell tput -Txterm setaf 3)
WHITE  := $(shell tput -Txterm setaf 7)
CYAN   := $(shell tput -Txterm setaf 6)
RESET  := $(shell tput -Txterm sgr0)

C_INCLUDE_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2:$(shell pwd)/go-rwkv
LIBRARY_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-gpt4all-j:$(shell pwd)/go-gpt2:$(shell pwd)/go-rwkv
C_INCLUDE_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-stable-diffusion/:$(shell pwd)/gpt4all/gpt4all-bindings/golang/:$(shell pwd)/go-ggml-transformers:$(shell pwd)/go-rwkv:$(shell pwd)/whisper.cpp:$(shell pwd)/go-bert:$(shell pwd)/bloomz
LIBRARY_PATH=$(shell pwd)/go-llama:$(shell pwd)/go-stable-diffusion/:$(shell pwd)/gpt4all/gpt4all-bindings/golang/:$(shell pwd)/go-ggml-transformers:$(shell pwd)/go-rwkv:$(shell pwd)/whisper.cpp:$(shell pwd)/go-bert:$(shell pwd)/bloomz

# Use this if you want to set the default behavior
ifndef BUILD_TYPE
    BUILD_TYPE:=default
ifeq ($(BUILD_TYPE),openblas)
    CGO_LDFLAGS+=-lopenblas
endif

ifeq ($(BUILD_TYPE), "generic")
    GENERIC_PREFIX:=generic-
else
    GENERIC_PREFIX:=
ifeq ($(BUILD_TYPE),cublas)
    CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH)
    export LLAMA_CUBLAS=1
endif

ifeq ($(BUILD_TYPE),metal)
    CGO_LDFLAGS+=-framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
    export LLAMA_METAL=1
endif

ifeq ($(BUILD_TYPE),clblas)
    CGO_LDFLAGS+=-lOpenCL -lclblast
endif

# glibc-static or glibc-devel-static required
ifeq ($(STATIC),true)
    LD_FLAGS=-linkmode external -extldflags -static
endif

ifeq ($(GO_TAGS),stablediffusion)
    OPTIONAL_TARGETS+=go-stable-diffusion/libstablediffusion.a
endif

.PHONY: all test build vendor

all: help

## GPT4ALL-J
go-gpt4all-j:
    git clone --recurse-submodules https://github.com/go-skynet/go-gpt4all-j.cpp go-gpt4all-j
    cd go-gpt4all-j && git checkout -b build $(GOGPT4ALLJ_VERSION) && git submodule update --init --recursive --depth 1
## GPT4ALL
gpt4all:
    git clone --recurse-submodules $(GPT4ALL_REPO) gpt4all
    cd gpt4all && git checkout -b build $(GPT4ALL_VERSION) && git submodule update --init --recursive --depth 1
    # This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
    @find ./go-gpt4all-j -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
    @find ./go-gpt4all-j -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
    @find ./go-gpt4all-j -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gptj_/g' {} +
    @find ./go-gpt4all-j -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gptj_/g' {} +
    @find ./go-gpt4all-j -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gptj_/g' {} +
    @find ./go-gpt4all-j -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gptj_/g' {} +
    @find ./go-gpt4all-j -type f -name "*.cpp" -exec sed -i'' -e 's/void replace/void json_gptj_replace/g' {} +
    @find ./go-gpt4all-j -type f -name "*.cpp" -exec sed -i'' -e 's/::replace/::json_gptj_replace/g' {} +
    @find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
    @find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
    @find ./gpt4all -type f -name "*.m" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
    @find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt4all_/g' {} +
    @find ./gpt4all -type f -name "*.c" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
    @find ./gpt4all -type f -name "*.cpp" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
    @find ./gpt4all -type f -name "*.h" -exec sed -i'' -e 's/llama_/llama_gpt4all_/g' {} +
    @find ./gpt4all/gpt4all-backend -type f -name "llama_util.h" -execdir mv {} "llama_gpt4all_util.h" \;
    @find ./gpt4all -type f -name "*.cmake" -exec sed -i'' -e 's/llama_util/llama_gpt4all_util/g' {} +
    @find ./gpt4all -type f -name "*.txt" -exec sed -i'' -e 's/llama_util/llama_gpt4all_util/g' {} +
    @find ./gpt4all/gpt4all-bindings/golang -type f -name "*.cpp" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
    @find ./gpt4all/gpt4all-bindings/golang -type f -name "*.go" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +
    @find ./gpt4all/gpt4all-bindings/golang -type f -name "*.h" -exec sed -i'' -e 's/load_model/load_gpt4all_model/g' {} +

## BERT embeddings
go-bert:
    git clone --recurse-submodules https://github.com/go-skynet/go-bert.cpp go-bert
    cd go-bert && git checkout -b build $(BERT_VERSION) && git submodule update --init --recursive --depth 1
    @find ./go-bert -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_bert_/g' {} +
    @find ./go-bert -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_bert_/g' {} +
    @find ./go-bert -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_bert_/g' {} +

## stable diffusion
go-stable-diffusion:
    git clone --recurse-submodules https://github.com/mudler/go-stable-diffusion go-stable-diffusion
    cd go-stable-diffusion && git checkout -b build $(STABLEDIFFUSION_VERSION) && git submodule update --init --recursive --depth 1

go-stable-diffusion/libstablediffusion.a:
    $(MAKE) -C go-stable-diffusion libstablediffusion.a

## RWKV
go-rwkv:
    git clone --recurse-submodules $(RWKV_REPO) go-rwkv
    cd go-rwkv && git checkout -b build $(RWKV_VERSION) && git submodule update --init --recursive --depth 1
    @find ./go-rwkv -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_rwkv_/g' {} +
    @find ./go-rwkv -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_rwkv_/g' {} +
    @find ./go-rwkv -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_rwkv_/g' {} +

go-rwkv/librwkv.a: go-rwkv
    cd go-rwkv && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a .. && cp ggml/src/libggml.a ..
    cd go-rwkv && cd rwkv.cpp && cmake . -DRWKV_BUILD_SHARED_LIBRARY=OFF && cmake --build . && cp librwkv.a ..

go-gpt4all-j/libgptj.a: go-gpt4all-j
    $(MAKE) -C go-gpt4all-j $(GENERIC_PREFIX)libgptj.a
## bloomz
bloomz:
    git clone --recurse-submodules https://github.com/go-skynet/bloomz.cpp bloomz
    @find ./bloomz -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_bloomz_/g' {} +
    @find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_bloomz_/g' {} +
    @find ./bloomz -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_bloomz_/g' {} +
    @find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gpt_bloomz_/g' {} +
    @find ./bloomz -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gpt_bloomz_/g' {} +
    @find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/void replace/void json_bloomz_replace/g' {} +
    @find ./bloomz -type f -name "*.cpp" -exec sed -i'' -e 's/::replace/::json_bloomz_replace/g' {} +

bloomz/libbloomz.a: bloomz
    cd bloomz && make libbloomz.a

go-bert/libgobert.a: go-bert
    $(MAKE) -C go-bert libgobert.a

backend-assets/gpt4all: gpt4all/gpt4all-bindings/golang/libgpt4all.a
    mkdir -p backend-assets/gpt4all
    @cp gpt4all/gpt4all-bindings/golang/buildllm/*.so backend-assets/gpt4all/ || true
    @cp gpt4all/gpt4all-bindings/golang/buildllm/*.dylib backend-assets/gpt4all/ || true
    @cp gpt4all/gpt4all-bindings/golang/buildllm/*.dll backend-assets/gpt4all/ || true

gpt4all/gpt4all-bindings/golang/libgpt4all.a: gpt4all
    $(MAKE) -C gpt4all/gpt4all-bindings/golang/ libgpt4all.a

## CEREBRAS GPT
go-gpt2:
    git clone --recurse-submodules https://github.com/go-skynet/go-gpt2.cpp go-gpt2
    cd go-gpt2 && git checkout -b build $(GOGPT2_VERSION) && git submodule update --init --recursive --depth 1
go-ggml-transformers:
    git clone --recurse-submodules https://github.com/go-skynet/go-ggml-transformers.cpp go-ggml-transformers
    cd go-ggml-transformers && git checkout -b build $(GOGPT2_VERSION) && git submodule update --init --recursive --depth 1
    # This is hackish, but needed as both go-llama and go-gpt4allj have their own version of ggml..
    @find ./go-gpt2 -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
    @find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
    @find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
    @find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_/gpt2_/g' {} +
    @find ./go-gpt2 -type f -name "*.h" -exec sed -i'' -e 's/gpt_/gpt2_/g' {} +
    @find ./go-gpt2 -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gpt2_/g' {} +
    @find ./go-ggml-transformers -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
    @find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
    @find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_gpt2_/g' {} +
    @find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_print_usage/gpt2_print_usage/g' {} +
    @find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/gpt_print_usage/gpt2_print_usage/g' {} +
    @find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_params_parse/gpt2_params_parse/g' {} +
    @find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/gpt_params_parse/gpt2_params_parse/g' {} +
    @find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/gpt_random_prompt/gpt2_random_prompt/g' {} +
    @find ./go-ggml-transformers -type f -name "*.h" -exec sed -i'' -e 's/gpt_random_prompt/gpt2_random_prompt/g' {} +
    @find ./go-ggml-transformers -type f -name "*.cpp" -exec sed -i'' -e 's/json_/json_gpt2_/g' {} +

go-gpt2/libgpt2.a: go-gpt2
    $(MAKE) -C go-gpt2 $(GENERIC_PREFIX)libgpt2.a
go-ggml-transformers/libtransformers.a: go-ggml-transformers
    $(MAKE) -C go-ggml-transformers libtransformers.a

whisper.cpp:
    git clone https://github.com/ggerganov/whisper.cpp.git
    cd whisper.cpp && git checkout -b build $(WHISPER_CPP_VERSION) && git submodule update --init --recursive --depth 1
    @find ./whisper.cpp -type f -name "*.c" -exec sed -i'' -e 's/ggml_/ggml_whisper_/g' {} +
    @find ./whisper.cpp -type f -name "*.cpp" -exec sed -i'' -e 's/ggml_/ggml_whisper_/g' {} +
    @find ./whisper.cpp -type f -name "*.h" -exec sed -i'' -e 's/ggml_/ggml_whisper_/g' {} +

whisper.cpp/libwhisper.a: whisper.cpp
    cd whisper.cpp && make libwhisper.a

go-llama:
    git clone -b $(GOLLAMA_VERSION) --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
    git clone --recurse-submodules https://github.com/go-skynet/go-llama.cpp go-llama
    cd go-llama && git checkout -b build $(GOLLAMA_VERSION) && git submodule update --init --recursive --depth 1

go-llama/libbinding.a: go-llama
    $(MAKE) -C go-llama $(GENERIC_PREFIX)libbinding.a
    $(MAKE) -C go-llama BUILD_TYPE=$(BUILD_TYPE) libbinding.a

replace:
    $(GOCMD) mod edit -replace github.com/go-skynet/go-llama.cpp=$(shell pwd)/go-llama
    $(GOCMD) mod edit -replace github.com/go-skynet/go-gpt4all-j.cpp=$(shell pwd)/go-gpt4all-j
    $(GOCMD) mod edit -replace github.com/go-skynet/go-gpt2.cpp=$(shell pwd)/go-gpt2
    $(GOCMD) mod edit -replace github.com/nomic-ai/gpt4all/gpt4all-bindings/golang=$(shell pwd)/gpt4all/gpt4all-bindings/golang
    $(GOCMD) mod edit -replace github.com/go-skynet/go-ggml-transformers.cpp=$(shell pwd)/go-ggml-transformers
    $(GOCMD) mod edit -replace github.com/donomii/go-rwkv.cpp=$(shell pwd)/go-rwkv
    $(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(shell pwd)/whisper.cpp
    $(GOCMD) mod edit -replace github.com/go-skynet/go-bert.cpp=$(shell pwd)/go-bert
    $(GOCMD) mod edit -replace github.com/go-skynet/bloomz.cpp=$(shell pwd)/bloomz
    $(GOCMD) mod edit -replace github.com/mudler/go-stable-diffusion=$(shell pwd)/go-stable-diffusion

prepare-sources: go-llama go-gpt2 go-gpt4all-j go-rwkv
prepare-sources: go-llama go-ggml-transformers gpt4all go-rwkv whisper.cpp go-bert bloomz go-stable-diffusion replace
    $(GOCMD) mod download

## GENERIC
rebuild: ## Rebuilds the project
    $(MAKE) -C go-llama clean
    $(MAKE) -C go-gpt4all-j clean
    $(MAKE) -C go-gpt2 clean
    $(MAKE) -C gpt4all/gpt4all-bindings/golang/ clean
    $(MAKE) -C go-ggml-transformers clean
    $(MAKE) -C go-rwkv clean
    $(MAKE) -C whisper.cpp clean
    $(MAKE) -C go-stable-diffusion clean
    $(MAKE) -C go-bert clean
    $(MAKE) -C bloomz clean
    $(MAKE) build

prepare: prepare-sources go-llama/libbinding.a go-gpt4all-j/libgptj.a go-gpt2/libgpt2.a go-rwkv/librwkv.a replace ## Prepares for building
prepare: prepare-sources backend-assets/gpt4all $(OPTIONAL_TARGETS) go-llama/libbinding.a go-bert/libgobert.a go-ggml-transformers/libtransformers.a go-rwkv/librwkv.a whisper.cpp/libwhisper.a bloomz/libbloomz.a ## Prepares for building

clean: ## Remove build related file
    rm -fr ./go-llama
    rm -rf ./go-gpt4all-j
    rm -rf ./gpt4all
    rm -rf ./go-gpt2
    rm -rf ./go-stable-diffusion
    rm -rf ./go-ggml-transformers
    rm -rf ./backend-assets
    rm -rf ./go-rwkv
    rm -rf ./go-bert
    rm -rf ./bloomz
    rm -rf ./whisper.cpp
    rm -rf $(BINARY_NAME)
    rm -rf release/

## Build:

build: prepare ## Build the project
    $(info ${GREEN}I local-ai build info:${RESET})
    $(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
    C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -o $(BINARY_NAME) ./
    $(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
    CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
ifeq ($(BUILD_TYPE),metal)
    cp go-llama/build/bin/ggml-metal.metal .
endif

dist: build
    mkdir -p release
    cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)

generic-build: ## Build the project using generic
    BUILD_TYPE="generic" $(MAKE) build

## Run
run: prepare ## run local-ai
    C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) run ./main.go
    CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) run ./

test-models/testmodel:
    mkdir test-models
    wget https://huggingface.co/concedo/cerebras-111M-ggml/resolve/main/cerberas-111m-q4_0.bin -O test-models/testmodel
    cp tests/fixtures/* test-models
    mkdir test-dir
    wget https://huggingface.co/nnakasato/ggml-model-test/resolve/main/ggml-model-q4.bin -O test-models/testmodel
    wget https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
    wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O test-models/bert
    wget https://cdn.openai.com/whisper/draft-20220913a/micro-machines.wav -O test-dir/audio.wav
    wget https://huggingface.co/mudler/rwkv-4-raven-1.5B-ggml/resolve/main/RWKV-4-Raven-1B5-v11-Eng99%2525-Other1%2525-20230425-ctx4096_Q4_0.bin -O test-models/rwkv
    wget https://raw.githubusercontent.com/saharNooby/rwkv.cpp/5eb8f09c146ea8124633ab041d9ea0b1f1db4459/rwkv/20B_tokenizer.json -O test-models/rwkv.tokenizer.json
    cp tests/models_fixtures/* test-models

test: prepare test-models/testmodel
    cp tests/fixtures/* test-models
    @C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) test -v -timeout 30m ./...
    cp -r backend-assets api
    cp tests/models_fixtures/* test-models
    C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="!gpt4all && !llama" --flake-attempts 5 -v -r ./api ./pkg
    C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="gpt4all" --flake-attempts 5 -v -r ./api ./pkg
    C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --label-filter="llama" --flake-attempts 5 -v -r ./api ./pkg

## Help:
help: ## Show this help.
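The `BUILD_TYPE` and `GO_TAGS` conditionals above translate directly into build invocations. A short sketch of the main variants this Makefile supports (flags follow the `ifeq` blocks; the matching hardware and toolchain prerequisites apply):

```bash
make build                          # default CPU build
BUILD_TYPE=openblas make build      # links against OpenBLAS (-lopenblas)
BUILD_TYPE=cublas make build        # CUDA build, exports LLAMA_CUBLAS=1
BUILD_TYPE=metal make build         # macOS Metal build, exports LLAMA_METAL=1
BUILD_TYPE=clblas make build        # OpenCL/CLBlast build
GO_TAGS=stablediffusion make build  # also builds go-stable-diffusion support
STATIC=true make dist               # static binary copied into release/
```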
516
README.md
516
README.md
@@ -9,67 +9,51 @@
|
||||
|
||||
[](https://discord.gg/uJAeKSAGDy)
|
||||
|
||||
**LocalAI** is a drop-in replacement REST API compatible with OpenAI for local CPU inferencing. It allows to run models locally or on-prem with consumer grade hardware. It is based on [llama.cpp](https://github.com/ggerganov/llama.cpp), [gpt4all](https://github.com/nomic-ai/gpt4all), [rwkv.cpp](https://github.com/saharNooby/rwkv.cpp) and [ggml](https://github.com/ggerganov/ggml), including support GPT4ALL-J which is licensed under Apache 2.0.
|
||||
**LocalAI** is a drop-in replacement REST API that’s compatible with OpenAI API specifications for local inferencing. It allows you to run LLMs (and not only) locally or on-prem with consumer grade hardware, supporting multiple model families that are compatible with the ggml format. Does not require GPU.
|
||||
|
||||
- OpenAI compatible API
|
||||
- Supports multiple-models
|
||||
For a list of the supported model families, please see [the model compatibility table](https://localai.io/model-compatibility/index.html#model-compatibility-table).
|
||||
|
||||
In a nutshell:
|
||||
|
||||
- Local, OpenAI drop-in alternative REST API. You own your data.
|
||||
- NO GPU required. NO Internet access is required either. Optional, GPU Acceleration is available in `llama.cpp`-compatible LLMs. [See building instructions](https://localai.io/basics/build/index.html).
|
||||
- Supports multiple models, Audio transcription, Text generation with GPTs, Image generation with stable diffusion (experimental)
|
||||
- Once loaded the first time, it keep models loaded in memory for faster inference
|
||||
- Support for prompt templates
|
||||
- Doesn't shell-out, but uses C bindings for a faster inference and better performance.
|
||||
- Doesn't shell-out, but uses C++ bindings for a faster inference and better performance.
|
||||
|
||||
LocalAI is a community-driven project, focused on making the AI accessible to anyone. Any contribution, feedback and PR is welcome! It was initially created by [mudler](https://github.com/mudler/) at the [SpectroCloud OSS Office](https://github.com/spectrocloud).
|
||||
LocalAI was created by [Ettore Di Giacinto](https://github.com/mudler/) and is a community-driven project, focused on making the AI accessible to anyone. Any contribution, feedback and PR is welcome!
|
||||
|
||||
### News
|
||||
| [ChatGPT OSS alternative](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) | [Image generation](https://localai.io/api-endpoints/index.html#image-generation) |
|
||||
|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|
|
||||
|  |  |
|
||||
|
||||
- 02-05-2023: Support for `rwkv.cpp` models ( https://github.com/go-skynet/LocalAI/pull/158 ) and for `/edits` endpoint
|
||||
- 01-05-2023: Support for SSE stream of tokens in `llama.cpp` backends ( https://github.com/go-skynet/LocalAI/pull/152 )
|
||||
|
||||
### Socials and community chatter
|
||||
See the [Getting started](https://localai.io/basics/getting_started/index.html) and [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) sections to learn how to use LocalAI. For a list of curated models check out the [model gallery](https://localai.io/models/).
|
||||
|
||||
- Follow [@LocalAI_API](https://twitter.com/LocalAI_API) on twitter.
|
||||
## News
|
||||
|
||||
- [Reddit post](https://www.reddit.com/r/selfhosted/comments/12w4p2f/localai_openai_compatible_api_to_run_llm_models/) about LocalAI.
|
||||
- 🔥🔥🔥 06-06-2023: **v1.18.0**: Many updates, new features, and much more 🚀, check out the [Changelog](https://localai.io/basics/news/index.html#-06-06-2023-__v1180__-)!
|
||||
- 29-05-2023: LocalAI now has a website, [https://localai.io](https://localai.io)! check the news in the [dedicated section](https://localai.io/basics/news/index.html)!
|
||||
|
||||
For latest news, follow also on Twitter [@LocalAI_API](https://twitter.com/LocalAI_API) and [@mudler_it](https://twitter.com/mudler_it)
|
||||
|
||||
## Contribute and help

To help the project you can:

- Upvote the [Reddit post](https://www.reddit.com/r/selfhosted/comments/12w4p2f/localai_openai_compatible_api_to_run_llm_models/) about LocalAI.
- [Hacker News post](https://news.ycombinator.com/item?id=35726934) - help us out by voting if you like this project.
- [Tutorial to use k8sgpt with LocalAI](https://medium.com/@tyler_97636/k8sgpt-localai-unlock-kubernetes-superpowers-for-free-584790de9b65) - an excellent use case for LocalAI, using AI to analyse Kubernetes clusters.
- If you have technical skills and want to contribute to development, have a look at the open issues. If you are new, you can have a look at the [good-first-issue](https://github.com/go-skynet/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and [help-wanted](https://github.com/go-skynet/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels.
- If you don't have technical skills you can still help by improving the documentation, adding examples, or sharing your user stories with our community; any help and contribution is welcome!
## Model compatibility

It is compatible with the models supported by [llama.cpp](https://github.com/ggerganov/llama.cpp) and also supports [GPT4ALL-J](https://github.com/nomic-ai/gpt4all) and [cerebras-GPT with ggml](https://huggingface.co/lxe/Cerebras-GPT-2.7B-Alpaca-SP-ggml).

Tested with:

- Vicuna
- Alpaca
- [GPT4ALL](https://github.com/nomic-ai/gpt4all)
- [GPT4ALL-J](https://gpt4all.io/models/ggml-gpt4all-j.bin)
- Koala
- [cerebras-GPT with ggml](https://huggingface.co/lxe/Cerebras-GPT-2.7B-Alpaca-SP-ggml)
- [RWKV](https://github.com/BlinkDL/RWKV-LM) models with [rwkv.cpp](https://github.com/saharNooby/rwkv.cpp)

It should also be compatible with StableLM and GPTNeoX ggml models (untested).

Note: You might need to convert older models to the new format; see [here](https://github.com/ggerganov/llama.cpp#using-gpt4all) for instance to run `gpt4all`.
### RWKV

<details>

For `rwkv` models, you also need to put the associated tokenizer next to the ggml model:

```
ls models
36464540 -rw-r--r-- 1 mudler mudler 1.2G May  3 10:51 rwkv_small
36464543 -rw-r--r-- 1 mudler mudler 2.4M May  3 10:51 rwkv_small.tokenizer.json
```

</details>
## Usage

> `LocalAI` comes by default as a container image. You can check out all the available images with corresponding tags [here](https://quay.io/repository/go-skynet/local-ai?tab=tags&tag=latest).

Check out the [Getting started](https://localai.io/basics/getting_started/index.html) section. Below you will find generic, quick instructions to get up and running with LocalAI.

The easiest way to run LocalAI is with `docker-compose` (to build locally, see [building LocalAI](https://localai.io/basics/build/index.html)):

```bash
cp your-model.bin models/
# (optional) edit the .env file to adjust the defaults
# vim .env

# start with docker-compose
docker-compose up -d --pull always
# or you can build the images with:
# docker-compose up -d --build

# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models
```

For example, to run LocalAI with the `ggml-gpt4all-j` model:

<details>

```bash
cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/
# vim .env

# start with docker-compose
docker-compose up -d --pull always
# or you can build the images with:
# docker-compose up -d --build

# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models
# {"object":"list","data":[{"id":"ggml-gpt4all-j","object":"model"}]}

curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "ggml-gpt4all-j",
  "messages": [{"role": "user", "content": "How are you?"}],
  "temperature": 0.9
}'
```

</details>

To build locally, run `make build` (see below).
### Other examples

![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)

To see other examples on how to integrate with other projects, for instance chatbot-ui, see: [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/).
### Advanced configuration

LocalAI can be configured to serve user-defined models with a set of default parameters and templates.

<details>

You can create multiple `yaml` files in the models path, or specify a single YAML configuration file.
Consider the following `models` folder in `examples/chatbot-ui`:

```
base ❯ ls -liah examples/chatbot-ui/models
36487587 drwxr-xr-x 2 mudler mudler 4.0K May  3 12:27 .
36487586 drwxr-xr-x 3 mudler mudler 4.0K May  3 10:42 ..
36465214 -rw-r--r-- 1 mudler mudler   10 Apr 27 07:46 completion.tmpl
36464855 -rw-r--r-- 1 mudler mudler 3.6G Apr 27 00:08 ggml-gpt4all-j
36464537 -rw-r--r-- 1 mudler mudler  245 May  3 10:42 gpt-3.5-turbo.yaml
36467388 -rw-r--r-- 1 mudler mudler  180 Apr 27 07:46 gpt4all.tmpl
```

The `gpt-3.5-turbo.yaml` file defines a `gpt-3.5-turbo` model, which is an alias that runs `ggml-gpt4all-j` with pre-defined options.

For instance, consider the following, which declares `gpt-3.5-turbo` backed by the `ggml-gpt4all-j` model:

```yaml
name: gpt-3.5-turbo
# Default model parameters
parameters:
  # Relative to the models path
  model: ggml-gpt4all-j
  # temperature
  temperature: 0.3
  # all the OpenAI request options here..

# Default context size
context_size: 512
threads: 10
# Define a backend (optional). By default it will try to guess the backend the first time the model is interacted with.
backend: gptj # available: llama, stablelm, gpt2, gptj, rwkv
# stopwords (if supported by the backend)
stopwords:
- "HUMAN:"
- "### Response:"
# define chat roles
roles:
  user: "HUMAN:"
  system: "GPT:"
template:
  # template file ".tmpl" with the prompt template to use by default on the endpoint call. Note there is no extension in the files
  completion: completion
  chat: ggml-gpt4all-j
```
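With such a file in place, clients can address the alias directly. For example (assuming the server is running on the default port):

```bash
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "gpt-3.5-turbo",
  "messages": [{"role": "user", "content": "How are you?"}]
}'
```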
Specifying a `config-file` via CLI allows declaring models in a single file as a list, for instance:

```yaml
- name: list1
  parameters:
    model: testmodel
  context_size: 512
  threads: 10
  stopwords:
  - "HUMAN:"
  - "### Response:"
  roles:
    user: "HUMAN:"
    system: "GPT:"
  template:
    completion: completion
    chat: ggml-gpt4all-j
- name: list2
  parameters:
    model: testmodel
  context_size: 512
  threads: 10
  stopwords:
  - "HUMAN:"
  - "### Response:"
  roles:
    user: "HUMAN:"
    system: "GPT:"
  template:
    completion: completion
    chat: ggml-gpt4all-j
```

See also [chatbot-ui](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) as an example on how to use config files.
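A quick sketch of wiring this up (file name and paths are illustrative): start the server pointing at the config file, then address any declared model by name:

```bash
local-ai --models-path ./models --config-file ./models.yaml

curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "list1",
  "messages": [{"role": "user", "content": "Hello"}]
}'
```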
</details>

### Prompt templates

The API doesn't inject a default prompt for talking to the model. You have to use a prompt similar to what's described in the [stanford-alpaca docs](https://github.com/tatsu-lab/stanford_alpaca#data-release).

<details>

You can use a default template for every model present in your model path by creating a corresponding file with the `.tmpl` suffix next to your model. For instance, if the model is called `foo.bin`, you can create a sibling file, `foo.bin.tmpl`, which will be used as the default prompt and which can be used with alpaca:

```
The below instruction describes a task. Write a response that appropriately completes the request.

### Instruction:
{{.Input}}

### Response:
```

See the [prompt-templates](https://github.com/go-skynet/LocalAI/tree/master/prompt-templates) directory in this repository for templates for some of the most popular models.
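Templates use Go `text/template` syntax: `{{.Input}}` is substituted with the prompt at request time. For example, to reuse one of the shipped templates for a (hypothetical) model file named `foo.bin`:

```bash
# the .tmpl file just has to sit next to the model file in the models path
cp prompt-templates/ggml-gpt4all-j.tmpl models/foo.bin.tmpl
```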
For the edit endpoint, an example template for alpaca-based models can be:

```
Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{{.Instruction}}

### Input:
{{.Input}}

### Response:
```

</details>
### CLI

You can control LocalAI with command line arguments, to specify a binding address or the number of threads.

<details>

Usage:

```
local-ai --models-path <model_path> [--address <address>] [--threads <num_threads>]
```

| Parameter    | Environment Variable | Default Value            | Description                                           |
|--------------|----------------------|--------------------------|-------------------------------------------------------|
| models-path  | MODELS_PATH          |                          | The path where you have models (ending with `.bin`).  |
| threads      | THREADS              | Number of physical cores | The number of threads to use for text generation.     |
| address      | ADDRESS              | :8080                    | The address and port to listen on.                    |
| context-size | CONTEXT_SIZE         | 512                      | Default token context size.                           |
| debug        | DEBUG                | false                    | Enable debug mode.                                    |
| config-file  | CONFIG_FILE          | empty                    | Path to a LocalAI config file.                        |
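For example (values are illustrative), the same settings can be passed either as flags or as environment variables:

```bash
local-ai --models-path ./models --address ":8080" --threads 8 --context-size 1024

# equivalent, using environment variables
MODELS_PATH=./models ADDRESS=":8080" THREADS=8 CONTEXT_SIZE=1024 local-ai
```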
</details>

## Setup

Currently LocalAI comes as a container image and can be used with docker or another container engine of choice. You can check out all the available images with corresponding tags [here](https://quay.io/repository/go-skynet/local-ai?tab=tags&tag=latest).

### Docker

<details>

Example of starting the API with `docker`:

```bash
docker run -p 8080:8080 -ti --rm quay.io/go-skynet/local-ai:latest --models-path /path/to/models --context-size 700 --threads 4
```

You should see:

```
┌───────────────────────────────────────────────────┐
│                   Fiber v2.42.0                   │
│               http://127.0.0.1:8080               │
│       (bound on host 0.0.0.0 and port 8080)       │
│                                                   │
│ Handlers ............. 1  Processes ........... 1 │
│ Prefork ....... Disabled  PID ................. 1 │
└───────────────────────────────────────────────────┘
```

</details>
### Build locally

In order to build the `LocalAI` container image locally you can use `docker`:

<details>

```
# build the image
docker build -t localai .
docker run localai
```

Or you can build the binary with `make`:

```
make build
```

</details>
### Build on mac

Building on Mac (M1 or M2) works, but you may need to install some prerequisites using `brew`.

<details>

The below has been tested by one mac user and found to work. Note that this doesn't use docker to run the server:

```
# install build dependencies
brew install cmake
brew install go

# clone the repo
git clone https://github.com/go-skynet/LocalAI.git

cd LocalAI

# build the binary
make build

# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j

# Use a template from the examples
cp -rf prompt-templates/ggml-gpt4all-j.tmpl models/

# Run LocalAI
./local-ai --models-path ./models/ --debug

# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models

curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "ggml-gpt4all-j",
  "messages": [{"role": "user", "content": "How are you?"}],
  "temperature": 0.9
}'
```

</details>

### Windows compatibility

It should work, however you need to make sure you give enough resources to the container. See https://github.com/go-skynet/LocalAI/issues/2.

See the [build section](https://localai.io/basics/build/index.html) in our documentation for detailed instructions.
### Run LocalAI in Kubernetes

LocalAI can be installed inside Kubernetes with helm. See the [installation instructions](https://localai.io/basics/getting_started/index.html#run-localai-in-kubernetes).

<details>

1. Add the helm repo
```bash
helm repo add go-skynet https://go-skynet.github.io/helm-charts/
```
2. Create a values file with your settings:
```bash
cat <<EOF > values.yaml
deployment:
  image: quay.io/go-skynet/local-ai:latest
  env:
    threads: 4
    contextSize: 1024
    modelsPath: "/models"
# Optionally create a PVC, mount the PV to the LocalAI Deployment,
# and download a model to prepopulate the models directory
modelsVolume:
  enabled: true
  url: "https://gpt4all.io/models/ggml-gpt4all-j.bin"
  pvc:
    size: 6Gi
    accessModes:
    - ReadWriteOnce
  auth:
    # Optional value for HTTP basic access authentication header
    basic: "" # 'username:password' base64 encoded
service:
  type: ClusterIP
  annotations: {}
  # If using an AWS load balancer, you'll need to override the default 60s load balancer idle timeout
  # service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1200"
EOF
```
3. Install the helm chart:
```bash
helm repo update
helm install local-ai go-skynet/local-ai -f values.yaml
```

Check out also the [helm chart repository on GitHub](https://github.com/go-skynet/helm-charts).

</details>
## Supported OpenAI API endpoints

You can check out the [OpenAI API reference](https://platform.openai.com/docs/api-reference/chat/create).

Following is the list of supported endpoints and parameters.

Note:

- You can also specify the model as part of the OpenAI token.
- If only one model is available, the API will use it for all the requests.
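For example, a sketch of selecting the model via the token (model name illustrative):

```bash
curl http://localhost:8080/v1/completions \
  -H "Authorization: Bearer ggml-gpt4all-j" \
  -H "Content-Type: application/json" \
  -d '{"prompt": "A long time ago"}'
```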
### Chat completions

<details>

For example, to generate a chat completion, you can send a POST request to the `/v1/chat/completions` endpoint with the instruction as the request body:

```
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "ggml-koala-7b-model-q4_0-r2.bin",
  "messages": [{"role": "user", "content": "Say this is a test!"}],
  "temperature": 0.7
}'
```

Available additional parameters: `top_p`, `top_k`, `max_tokens`
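For instance, the additional parameters ride alongside the standard fields (values are illustrative):

```
curl http://localhost:8080/v1/chat/completions -H "Content-Type: application/json" -d '{
  "model": "ggml-koala-7b-model-q4_0-r2.bin",
  "messages": [{"role": "user", "content": "Say this is a test!"}],
  "top_p": 0.9,
  "top_k": 40,
  "max_tokens": 100
}'
```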
</details>

### Edit completions

<details>

To generate an edit completion you can send a POST request to the `/v1/edits` endpoint with the instruction as the request body:

```
curl http://localhost:8080/v1/edits -H "Content-Type: application/json" -d '{
  "model": "ggml-koala-7b-model-q4_0-r2.bin",
  "instruction": "rephrase",
  "input": "Black cat jumped out of the window",
  "temperature": 0.7
}'
```

Available additional parameters: `top_p`, `top_k`, `max_tokens`.

</details>
### Completions

<details>

To generate a completion, you can send a POST request to the `/v1/completions` endpoint with the instruction as per the request body:

```
curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
  "model": "ggml-koala-7b-model-q4_0-r2.bin",
  "prompt": "A long time ago in a galaxy far, far away",
  "temperature": 0.7
}'
```

Available additional parameters: `top_p`, `top_k`, `max_tokens`

</details>

### List models

<details>

You can list all the models available with:

```
curl http://localhost:8080/v1/models
```

</details>

See the [list of the supported API endpoints](https://localai.io/api-endpoints/index.html) and how to configure image generation and audio transcription.
## Frequently asked questions

Here are answers to some of the most common questions.

### How do I get models?

<details>

Most ggml-based models should work, but newer models may require additions to the API. If a model doesn't work, please feel free to open up issues. However, be cautious about downloading models from the internet and directly onto your machine, as there may be security vulnerabilities in llama.cpp or ggml that could be maliciously exploited. Some models can be found on Hugging Face: https://huggingface.co/models?search=ggml, and models from gpt4all should also work: https://github.com/nomic-ai/gpt4all.

</details>

### What's the difference with Serge, or XXX?

<details>

LocalAI is a multi-model solution that doesn't focus on a specific model type (e.g., llama.cpp or alpaca.cpp); it handles all of these internally for faster inference, and it is easy to set up locally and to deploy to Kubernetes.

</details>

### Can I use it with a Discord bot, or XXX?

<details>

Yes! If the client uses OpenAI and supports setting a different base URL to send requests to, you can use the LocalAI endpoint. This allows using LocalAI with every application that was built to work with OpenAI, without changing the application!
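For example, with clients built on the common OpenAI libraries, pointing the base URL at LocalAI is usually enough. The exact variable names depend on the client; these are common conventions, shown here as an assumption:

```bash
export OPENAI_API_BASE=http://localhost:8080/v1
export OPENAI_API_KEY=sk-anything  # any placeholder; LocalAI does not require a real key by default
```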
</details>

### Can this leverage GPUs?

<details>

Not currently, as ggml doesn't support GPUs yet: https://github.com/ggerganov/llama.cpp/discussions/915.

</details>

### Where is the webUI?

<details>

We are working on having a good out-of-the-box experience; however, as LocalAI is an API, you can already plug it into existing projects that provide UI interfaces to OpenAI's APIs. There are several already on GitHub, and they should be compatible with LocalAI already (as it mimics the OpenAI API).

</details>

### Does it work with AutoGPT?

<details>

AutoGPT currently doesn't allow setting a different API URL, but there is a PR open for it, so this should be possible soon!

</details>

See [the FAQ](https://localai.io/faq/index.html) section for a list of common questions.
## Projects already using LocalAI to run local models

Feel free to open up a PR to get your project listed!

- [Kairos](https://github.com/kairos-io/kairos)
- [k8sgpt](https://github.com/k8sgpt-ai/k8sgpt#running-local-models)
- [Spark](https://github.com/cedriking/spark)
- [autogpt4all](https://github.com/aorumbayev/autogpt4all)
- [Mods](https://github.com/charmbracelet/mods)
- [Flowise](https://github.com/FlowiseAI/Flowise)

## Blog posts and other articles

- https://medium.com/@tyler_97636/k8sgpt-localai-unlock-kubernetes-superpowers-for-free-584790de9b65
- https://kairos.io/docs/examples/localai/
## Short-term roadmap

- [x] Multi-model support
- [x] Have a webUI!
- [x] Allow configuration of defaults for models.
- [x] Support for embeddings
- [x] Support for audio transcription with https://github.com/ggerganov/whisper.cpp
- [ ] GPU/CUDA support ( https://github.com/go-skynet/LocalAI/issues/69 )
- [ ] Enable automatic downloading of models from a curated gallery, with only free-licensed models, directly from the webui.
## Star history

## License

LocalAI is a community-driven project created by [Ettore Di Giacinto](https://github.com/mudler/).

MIT

## Golang bindings used

- [go-skynet/go-llama.cpp](https://github.com/go-skynet/go-llama.cpp)
- [go-skynet/go-gpt4all-j.cpp](https://github.com/go-skynet/go-gpt4all-j.cpp)
- [go-skynet/go-gpt2.cpp](https://github.com/go-skynet/go-gpt2.cpp)
- [donomii/go-rwkv.cpp](https://github.com/donomii/go-rwkv.cpp)

## Author

Ettore Di Giacinto and others

## Acknowledgements

LocalAI couldn't have been built without the help of great software already available from the community. Thank you!

- [llama.cpp](https://github.com/ggerganov/llama.cpp)
- https://github.com/tatsu-lab/stanford_alpaca
- https://github.com/cornelk/llama-go for the initial ideas
- https://github.com/antimatter15/alpaca.cpp
- https://github.com/EdVince/Stable-Diffusion-NCNN
- https://github.com/ggerganov/whisper.cpp
- https://github.com/saharNooby/rwkv.cpp

## Contributors
`api/api.go`:

```diff
@@ -3,23 +3,27 @@ package api
import (
	"errors"

	model "github.com/go-skynet/LocalAI/pkg/model"
	"github.com/go-skynet/LocalAI/pkg/assets"
	"github.com/gofiber/fiber/v2"
	"github.com/gofiber/fiber/v2/middleware/cors"
	"github.com/gofiber/fiber/v2/middleware/logger"
	"github.com/gofiber/fiber/v2/middleware/recover"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func App(configFile string, loader *model.ModelLoader, threads, ctxSize int, f16 bool, debug, disableMessage bool) *fiber.App {
func App(opts ...AppOption) (*fiber.App, error) {
	options := newOptions(opts...)

	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	if debug {
	if options.debug {
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
	}

	// Return errors as JSON responses
	app := fiber.New(fiber.Config{
		DisableStartupMessage: disableMessage,
		BodyLimit:             options.uploadLimitMB * 1024 * 1024, // this is the default limit of 4MB
		DisableStartupMessage: options.disableMessage,
		// Override default error handler
		ErrorHandler: func(ctx *fiber.Ctx, err error) error {
			// Status code defaults to 500
@@ -40,38 +44,109 @@ func App(configFile string, loader *model.ModelLoader, threads, ctxSize int, f16
		},
	})

	cm := make(ConfigMerger)
	if err := cm.LoadConfigs(loader.ModelPath); err != nil {
	if options.debug {
		app.Use(logger.New(logger.Config{
			Format: "[${ip}]:${port} ${status} - ${method} ${path}\n",
		}))
	}

	cm := NewConfigMerger()
	if err := cm.LoadConfigs(options.loader.ModelPath); err != nil {
		log.Error().Msgf("error loading config files: %s", err.Error())
	}

	if configFile != "" {
		if err := cm.LoadConfigFile(configFile); err != nil {
	if options.configFile != "" {
		if err := cm.LoadConfigFile(options.configFile); err != nil {
			log.Error().Msgf("error loading config file: %s", err.Error())
		}
	}

	if debug {
		for k, v := range cm {
			log.Debug().Msgf("Model: %s (config: %+v)", k, v)
	if options.debug {
		for _, v := range cm.ListConfigs() {
			cfg, _ := cm.GetConfig(v)
			log.Debug().Msgf("Model: %s (config: %+v)", v, cfg)
		}
	}

	if options.assetsDestination != "" {
		// Extract files from the embedded FS
		err := assets.ExtractFiles(options.backendAssets, options.assetsDestination)
		if err != nil {
			log.Warn().Msgf("Failed extracting backend assets files: %s (might be required for some backends to work properly, like gpt4all)", err)
		}
	}

	// Default middleware config
	app.Use(recover.New())
	app.Use(cors.New())

	if options.preloadJSONModels != "" {
		if err := ApplyGalleryFromString(options.loader.ModelPath, options.preloadJSONModels, cm); err != nil {
			return nil, err
		}
	}

	if options.preloadModelsFromPath != "" {
		if err := ApplyGalleryFromFile(options.loader.ModelPath, options.preloadModelsFromPath, cm); err != nil {
			return nil, err
		}
	}

	if options.cors {
		if options.corsAllowOrigins == "" {
			app.Use(cors.New())
		} else {
			app.Use(cors.New(cors.Config{
				AllowOrigins: options.corsAllowOrigins,
			}))
		}
	}

	// LocalAI API endpoints
	applier := newGalleryApplier(options.loader.ModelPath)
	applier.start(options.context, cm)
	app.Post("/models/apply", applyModelGallery(options.loader.ModelPath, cm, applier.C))
	app.Get("/models/jobs/:uuid", getOpStatus(applier))

	// openAI compatible API endpoint
	app.Post("/v1/chat/completions", chatEndpoint(cm, debug, loader, threads, ctxSize, f16))
	app.Post("/chat/completions", chatEndpoint(cm, debug, loader, threads, ctxSize, f16))

	app.Post("/v1/edits", editEndpoint(cm, debug, loader, threads, ctxSize, f16))
	app.Post("/edits", editEndpoint(cm, debug, loader, threads, ctxSize, f16))

	// chat
	app.Post("/v1/chat/completions", chatEndpoint(cm, options))
	app.Post("/chat/completions", chatEndpoint(cm, options))

	app.Post("/v1/completions", completionEndpoint(cm, debug, loader, threads, ctxSize, f16))
	app.Post("/completions", completionEndpoint(cm, debug, loader, threads, ctxSize, f16))

	// edit
	app.Post("/v1/edits", editEndpoint(cm, options))
	app.Post("/edits", editEndpoint(cm, options))

	app.Get("/v1/models", listModels(loader, cm))
	app.Get("/models", listModels(loader, cm))

	// completion
	app.Post("/v1/completions", completionEndpoint(cm, options))
	app.Post("/completions", completionEndpoint(cm, options))

	return app
	// embeddings
	app.Post("/v1/embeddings", embeddingsEndpoint(cm, options))
	app.Post("/embeddings", embeddingsEndpoint(cm, options))
	app.Post("/v1/engines/:model/embeddings", embeddingsEndpoint(cm, options))

	// audio
	app.Post("/v1/audio/transcriptions", transcriptEndpoint(cm, options))

	// images
	app.Post("/v1/images/generations", imageEndpoint(cm, options))

	if options.imageDir != "" {
		app.Static("/generated-images", options.imageDir)
	}

	ok := func(c *fiber.Ctx) error {
		return c.SendStatus(200)
	}

	// Kubernetes health checks
	app.Get("/healthz", ok)
	app.Get("/readyz", ok)

	// models
	app.Get("/v1/models", listModels(options.loader, cm))
	app.Get("/models", listModels(options.loader, cm))

	return app, nil
}
```
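The gallery endpoints registered above (`/models/apply`, `/models/jobs/:uuid`) can be exercised over HTTP; a minimal sketch based on the payloads used in the test suite below (host and port are illustrative):

```bash
curl http://localhost:8080/models/apply -H "Content-Type: application/json" -d '{
  "url": "github:go-skynet/model-gallery/gpt4all-j.yaml",
  "name": "gpt4all-j"
}'
# the response carries a "uuid"; poll the job until "processed" is true
curl http://localhost:8080/models/jobs/<uuid>
```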
`api/api_test.go`:

```diff
@@ -1,29 +1,264 @@
package api_test

import (
	"bytes"
	"context"
	"embed"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"runtime"

	. "github.com/go-skynet/LocalAI/api"
	"github.com/go-skynet/LocalAI/pkg/model"
	"github.com/gofiber/fiber/v2"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"gopkg.in/yaml.v3"

	openaigo "github.com/otiai10/openaigo"
	"github.com/sashabaranov/go-openai"
)

type modelApplyRequest struct {
	URL       string            `json:"url"`
	Name      string            `json:"name"`
	Overrides map[string]string `json:"overrides"`
}

func getModelStatus(url string) (response map[string]interface{}) {
	// Create the HTTP request
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("Error creating request:", err)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Error reading response body:", err)
		return
	}

	// Unmarshal the response into a map[string]interface{}
	err = json.Unmarshal(body, &response)
	if err != nil {
		fmt.Println("Error unmarshaling JSON response:", err)
		return
	}
	return
}

func postModelApplyRequest(url string, request modelApplyRequest) (response map[string]interface{}) {

	//url := "http://localhost:AI/models/apply"

	// Create the request payload

	payload, err := json.Marshal(request)
	if err != nil {
		fmt.Println("Error marshaling JSON:", err)
		return
	}

	// Create the HTTP request
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(payload))
	if err != nil {
		fmt.Println("Error creating request:", err)
		return
	}
	req.Header.Set("Content-Type", "application/json")

	// Make the request
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Error making request:", err)
		return
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Error reading response body:", err)
		return
	}

	// Unmarshal the response into a map[string]interface{}
	err = json.Unmarshal(body, &response)
	if err != nil {
		fmt.Println("Error unmarshaling JSON response:", err)
		return
	}
	return
}

//go:embed backend-assets/*
var backendAssets embed.FS

var _ = Describe("API test", func() {

	var app *fiber.App
	var modelLoader *model.ModelLoader
	var client *openai.Client
	var client2 *openaigo.Client
	var c context.Context
	var cancel context.CancelFunc
	var tmpdir string

	Context("API with ephemeral models", func() {
		BeforeEach(func() {
			var err error
			tmpdir, err = os.MkdirTemp("", "")
			Expect(err).ToNot(HaveOccurred())

			modelLoader = model.NewModelLoader(tmpdir)
			c, cancel = context.WithCancel(context.Background())

			app, err = App(WithContext(c), WithModelLoader(modelLoader), WithBackendAssets(backendAssets), WithBackendAssetsOutput(tmpdir))
			Expect(err).ToNot(HaveOccurred())
			go app.Listen("127.0.0.1:9090")

			defaultConfig := openai.DefaultConfig("")
			defaultConfig.BaseURL = "http://127.0.0.1:9090/v1"

			client2 = openaigo.NewClient("")
			client2.BaseURL = defaultConfig.BaseURL

			// Wait for API to be ready
			client = openai.NewClientWithConfig(defaultConfig)
			Eventually(func() error {
				_, err := client.ListModels(context.TODO())
				return err
			}, "2m").ShouldNot(HaveOccurred())
		})

		AfterEach(func() {
			cancel()
			app.Shutdown()
			os.RemoveAll(tmpdir)
		})

		Context("Applying models", func() {
			It("overrides models", func() {
				response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{
					URL:  "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml",
					Name: "bert",
					Overrides: map[string]string{
						"backend": "llama",
					},
				})

				Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response))

				uuid := response["uuid"].(string)

				Eventually(func() bool {
					response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
					fmt.Println(response)
					return response["processed"].(bool)
				}, "360s").Should(Equal(true))

				dat, err := os.ReadFile(filepath.Join(tmpdir, "bert.yaml"))
				Expect(err).ToNot(HaveOccurred())

				content := map[string]interface{}{}
				err = yaml.Unmarshal(dat, &content)
				Expect(err).ToNot(HaveOccurred())
				Expect(content["backend"]).To(Equal("llama"))
			})
			It("apply models without overrides", func() {
				response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{
					URL:       "https://raw.githubusercontent.com/go-skynet/model-gallery/main/bert-embeddings.yaml",
					Name:      "bert",
					Overrides: map[string]string{},
				})

				Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response))

				uuid := response["uuid"].(string)

				Eventually(func() bool {
					response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
					fmt.Println(response)
					return response["processed"].(bool)
				}, "360s").Should(Equal(true))

				dat, err := os.ReadFile(filepath.Join(tmpdir, "bert.yaml"))
				Expect(err).ToNot(HaveOccurred())

				content := map[string]interface{}{}
				err = yaml.Unmarshal(dat, &content)
				Expect(err).ToNot(HaveOccurred())
				Expect(content["backend"]).To(Equal("bert-embeddings"))
			})

			It("runs openllama", Label("llama"), func() {
				if runtime.GOOS != "linux" {
					Skip("test supported only on linux")
				}
				response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{
					URL:       "github:go-skynet/model-gallery/openllama_3b.yaml",
					Name:      "openllama_3b",
					Overrides: map[string]string{},
				})

				Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response))

				uuid := response["uuid"].(string)

				Eventually(func() bool {
					response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
					fmt.Println(response)
					return response["processed"].(bool)
				}, "360s").Should(Equal(true))

				resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "openllama_3b", Prompt: "Count up to five: one, two, three, four, "})
				Expect(err).ToNot(HaveOccurred())
				Expect(len(resp.Choices)).To(Equal(1))
				Expect(resp.Choices[0].Text).To(ContainSubstring("five"))
			})

			It("runs gpt4all", Label("gpt4all"), func() {
				if runtime.GOOS != "linux" {
					Skip("test supported only on linux")
				}

				response := postModelApplyRequest("http://127.0.0.1:9090/models/apply", modelApplyRequest{
					URL:       "github:go-skynet/model-gallery/gpt4all-j.yaml",
					Name:      "gpt4all-j",
					Overrides: map[string]string{},
				})

				Expect(response["uuid"]).ToNot(BeEmpty(), fmt.Sprint(response))

				uuid := response["uuid"].(string)

				Eventually(func() bool {
					response := getModelStatus("http://127.0.0.1:9090/models/jobs/" + uuid)
					fmt.Println(response)
					return response["processed"].(bool)
				}, "360s").Should(Equal(true))

				resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "gpt4all-j", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "How are you?"}}})
				Expect(err).ToNot(HaveOccurred())
				Expect(len(resp.Choices)).To(Equal(1))
				Expect(resp.Choices[0].Message.Content).To(ContainSubstring("well"))
			})
		})
	})

	Context("API query", func() {
		BeforeEach(func() {
			modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH"))
			app = App("", modelLoader, 1, 512, false, true, true)
			c, cancel = context.WithCancel(context.Background())

			var err error
			app, err = App(WithContext(c), WithModelLoader(modelLoader))
			Expect(err).ToNot(HaveOccurred())
			go app.Listen("127.0.0.1:9090")

			defaultConfig := openai.DefaultConfig("")
@@ -40,13 +275,13 @@ var _ = Describe("API test", func() {
			}, "2m").ShouldNot(HaveOccurred())
		})
		AfterEach(func() {
			cancel()
			app.Shutdown()
		})
		It("returns the models list", func() {
			models, err := client.ListModels(context.TODO())
			Expect(err).ToNot(HaveOccurred())
			Expect(len(models.Models)).To(Equal(3))
			Expect(models.Models[0].ID).To(Equal("testmodel"))
			Expect(len(models.Models)).To(Equal(10))
		})
		It("can generate completions", func() {
			resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "testmodel", Prompt: "abcdedfghikl"})
@@ -79,15 +314,71 @@ var _ = Describe("API test", func() {
		It("returns errors", func() {
			_, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "foomodel", Prompt: "abcdedfghikl"})
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error: 5 errors occurred:"))
			Expect(err.Error()).To(ContainSubstring("error, status code: 500, message: could not load model - all backends returned error: 11 errors occurred:"))
		})
		It("transcribes audio", func() {
			if runtime.GOOS != "linux" {
				Skip("test supported only on linux")
			}
			resp, err := client.CreateTranscription(
				context.Background(),
				openai.AudioRequest{
					Model:    openai.Whisper1,
					FilePath: filepath.Join(os.Getenv("TEST_DIR"), "audio.wav"),
				},
			)
			Expect(err).ToNot(HaveOccurred())
			Expect(resp.Text).To(ContainSubstring("This is the Micro Machine Man presenting"))
		})

		It("calculate embeddings", func() {
			if runtime.GOOS != "linux" {
				Skip("test supported only on linux")
			}
			resp, err := client.CreateEmbeddings(
				context.Background(),
				openai.EmbeddingRequest{
					Model: openai.AdaEmbeddingV2,
					Input: []string{"sun", "cat"},
				},
			)
			Expect(err).ToNot(HaveOccurred())
			Expect(len(resp.Data[0].Embedding)).To(BeNumerically("==", 384))
			Expect(len(resp.Data[1].Embedding)).To(BeNumerically("==", 384))

			sunEmbedding := resp.Data[0].Embedding
			resp2, err := client.CreateEmbeddings(
				context.Background(),
				openai.EmbeddingRequest{
					Model: openai.AdaEmbeddingV2,
					Input: []string{"sun"},
				},
			)
			Expect(err).ToNot(HaveOccurred())
			Expect(resp2.Data[0].Embedding).To(Equal(sunEmbedding))
		})

		Context("backends", func() {
			It("runs rwkv", func() {
				if runtime.GOOS != "linux" {
					Skip("test supported only on linux")
				}
				resp, err := client.CreateCompletion(context.TODO(), openai.CompletionRequest{Model: "rwkv_test", Prompt: "Count up to five: one, two, three, four,"})
				Expect(err).ToNot(HaveOccurred())
				Expect(len(resp.Choices) > 0).To(BeTrue())
				Expect(resp.Choices[0].Text).To(Equal(" five."))
			})
		})
	})

	Context("Config file", func() {
		BeforeEach(func() {
			modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH"))
			app = App(os.Getenv("CONFIG_FILE"), modelLoader, 1, 512, false, true, true)
			c, cancel = context.WithCancel(context.Background())

			var err error
			app, err = App(WithContext(c), WithModelLoader(modelLoader), WithConfigFile(os.Getenv("CONFIG_FILE")))
			Expect(err).ToNot(HaveOccurred())
			go app.Listen("127.0.0.1:9090")

			defaultConfig := openai.DefaultConfig("")
@@ -102,14 +393,13 @@ var _ = Describe("API test", func() {
			}, "2m").ShouldNot(HaveOccurred())
		})
		AfterEach(func() {
			cancel()
			app.Shutdown()
		})
		It("can generate chat completions from config file", func() {

			models, err := client.ListModels(context.TODO())
			Expect(err).ToNot(HaveOccurred())
			Expect(len(models.Models)).To(Equal(5))
			Expect(models.Models[0].ID).To(Equal("testmodel"))
			Expect(len(models.Models)).To(Equal(12))
		})
		It("can generate chat completions from config file", func() {
			resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{Model: "list1", Messages: []openai.ChatCompletionMessage{openai.ChatCompletionMessage{Role: "user", Content: "abcdedfghikl"}}})
@@ -134,5 +424,6 @@ var _ = Describe("API test", func() {
			Expect(len(resp.Choices)).To(Equal(1))
			Expect(resp.Choices[0].Text).ToNot(BeEmpty())
		})

	})
})
```
`api/config.go`:

```diff
@@ -1,12 +1,17 @@
package api

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"sync"

	model "github.com/go-skynet/LocalAI/pkg/model"
	"github.com/gofiber/fiber/v2"
	"github.com/rs/zerolog/log"
	"gopkg.in/yaml.v3"
)

@@ -21,8 +26,27 @@ type Config struct {
	Threads        int               `yaml:"threads"`
	Debug          bool              `yaml:"debug"`
	Roles          map[string]string `yaml:"roles"`
	Embeddings     bool              `yaml:"embeddings"`
	Backend        string            `yaml:"backend"`
	TemplateConfig TemplateConfig    `yaml:"template"`
	MirostatETA    float64           `yaml:"mirostat_eta"`
	MirostatTAU    float64           `yaml:"mirostat_tau"`
	Mirostat       int               `yaml:"mirostat"`
	NGPULayers     int               `yaml:"gpu_layers"`
	MMap           bool              `yaml:"mmap"`
	MMlock         bool              `yaml:"mmlock"`
	LowVRAM        bool              `yaml:"low_vram"`

	TensorSplit           string `yaml:"tensor_split"`
	MainGPU               string `yaml:"main_gpu"`
	ImageGenerationAssets string `yaml:"asset_dir"`

	PromptCachePath string `yaml:"prompt_cache_path"`
	PromptCacheAll  bool   `yaml:"prompt_cache_all"`
	PromptCacheRO   bool   `yaml:"prompt_cache_ro"`

	PromptStrings, InputStrings []string
	InputToken                  [][]int
}

@@ -31,8 +55,22 @@ type TemplateConfig struct {
	Edit string `yaml:"edit"`
}

type ConfigMerger map[string]Config
type ConfigMerger struct {
	configs map[string]Config
	sync.Mutex
}

func defaultConfig(modelFile string) *Config {
	return &Config{
		OpenAIRequest: defaultRequest(modelFile),
	}
}

func NewConfigMerger() *ConfigMerger {
	return &ConfigMerger{
		configs: make(map[string]Config),
	}
}
func ReadConfigFile(file string) ([]*Config, error) {
	c := &[]*Config{}
	f, err := os.ReadFile(file)
@@ -60,33 +98,63 @@ func ReadConfig(file string) (*Config, error) {
}

func (cm ConfigMerger) LoadConfigFile(file string) error {
	cm.Lock()
	defer cm.Unlock()
	c, err := ReadConfigFile(file)
	if err != nil {
		return fmt.Errorf("cannot load config file: %w", err)
	}

	for _, cc := range c {
		cm[cc.Name] = *cc
		cm.configs[cc.Name] = *cc
	}
	return nil
}

func (cm ConfigMerger) LoadConfig(file string) error {
	cm.Lock()
	defer cm.Unlock()
	c, err := ReadConfig(file)
	if err != nil {
		return fmt.Errorf("cannot read config file: %w", err)
	}

	cm[c.Name] = *c
	cm.configs[c.Name] = *c
	return nil
}

func (cm ConfigMerger) GetConfig(m string) (Config, bool) {
	cm.Lock()
	defer cm.Unlock()
	v, exists := cm.configs[m]
	return v, exists
}

func (cm ConfigMerger) ListConfigs() []string {
	cm.Lock()
	defer cm.Unlock()
	var res []string
	for k := range cm.configs {
		res = append(res, k)
	}
	return res
}

func (cm ConfigMerger) LoadConfigs(path string) error {
	files, err := ioutil.ReadDir(path)
	cm.Lock()
	defer cm.Unlock()
	entries, err := os.ReadDir(path)
	if err != nil {
		return err
	}

	files := make([]fs.FileInfo, 0, len(entries))
	for _, entry := range entries {
		info, err := entry.Info()
		if err != nil {
			return err
		}
		files = append(files, info)
	}
	for _, file := range files {
		// Skip templates, YAML and .keep files
		if !strings.Contains(file.Name(), ".yaml") {
@@ -94,9 +162,194 @@ func (cm ConfigMerger) LoadConfigs(path string) error {
		}
		c, err := ReadConfig(filepath.Join(path, file.Name()))
		if err == nil {
			cm[c.Name] = *c
			cm.configs[c.Name] = *c
		}
	}

	return nil
}

func updateConfig(config *Config, input *OpenAIRequest) {
	if input.Echo {
		config.Echo = input.Echo
	}
	if input.TopK != 0 {
		config.TopK = input.TopK
	}
	if input.TopP != 0 {
		config.TopP = input.TopP
	}

	if input.Temperature != 0 {
		config.Temperature = input.Temperature
	}

	if input.Maxtokens != 0 {
		config.Maxtokens = input.Maxtokens
	}

	switch stop := input.Stop.(type) {
	case string:
		if stop != "" {
			config.StopWords = append(config.StopWords, stop)
		}
	case []interface{}:
		for _, pp := range stop {
			if s, ok := pp.(string); ok {
				config.StopWords = append(config.StopWords, s)
			}
		}
	}

	if input.RepeatPenalty != 0 {
		config.RepeatPenalty = input.RepeatPenalty
	}

	if input.Keep != 0 {
		config.Keep = input.Keep
	}

	if input.Batch != 0 {
		config.Batch = input.Batch
	}

	if input.F16 {
		config.F16 = input.F16
	}

	if input.IgnoreEOS {
		config.IgnoreEOS = input.IgnoreEOS
	}

	if input.Seed != 0 {
		config.Seed = input.Seed
	}

	if input.Mirostat != 0 {
		config.Mirostat = input.Mirostat
	}

	if input.MirostatETA != 0 {
		config.MirostatETA = input.MirostatETA
	}

	if input.MirostatTAU != 0 {
		config.MirostatTAU = input.MirostatTAU
	}

	if input.TypicalP != 0 {
		config.TypicalP = input.TypicalP
	}

	switch inputs := input.Input.(type) {
	case string:
		if inputs != "" {
			config.InputStrings = append(config.InputStrings, inputs)
		}
	case []interface{}:
		for _, pp := range inputs {
			switch i := pp.(type) {
			case string:
				config.InputStrings = append(config.InputStrings, i)
			case []interface{}:
				tokens := []int{}
				for _, ii := range i {
					tokens = append(tokens, int(ii.(float64)))
				}
				config.InputToken = append(config.InputToken, tokens)
			}
		}
	}

	switch p := input.Prompt.(type) {
	case string:
		config.PromptStrings = append(config.PromptStrings, p)
	case []interface{}:
		for _, pp := range p {
			if s, ok := pp.(string); ok {
				config.PromptStrings = append(config.PromptStrings, s)
			}
		}
	}
}
func readInput(c *fiber.Ctx, loader *model.ModelLoader, randomModel bool) (string, *OpenAIRequest, error) {
	input := new(OpenAIRequest)
	// Get input data from the request body
	if err := c.BodyParser(input); err != nil {
		return "", nil, err
	}

	modelFile := input.Model

	if c.Params("model") != "" {
		modelFile = c.Params("model")
	}

	received, _ := json.Marshal(input)

	log.Debug().Msgf("Request received: %s", string(received))

	// Set model from bearer token, if available
	bearer := strings.TrimLeft(c.Get("authorization"), "Bearer ")
	bearerExists := bearer != "" && loader.ExistsInModelPath(bearer)

	// If no model was specified, take the first available
	if modelFile == "" && !bearerExists && randomModel {
		models, _ := loader.ListModels()
		if len(models) > 0 {
			modelFile = models[0]
			log.Debug().Msgf("No model specified, using: %s", modelFile)
		} else {
			log.Debug().Msgf("No model specified, returning error")
			return "", nil, fmt.Errorf("no model specified")
		}
	}

	// If a model is found in bearer token takes precedence
	if bearerExists {
		log.Debug().Msgf("Using model from bearer token: %s", bearer)
		modelFile = bearer
	}
	return modelFile, input, nil
}

func readConfig(modelFile string, input *OpenAIRequest, cm *ConfigMerger, loader *model.ModelLoader, debug bool, threads, ctx int, f16 bool) (*Config, *OpenAIRequest, error) {
	// Load a config file if present after the model name
	modelConfig := filepath.Join(loader.ModelPath, modelFile+".yaml")
	if _, err := os.Stat(modelConfig); err == nil {
		if err := cm.LoadConfig(modelConfig); err != nil {
			return nil, nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error())
		}
	}

	var config *Config
	cfg, exists := cm.GetConfig(modelFile)
	if !exists {
		config = defaultConfig(modelFile)
		config.ContextSize = ctx
		config.Threads = threads
		config.F16 = f16
		config.Debug = debug
	} else {
		config = &cfg
	}

	// Set the parameters for the language model prediction
	updateConfig(config, input)

	// Don't allow 0 as setting
	if config.Threads == 0 {
		if threads != 0 {
			config.Threads = threads
		} else {
			config.Threads = 4
		}
	}

	// Enforce debug flag if passed from CLI
	if debug {
		config.Debug = true
	}

	return config, input, nil
}
```
`api/config_test.go` (new file):

```go
package api

import (
	"os"

	"github.com/go-skynet/LocalAI/pkg/model"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("Test cases for config related functions", func() {

	var (
		configFile string
	)

	Context("Test Read configuration functions", func() {
		configFile = os.Getenv("CONFIG_FILE")
		It("Test ReadConfigFile", func() {
			config, err := ReadConfigFile(configFile)
			Expect(err).To(BeNil())
			Expect(config).ToNot(BeNil())
			// two configs in config.yaml
			Expect(config[0].Name).To(Equal("list1"))
			Expect(config[1].Name).To(Equal("list2"))
		})

		It("Test LoadConfigs", func() {
			cm := NewConfigMerger()
			options := newOptions()
			modelLoader := model.NewModelLoader(os.Getenv("MODELS_PATH"))
			WithModelLoader(modelLoader)(options)

			err := cm.LoadConfigs(options.loader.ModelPath)
			Expect(err).To(BeNil())
			Expect(cm.configs).ToNot(BeNil())

			// config should include the gpt4all model's api.config
			Expect(cm.configs).To(HaveKey("gpt4all"))

			// config should include the gpt4all-2 model's api.config
			Expect(cm.configs).To(HaveKey("gpt4all-2"))

			// config should include the text-embedding-ada-002 model's api.config
			Expect(cm.configs).To(HaveKey("text-embedding-ada-002"))

			// config should include the rwkv_test model's api.config
			Expect(cm.configs).To(HaveKey("rwkv_test"))

			// config should include the whisper-1 model's api.config
			Expect(cm.configs).To(HaveKey("whisper-1"))
		})
	})
})
```
267
api/gallery.go
Normal file
267
api/gallery.go
Normal file
@@ -0,0 +1,267 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-skynet/LocalAI/pkg/gallery"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"github.com/google/uuid"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type galleryOp struct {
|
||||
req ApplyGalleryModelRequest
|
||||
id string
|
||||
}
|
||||
|
||||
type galleryOpStatus struct {
|
||||
Error error `json:"error"`
|
||||
Processed bool `json:"processed"`
|
||||
Message string `json:"message"`
|
||||
Progress float64 `json:"progress"`
|
||||
TotalFileSize string `json:"file_size"`
|
||||
DownloadedFileSize string `json:"downloaded_size"`
|
||||
}
|
||||
|
||||
type galleryApplier struct {
|
||||
modelPath string
|
||||
sync.Mutex
|
||||
C chan galleryOp
|
||||
statuses map[string]*galleryOpStatus
|
||||
}
|
||||
|
||||
func newGalleryApplier(modelPath string) *galleryApplier {
|
||||
return &galleryApplier{
|
||||
modelPath: modelPath,
|
||||
C: make(chan galleryOp),
|
||||
statuses: make(map[string]*galleryOpStatus),
|
||||
}
|
||||
}
|
||||
|
||||
func applyGallery(modelPath string, req ApplyGalleryModelRequest, cm *ConfigMerger, downloadStatus func(string, string, string, float64)) error {
|
||||
url, err := req.DecodeURL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Send a GET request to the URL
|
||||
response, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
// Read the response body
|
||||
body, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal YAML data into a Config struct
|
||||
var config gallery.Config
|
||||
err = yaml.Unmarshal(body, &config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
config.Files = append(config.Files, req.AdditionalFiles...)
|
||||
|
||||
if err := gallery.Apply(modelPath, req.Name, &config, req.Overrides, downloadStatus); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reload models
|
||||
return cm.LoadConfigs(modelPath)
|
||||
}
|
||||
|
||||
func (g *galleryApplier) updatestatus(s string, op *galleryOpStatus) {
|
||||
g.Lock()
|
||||
defer g.Unlock()
|
||||
g.statuses[s] = op
|
||||
}
|
||||
|
||||
func (g *galleryApplier) getstatus(s string) *galleryOpStatus {
|
||||
g.Lock()
|
||||
defer g.Unlock()
|
||||
|
||||
return g.statuses[s]
|
||||
}
|
||||
|
||||
func (g *galleryApplier) start(c context.Context, cm *ConfigMerger) {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-c.Done():
|
||||
return
|
||||
case op := <-g.C:
|
||||
g.updatestatus(op.id, &galleryOpStatus{Message: "processing", Progress: 0})
|
||||
|
||||
updateError := func(e error) {
|
||||
g.updatestatus(op.id, &galleryOpStatus{Error: e, Processed: true})
|
||||
}
|
||||
|
||||
if err := applyGallery(g.modelPath, op.req, cm, func(fileName string, current string, total string, percentage float64) {
|
||||
g.updatestatus(op.id, &galleryOpStatus{Message: "processing", Progress: percentage, TotalFileSize: total, DownloadedFileSize: current})
|
||||
displayDownload(fileName, current, total, percentage)
|
||||
}); err != nil {
|
||||
updateError(err)
|
||||
continue
|
||||
}
|
||||
|
||||
g.updatestatus(op.id, &galleryOpStatus{Processed: true, Message: "completed", Progress: 100})
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
var lastProgress time.Time = time.Now()
var startTime time.Time = time.Now()

func displayDownload(fileName string, current string, total string, percentage float64) {
    currentTime := time.Now()

    if currentTime.Sub(lastProgress) >= 5*time.Second {

        lastProgress = currentTime

        // calculate ETA based on percentage and elapsed time
        var eta time.Duration
        if percentage > 0 {
            elapsed := currentTime.Sub(startTime)
            eta = time.Duration(float64(elapsed)*(100/percentage) - float64(elapsed))
        }

        if total != "" {
            log.Debug().Msgf("Downloading %s: %s/%s (%.2f%%) ETA: %s", fileName, current, total, percentage, eta)
        } else {
            log.Debug().Msgf("Downloading: %s", current)
        }
    }
}
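The ETA is a straight linear extrapolation: if elapsed time produced percentage percent of the download, the projected total is elapsed * (100 / percentage), and the ETA is that total minus elapsed. For example, at 25% after 30 seconds the estimate is 30s × (100/25) − 30s = 90 seconds remaining. Since startTime and lastProgress are package-level variables, the estimate is only meaningful for one download at a time.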
func ApplyGalleryFromFile(modelPath, s string, cm *ConfigMerger) error {
    dat, err := os.ReadFile(s)
    if err != nil {
        return err
    }
    var requests []ApplyGalleryModelRequest
    err = json.Unmarshal(dat, &requests)
    if err != nil {
        return err
    }

    for _, r := range requests {
        if err := applyGallery(modelPath, r, cm, displayDownload); err != nil {
            return err
        }
    }

    return nil
}

func ApplyGalleryFromString(modelPath, s string, cm *ConfigMerger) error {
    var requests []ApplyGalleryModelRequest
    err := json.Unmarshal([]byte(s), &requests)
    if err != nil {
        return err
    }

    for _, r := range requests {
        if err := applyGallery(modelPath, r, cm, displayDownload); err != nil {
            return err
        }
    }

    return nil
}

// endpoints

type ApplyGalleryModelRequest struct {
    URL             string                 `json:"url"`
    Name            string                 `json:"name"`
    Overrides       map[string]interface{} `json:"overrides"`
    AdditionalFiles []gallery.File         `json:"files"`
}

const (
    githubURI = "github:"
)

func (request ApplyGalleryModelRequest) DecodeURL() (string, error) {
    input := request.URL
    var rawURL string

    if strings.HasPrefix(input, githubURI) {
        parts := strings.Split(input, ":")
        repoParts := strings.Split(parts[1], "@")
        branch := "main"

        if len(repoParts) > 1 {
            branch = repoParts[1]
        }

        repoPath := strings.Split(repoParts[0], "/")
        org := repoPath[0]
        project := repoPath[1]
        projectPath := strings.Join(repoPath[2:], "/")

        rawURL = fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/%s/%s", org, project, branch, projectPath)
    } else if strings.HasPrefix(input, "http://") || strings.HasPrefix(input, "https://") {
        // Handle regular URLs
        u, err := url.Parse(input)
        if err != nil {
            return "", fmt.Errorf("invalid URL: %w", err)
        }
        rawURL = u.String()
    } else {
        return "", fmt.Errorf("invalid URL format")
    }

    return rawURL, nil
}

func getOpStatus(g *galleryApplier) func(c *fiber.Ctx) error {
    return func(c *fiber.Ctx) error {

        status := g.getstatus(c.Params("uuid"))
        if status == nil {
            return fmt.Errorf("could not find any status for ID")
        }

        return c.JSON(status)
    }
}

func applyModelGallery(modelPath string, cm *ConfigMerger, g chan galleryOp) func(c *fiber.Ctx) error {
    return func(c *fiber.Ctx) error {
        input := new(ApplyGalleryModelRequest)
        // Get input data from the request body
        if err := c.BodyParser(input); err != nil {
            return err
        }

        uuid, err := uuid.NewUUID()
        if err != nil {
            return err
        }
        g <- galleryOp{
            req: *input,
            id:  uuid.String(),
        }
        return c.JSON(struct {
            ID        string `json:"uuid"`
            StatusURL string `json:"status"`
        }{ID: uuid.String(), StatusURL: c.BaseURL() + "/models/jobs/" + uuid.String()})
    }
}
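Together these handlers form a small asynchronous job API: POST a gallery request, receive a UUID, then poll the status URL echoed back in the response. A hypothetical session (the /models/apply mount point is an assumption — route wiring is not part of this diff — while the /models/jobs/ path matches the StatusURL above):

curl http://localhost:8080/models/apply \
  -H "Content-Type: application/json" \
  -d '{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt4all-j"}'
# => {"uuid":"<id>","status":"http://localhost:8080/models/jobs/<id>"}

curl http://localhost:8080/models/jobs/<id>
# => {"error":null,"processed":true,"message":"completed","progress":100,...}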
api/gallery_test.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package api_test

import (
    . "github.com/go-skynet/LocalAI/api"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = Describe("Gallery API tests", func() {
    Context("requests", func() {
        It("parses github with a branch", func() {
            req := ApplyGalleryModelRequest{URL: "github:go-skynet/model-gallery/gpt4all-j.yaml@main"}
            str, err := req.DecodeURL()
            Expect(err).ToNot(HaveOccurred())
            Expect(str).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml"))
        })
        It("parses github without a branch", func() {
            req := ApplyGalleryModelRequest{URL: "github:go-skynet/model-gallery/gpt4all-j.yaml"}
            str, err := req.DecodeURL()
            Expect(err).ToNot(HaveOccurred())
            Expect(str).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml"))
        })
        It("parses URLS", func() {
            req := ApplyGalleryModelRequest{URL: "https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml"}
            str, err := req.DecodeURL()
            Expect(err).ToNot(HaveOccurred())
            Expect(str).To(Equal("https://raw.githubusercontent.com/go-skynet/model-gallery/main/gpt4all-j.yaml"))
        })
    })
})
api/openai.go (658 lines changed)
@@ -3,13 +3,23 @@ package api
import (
    "bufio"
    "bytes"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "os"
    "path"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
    model "github.com/go-skynet/LocalAI/pkg/model"
    whisperutil "github.com/go-skynet/LocalAI/pkg/whisper"
    llama "github.com/go-skynet/go-llama.cpp"
    "github.com/gofiber/fiber/v2"
    "github.com/rs/zerolog/log"
    "github.com/valyala/fasthttp"
@@ -27,12 +37,31 @@ type ErrorResponse struct {
    Error *APIError `json:"error,omitempty"`
}

type OpenAIUsage struct {
    PromptTokens     int `json:"prompt_tokens"`
    CompletionTokens int `json:"completion_tokens"`
    TotalTokens      int `json:"total_tokens"`
}

type Item struct {
    Embedding []float32 `json:"embedding"`
    Index     int       `json:"index"`
    Object    string    `json:"object,omitempty"`

    // Images
    URL     string `json:"url,omitempty"`
    B64JSON string `json:"b64_json,omitempty"`
}

type OpenAIResponse struct {
    Created int      `json:"created,omitempty"`
    Object  string   `json:"object,omitempty"`
    ID      string   `json:"id,omitempty"`
    Model   string   `json:"model,omitempty"`
    Choices []Choice `json:"choices,omitempty"`
    Data    []Item   `json:"data,omitempty"`

    Usage OpenAIUsage `json:"usage"`
}

type Choice struct {
@@ -56,12 +85,19 @@ type OpenAIModel struct {
type OpenAIRequest struct {
    Model string `json:"model" yaml:"model"`

    // Prompt is read only by completion API calls
    // whisper
    File     string `json:"file" validate:"required"`
    Language string `json:"language"`
    //whisper/image
    ResponseFormat string `json:"response_format"`
    // image
    Size string `json:"size"`
    // Prompt is read only by completion/image API calls
    Prompt interface{} `json:"prompt" yaml:"prompt"`

    // Edit endpoint
    Instruction string `json:"instruction" yaml:"instruction"`
    Input string `json:"input" yaml:"input"`
    Instruction string `json:"instruction" yaml:"instruction"`
    Input interface{} `json:"input" yaml:"input"`

    Stop interface{} `json:"stop" yaml:"stop"`

@@ -85,7 +121,20 @@ type OpenAIRequest struct {
    RepeatPenalty float64 `json:"repeat_penalty" yaml:"repeat_penalty"`
    Keep          int     `json:"n_keep" yaml:"n_keep"`

    MirostatETA float64 `json:"mirostat_eta" yaml:"mirostat_eta"`
    MirostatTAU float64 `json:"mirostat_tau" yaml:"mirostat_tau"`
    Mirostat    int     `json:"mirostat" yaml:"mirostat"`

    FrequencyPenalty float64 `json:"frequency_penalty" yaml:"frequency_penalty"`
    TFZ              float64 `json:"tfz" yaml:"tfz"`

    Seed int `json:"seed" yaml:"seed"`

    // Image (not supported by OpenAI)
    Mode int `json:"mode"`
    Step int `json:"step"`

    TypicalP float64 `json:"typical_p" yaml:"typical_p"`
}

func defaultRequest(modelFile string) OpenAIRequest {
@@ -98,156 +147,46 @@ func defaultRequest(modelFile string) OpenAIRequest {
    }
}

func updateConfig(config *Config, input *OpenAIRequest) {
    if input.Echo {
        config.Echo = input.Echo
    }
    if input.TopK != 0 {
        config.TopK = input.TopK
    }
    if input.TopP != 0 {
        config.TopP = input.TopP
    }

    if input.Temperature != 0 {
        config.Temperature = input.Temperature
    }

    if input.Maxtokens != 0 {
        config.Maxtokens = input.Maxtokens
    }

    switch stop := input.Stop.(type) {
    case string:
        if stop != "" {
            config.StopWords = append(config.StopWords, stop)
        }
    case []interface{}:
        for _, pp := range stop {
            if s, ok := pp.(string); ok {
                config.StopWords = append(config.StopWords, s)
            }
        }
    }

    if input.RepeatPenalty != 0 {
        config.RepeatPenalty = input.RepeatPenalty
    }

    if input.Keep != 0 {
        config.Keep = input.Keep
    }

    if input.Batch != 0 {
        config.Batch = input.Batch
    }

    if input.F16 {
        config.F16 = input.F16
    }

    if input.IgnoreEOS {
        config.IgnoreEOS = input.IgnoreEOS
    }

    if input.Seed != 0 {
        config.Seed = input.Seed
    }
}

func readConfig(cm ConfigMerger, c *fiber.Ctx, loader *model.ModelLoader, debug bool, threads, ctx int, f16 bool) (*Config, *OpenAIRequest, error) {
    input := new(OpenAIRequest)
    // Get input data from the request body
    if err := c.BodyParser(input); err != nil {
        return nil, nil, err
    }

    modelFile := input.Model
    received, _ := json.Marshal(input)

    log.Debug().Msgf("Request received: %s", string(received))

    // Set model from bearer token, if available
    bearer := strings.TrimLeft(c.Get("authorization"), "Bearer ")
    bearerExists := bearer != "" && loader.ExistsInModelPath(bearer)

    // If no model was specified, take the first available
    if modelFile == "" && !bearerExists {
        models, _ := loader.ListModels()
        if len(models) > 0 {
            modelFile = models[0]
            log.Debug().Msgf("No model specified, using: %s", modelFile)
        } else {
            log.Debug().Msgf("No model specified, returning error")
            return nil, nil, fmt.Errorf("no model specified")
        }
    }

    // If a model is found in bearer token takes precedence
    if bearerExists {
        log.Debug().Msgf("Using model from bearer token: %s", bearer)
        modelFile = bearer
    }

    // Load a config file if present after the model name
    modelConfig := filepath.Join(loader.ModelPath, modelFile+".yaml")
    if _, err := os.Stat(modelConfig); err == nil {
        if err := cm.LoadConfig(modelConfig); err != nil {
            return nil, nil, fmt.Errorf("failed loading model config (%s) %s", modelConfig, err.Error())
        }
    }

    var config *Config
    cfg, exists := cm[modelFile]
    if !exists {
        config = &Config{
            OpenAIRequest: defaultRequest(modelFile),
        }
    } else {
        config = &cfg
    }

    // Set the parameters for the language model prediction
    updateConfig(config, input)

    if threads != 0 {
        config.Threads = threads
    }
    if ctx != 0 {
        config.ContextSize = ctx
    }
    if f16 {
        config.F16 = true
    }

    if debug {
        config.Debug = true
    }

    return config, input, nil
}
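readConfig therefore resolves settings in three layers: defaults from defaultRequest, a <model>.yaml file sitting next to the model in the model path, and per-request values applied last by updateConfig (non-zero request fields win). A hypothetical side-car config illustrating the middle layer — key names are assumptions inferred from the yaml struct tags above, since the Config struct itself is defined outside this diff:

# models/gpt4all-j.yaml (illustrative only)
model: gpt4all-j.bin
repeat_penalty: 1.1
n_keep: 10
mirostat: 2
mirostat_tau: 5.0
seed: 42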
// https://platform.openai.com/docs/api-reference/completions
func completionEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
func completionEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
    process := func(s string, req *OpenAIRequest, config *Config, loader *model.ModelLoader, responses chan OpenAIResponse) {
        ComputeChoices(s, req, config, o, loader, func(s string, c *[]Choice) {}, func(s string) bool {
            resp := OpenAIResponse{
                Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
                Choices: []Choice{{Text: s}},
                Object:  "text_completion",
            }
            log.Debug().Msgf("Sending goroutine: %s", s)

            responses <- resp
            return true
        })
        close(responses)
    }

    return func(c *fiber.Ctx) error {
        config, input, err := readConfig(cm, c, loader, debug, threads, ctx, f16)
        model, input, err := readInput(c, o.loader, true)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        log.Debug().Msgf("`input`: %+v", input)

        config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        log.Debug().Msgf("Parameter Config: %+v", config)

        predInput := []string{}

        switch p := input.Prompt.(type) {
        case string:
            predInput = append(predInput, p)
        case []interface{}:
            for _, pp := range p {
                if s, ok := pp.(string); ok {
                    predInput = append(predInput, s)
                }
            }
        if input.Stream {
            log.Debug().Msgf("Stream request received")
            c.Context().SetContentType("text/event-stream")
            //c.Response().Header.SetContentType(fiber.MIMETextHTMLCharsetUTF8)
            //c.Set("Content-Type", "text/event-stream")
            c.Set("Cache-Control", "no-cache")
            c.Set("Connection", "keep-alive")
            c.Set("Transfer-Encoding", "chunked")
        }

        templateFile := config.Model
@@ -256,10 +195,55 @@ func completionEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader,
            templateFile = config.TemplateConfig.Completion
        }

        var result []Choice
        for _, i := range predInput {
        if input.Stream {
            if len(config.PromptStrings) > 1 {
                return errors.New("cannot handle more than 1 `PromptStrings` when `Stream`ing")
            }

            predInput := config.PromptStrings[0]

            // A model can have a "file.bin.tmpl" file associated with a prompt template prefix
            templatedInput, err := loader.TemplatePrefix(templateFile, struct {
            templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
                Input string
            }{Input: predInput})
            if err == nil {
                predInput = templatedInput
                log.Debug().Msgf("Template found, input modified to: %s", predInput)
            }

            responses := make(chan OpenAIResponse)

            go process(predInput, input, config, o.loader, responses)

            c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {

                for ev := range responses {
                    var buf bytes.Buffer
                    enc := json.NewEncoder(&buf)
                    enc.Encode(ev)

                    log.Debug().Msgf("Sending chunk: %s", buf.String())
                    fmt.Fprintf(w, "data: %v\n", buf.String())
                    w.Flush()
                }

                resp := &OpenAIResponse{
                    Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
                    Choices: []Choice{{FinishReason: "stop"}},
                }
                respData, _ := json.Marshal(resp)

                w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
                w.WriteString("data: [DONE]\n\n")
                w.Flush()
            }))
            return nil
        }

        var result []Choice
        for _, i := range config.PromptStrings {
            // A model can have a "file.bin.tmpl" file associated with a prompt template prefix
            templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
                Input string
            }{Input: i})
            if err == nil {
@@ -267,7 +251,7 @@ func completionEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader,
                log.Debug().Msgf("Template found, input modified to: %s", i)
            }

            r, err := ComputeChoices(i, input, config, loader, func(s string, c *[]Choice) {
            r, err := ComputeChoices(i, input, config, o, o.loader, func(s string, c *[]Choice) {
                *c = append(*c, Choice{Text: s})
            }, nil)
            if err != nil {
@@ -291,9 +275,94 @@ func completionEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader,
    }
}
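For reference, a request exercising the streaming branch above, assuming the handler is mounted on the OpenAI-compatible /v1/completions route (model name illustrative). With "stream": true the reply is Server-Sent Events: one data: chunk per generated segment, a closing chunk with finish_reason "stop", then data: [DONE]:

curl http://localhost:8080/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt4all-j", "prompt": "Once upon a time", "stream": true}'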
func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
// https://platform.openai.com/docs/api-reference/embeddings
func embeddingsEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
    return func(c *fiber.Ctx) error {
        config, input, err := readConfig(cm, c, loader, debug, threads, ctx, f16)
        model, input, err := readInput(c, o.loader, true)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        log.Debug().Msgf("Parameter Config: %+v", config)
        items := []Item{}

        for i, s := range config.InputToken {
            // get the model function to call for the result
            embedFn, err := ModelEmbedding("", s, o.loader, *config, o)
            if err != nil {
                return err
            }

            embeddings, err := embedFn()
            if err != nil {
                return err
            }
            items = append(items, Item{Embedding: embeddings, Index: i, Object: "embedding"})
        }

        for i, s := range config.InputStrings {
            // get the model function to call for the result
            embedFn, err := ModelEmbedding(s, []int{}, o.loader, *config, o)
            if err != nil {
                return err
            }

            embeddings, err := embedFn()
            if err != nil {
                return err
            }
            items = append(items, Item{Embedding: embeddings, Index: i, Object: "embedding"})
        }

        resp := &OpenAIResponse{
            Model:  input.Model, // we have to return what the user sent here, due to OpenAI spec.
            Data:   items,
            Object: "list",
        }

        jsonResult, _ := json.Marshal(resp)
        log.Debug().Msgf("Response: %s", jsonResult)

        // Return the prediction in the response body
        return c.JSON(resp)
    }
}
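A matching request, assuming the handler is mounted on the OpenAI-compatible /v1/embeddings route; the input string presumably ends up in config.InputStrings via readInput/readConfig (model name illustrative):

curl http://localhost:8080/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{"model": "bert-embeddings", "input": "The food was delicious"}'
# => {"object":"list","data":[{"object":"embedding","index":0,"embedding":[...]}],...}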
func chatEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {

    process := func(s string, req *OpenAIRequest, config *Config, loader *model.ModelLoader, responses chan OpenAIResponse) {
        initialMessage := OpenAIResponse{
            Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
            Choices: []Choice{{Delta: &Message{Role: "assistant"}}},
            Object:  "chat.completion.chunk",
        }
        responses <- initialMessage

        ComputeChoices(s, req, config, o, loader, func(s string, c *[]Choice) {}, func(s string) bool {
            resp := OpenAIResponse{
                Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
                Choices: []Choice{{Delta: &Message{Content: s}}},
                Object:  "chat.completion.chunk",
            }
            log.Debug().Msgf("Sending goroutine: %s", s)

            responses <- resp
            return true
        })
        close(responses)
    }
    return func(c *fiber.Ctx) error {
        model, input, err := readInput(c, o.loader, true)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }
@@ -304,12 +373,14 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread

        mess := []string{}
        for _, i := range input.Messages {
            var content string
            r := config.Roles[i.Role]
            if r == "" {
                r = i.Role
            if r != "" {
                content = fmt.Sprint(r, " ", i.Content)
            } else {
                content = i.Content
            }

            content := fmt.Sprint(r, " ", i.Content)
            mess = append(mess, content)
        }
@@ -332,7 +403,7 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
        }

        // A model can have a "file.bin.tmpl" file associated with a prompt template prefix
        templatedInput, err := loader.TemplatePrefix(templateFile, struct {
        templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
            Input string
        }{Input: predInput})
        if err == nil {
@@ -343,19 +414,7 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
        if input.Stream {
            responses := make(chan OpenAIResponse)

            go func() {
                ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {}, func(s string) bool {
                    resp := OpenAIResponse{
                        Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
                        Choices: []Choice{{Delta: &Message{Role: "assistant", Content: s}}},
                        Object:  "chat.completion.chunk",
                    }

                    responses <- resp
                    return true
                })
                close(responses)
            }()
            go process(predInput, input, config, o.loader, responses)

            c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {

@@ -364,13 +423,11 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
                    enc := json.NewEncoder(&buf)
                    enc.Encode(ev)

                    fmt.Fprintf(w, "event: data\n\n")
                    fmt.Fprintf(w, "data: %v\n\n", buf.String())
                    log.Debug().Msgf("Sending chunk: %s", buf.String())
                    fmt.Fprintf(w, "data: %v\n", buf.String())
                    w.Flush()
                }

                w.WriteString("event: data\n\n")
                resp := &OpenAIResponse{
                    Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
                    Choices: []Choice{{FinishReason: "stop"}},
@@ -378,12 +435,13 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
                respData, _ := json.Marshal(resp)

                w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
                w.WriteString("data: [DONE]\n\n")
                w.Flush()
            }))
            return nil
        }

        result, err := ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {
        result, err := ComputeChoices(predInput, input, config, o, o.loader, func(s string, c *[]Choice) {
            *c = append(*c, Choice{Message: &Message{Role: "assistant", Content: s}})
        }, nil)
        if err != nil {
@@ -395,43 +453,54 @@ func chatEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
            Choices: result,
            Object:  "chat.completion",
        }
        respData, _ := json.Marshal(resp)
        log.Debug().Msgf("Response: %s", respData)

        // Return the prediction in the response body
        return c.JSON(resp)
    }
}
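The streaming framing implemented above is: an initial chunk carrying only the assistant role delta, one chunk per content delta, then a finish_reason "stop" chunk followed by data: [DONE]. A request to drive it, assuming the OpenAI-compatible /v1/chat/completions mount (model name illustrative):

curl http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt4all-j", "messages": [{"role": "user", "content": "How are you?"}], "stream": true}'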
func editEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
func editEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
    return func(c *fiber.Ctx) error {
        config, input, err := readConfig(cm, c, loader, debug, threads, ctx, f16)
        model, input, err := readInput(c, o.loader, true)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        log.Debug().Msgf("Parameter Config: %+v", config)

        predInput := input.Input
        templateFile := config.Model

        if config.TemplateConfig.Edit != "" {
            templateFile = config.TemplateConfig.Edit
        }

        // A model can have a "file.bin.tmpl" file associated with a prompt template prefix
        templatedInput, err := loader.TemplatePrefix(templateFile, struct {
            Input       string
            Instruction string
        }{Input: predInput, Instruction: input.Instruction})
        if err == nil {
            predInput = templatedInput
            log.Debug().Msgf("Template found, input modified to: %s", predInput)
        }
        var result []Choice
        for _, i := range config.InputStrings {
            // A model can have a "file.bin.tmpl" file associated with a prompt template prefix
            templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
                Input       string
                Instruction string
            }{Input: i})
            if err == nil {
                i = templatedInput
                log.Debug().Msgf("Template found, input modified to: %s", i)
            }

        result, err := ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {
            *c = append(*c, Choice{Text: s})
        }, nil)
        if err != nil {
            return err
            r, err := ComputeChoices(i, input, config, o, o.loader, func(s string, c *[]Choice) {
                *c = append(*c, Choice{Text: s})
            }, nil)
            if err != nil {
                return err
            }

            result = append(result, r...)
        }

        resp := &OpenAIResponse{
@@ -448,7 +517,214 @@ func editEndpoint(cm ConfigMerger, debug bool, loader *model.ModelLoader, thread
    }
}
func listModels(loader *model.ModelLoader, cm ConfigMerger) func(ctx *fiber.Ctx) error {
// https://platform.openai.com/docs/api-reference/images/create

/*
*

	curl http://localhost:8080/v1/images/generations \
	-H "Content-Type: application/json" \
	-d '{
		"prompt": "A cute baby sea otter",
		"n": 1,
		"size": "512x512"
	}'

*
*/
func imageEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
    return func(c *fiber.Ctx) error {
        m, input, err := readInput(c, o.loader, false)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        if m == "" {
            m = model.StableDiffusionBackend
        }
        log.Debug().Msgf("Loading model: %+v", m)

        config, input, err := readConfig(m, input, cm, o.loader, o.debug, 0, 0, false)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        log.Debug().Msgf("Parameter Config: %+v", config)

        // XXX: Only stablediffusion is supported for now
        if config.Backend == "" {
            config.Backend = model.StableDiffusionBackend
        }

        sizeParts := strings.Split(input.Size, "x")
        if len(sizeParts) != 2 {
            return fmt.Errorf("Invalid value for 'size'")
        }
        width, err := strconv.Atoi(sizeParts[0])
        if err != nil {
            return fmt.Errorf("Invalid value for 'size'")
        }
        height, err := strconv.Atoi(sizeParts[1])
        if err != nil {
            return fmt.Errorf("Invalid value for 'size'")
        }

        b64JSON := false
        if input.ResponseFormat == "b64_json" {
            b64JSON = true
        }

        var result []Item
        for _, i := range config.PromptStrings {
            n := input.N
            if input.N == 0 {
                n = 1
            }
            for j := 0; j < n; j++ {
                prompts := strings.Split(i, "|")
                positive_prompt := prompts[0]
                negative_prompt := ""
                if len(prompts) > 1 {
                    negative_prompt = prompts[1]
                }

                mode := 0
                step := 15

                if input.Mode != 0 {
                    mode = input.Mode
                }

                if input.Step != 0 {
                    step = input.Step
                }

                tempDir := ""
                if !b64JSON {
                    tempDir = o.imageDir
                }
                // Create a temporary file
                outputFile, err := ioutil.TempFile(tempDir, "b64")
                if err != nil {
                    return err
                }
                outputFile.Close()
                output := outputFile.Name() + ".png"
                // Rename the temporary file
                err = os.Rename(outputFile.Name(), output)
                if err != nil {
                    return err
                }

                baseURL := c.BaseURL()

                fn, err := ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, o.loader, *config, o)
                if err != nil {
                    return err
                }
                if err := fn(); err != nil {
                    return err
                }

                item := &Item{}

                if b64JSON {
                    defer os.RemoveAll(output)
                    data, err := os.ReadFile(output)
                    if err != nil {
                        return err
                    }
                    item.B64JSON = base64.StdEncoding.EncodeToString(data)
                } else {
                    base := filepath.Base(output)
                    item.URL = baseURL + "/generated-images/" + base
                }

                result = append(result, *item)
            }
        }

        resp := &OpenAIResponse{
            Data: result,
        }

        jsonResult, _ := json.Marshal(resp)
        log.Debug().Msgf("Response: %s", jsonResult)

        // Return the prediction in the response body
        return c.JSON(resp)
    }
}

// https://platform.openai.com/docs/api-reference/audio/create
func transcriptEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
    return func(c *fiber.Ctx) error {
        m, input, err := readInput(c, o.loader, false)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }

        config, input, err := readConfig(m, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
        if err != nil {
            return fmt.Errorf("failed reading parameters from request:%w", err)
        }
        // retrieve the file data from the request
        file, err := c.FormFile("file")
        if err != nil {
            return err
        }
        f, err := file.Open()
        if err != nil {
            return err
        }
        defer f.Close()

        dir, err := os.MkdirTemp("", "whisper")

        if err != nil {
            return err
        }
        defer os.RemoveAll(dir)

        dst := filepath.Join(dir, path.Base(file.Filename))
        dstFile, err := os.Create(dst)
        if err != nil {
            return err
        }

        if _, err := io.Copy(dstFile, f); err != nil {
            log.Debug().Msgf("Audio file copying error %+v - %+v - err %+v", file.Filename, dst, err)
            return err
        }

        log.Debug().Msgf("Audio file copied to: %+v", dst)

        whisperModel, err := o.loader.BackendLoader(model.WhisperBackend, config.Model, []llama.ModelOption{}, uint32(config.Threads), o.assetsDestination)
        if err != nil {
            return err
        }

        if whisperModel == nil {
            return fmt.Errorf("could not load whisper model")
        }

        w, ok := whisperModel.(whisper.Model)
        if !ok {
            return fmt.Errorf("loader returned non-whisper object")
        }

        tr, err := whisperutil.Transcript(w, dst, input.Language, uint(config.Threads))
        if err != nil {
            return err
        }

        log.Debug().Msgf("Transcribed: %+v", tr)
        // TODO: handle different outputs here
        return c.Status(http.StatusOK).JSON(fiber.Map{"text": tr})
    }
}
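Unlike the JSON endpoints, this one reads a multipart form, since the audio arrives via c.FormFile("file"). A sketch of a request, assuming the OpenAI-compatible /v1/audio/transcriptions mount (file path and model name illustrative):

curl http://localhost:8080/v1/audio/transcriptions \
  -H "Content-Type: multipart/form-data" \
  -F file="@/path/to/recording.wav" \
  -F model="whisper-1"
# => {"text": "..."}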
func listModels(loader *model.ModelLoader, cm *ConfigMerger) func(ctx *fiber.Ctx) error {
    return func(c *fiber.Ctx) error {
        models, err := loader.ListModels()
        if err != nil {
@@ -462,7 +738,7 @@ func listModels(loader *model.ModelLoader, cm ConfigMerger) func(ctx *fiber.Ctx)
            dataModels = append(dataModels, OpenAIModel{ID: m, Object: "model"})
        }

        for k := range cm {
        for _, k := range cm.ListConfigs() {
            if _, exists := mm[k]; !exists {
                dataModels = append(dataModels, OpenAIModel{ID: k, Object: "model"})
            }
api/options.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package api

import (
    "context"
    "embed"

    model "github.com/go-skynet/LocalAI/pkg/model"
)

type Option struct {
    context                         context.Context
    configFile                      string
    loader                          *model.ModelLoader
    uploadLimitMB, threads, ctxSize int
    f16                             bool
    debug, disableMessage           bool
    imageDir                        string
    cors                            bool
    preloadJSONModels               string
    preloadModelsFromPath           string
    corsAllowOrigins                string

    backendAssets     embed.FS
    assetsDestination string
}

type AppOption func(*Option)

func newOptions(o ...AppOption) *Option {
    opt := &Option{
        context:        context.Background(),
        uploadLimitMB:  15,
        threads:        1,
        ctxSize:        512,
        debug:          true,
        disableMessage: true,
    }
    for _, oo := range o {
        oo(opt)
    }
    return opt
}

func WithCors(b bool) AppOption {
    return func(o *Option) {
        o.cors = b
    }
}

func WithCorsAllowOrigins(b string) AppOption {
    return func(o *Option) {
        o.corsAllowOrigins = b
    }
}

func WithBackendAssetsOutput(out string) AppOption {
    return func(o *Option) {
        o.assetsDestination = out
    }
}

func WithBackendAssets(f embed.FS) AppOption {
    return func(o *Option) {
        o.backendAssets = f
    }
}

func WithContext(ctx context.Context) AppOption {
    return func(o *Option) {
        o.context = ctx
    }
}

func WithYAMLConfigPreload(configFile string) AppOption {
    return func(o *Option) {
        o.preloadModelsFromPath = configFile
    }
}

func WithJSONStringPreload(configFile string) AppOption {
    return func(o *Option) {
        o.preloadJSONModels = configFile
    }
}

func WithConfigFile(configFile string) AppOption {
    return func(o *Option) {
        o.configFile = configFile
    }
}

func WithModelLoader(loader *model.ModelLoader) AppOption {
    return func(o *Option) {
        o.loader = loader
    }
}

func WithUploadLimitMB(limit int) AppOption {
    return func(o *Option) {
        o.uploadLimitMB = limit
    }
}

func WithThreads(threads int) AppOption {
    return func(o *Option) {
        o.threads = threads
    }
}

func WithContextSize(ctxSize int) AppOption {
    return func(o *Option) {
        o.ctxSize = ctxSize
    }
}

func WithF16(f16 bool) AppOption {
    return func(o *Option) {
        o.f16 = f16
    }
}

func WithDebug(debug bool) AppOption {
    return func(o *Option) {
        o.debug = debug
    }
}

func WithDisableMessage(disableMessage bool) AppOption {
    return func(o *Option) {
        o.disableMessage = disableMessage
    }
}

func WithImageDir(imageDir string) AppOption {
    return func(o *Option) {
        o.imageDir = imageDir
    }
}
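The functional-options pattern keeps the server constructor's signature stable as settings accumulate: each With* helper mutates the private Option struct, and newOptions folds them over the defaults (15 MB upload limit, 1 thread, 512-token context). A sketch of typical composition — the exported App constructor and model.NewModelLoader are assumptions here, since both are defined outside this diff:

// Hypothetical startup code; App is assumed to accept AppOption values
// and to call newOptions internally.
app, err := App(
    WithContext(context.Background()),
    WithModelLoader(model.NewModelLoader("/models")),
    WithThreads(4),
    WithContextSize(1024),
    WithF16(true),
    WithImageDir("/tmp/generated-images"),
)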
@@ -2,107 +2,28 @@ package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/donomii/go-rwkv.cpp"
|
||||
"github.com/go-skynet/LocalAI/pkg/langchain"
|
||||
model "github.com/go-skynet/LocalAI/pkg/model"
|
||||
gpt2 "github.com/go-skynet/go-gpt2.cpp"
|
||||
gptj "github.com/go-skynet/go-gpt4all-j.cpp"
|
||||
"github.com/go-skynet/LocalAI/pkg/stablediffusion"
|
||||
"github.com/go-skynet/bloomz.cpp"
|
||||
bert "github.com/go-skynet/go-bert.cpp"
|
||||
transformers "github.com/go-skynet/go-ggml-transformers.cpp"
|
||||
llama "github.com/go-skynet/go-llama.cpp"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
gpt4all "github.com/nomic-ai/gpt4all/gpt4all-bindings/golang"
|
||||
)
|
||||
|
||||
const tokenizerSuffix = ".tokenizer.json"
|
||||
|
||||
// mutex still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
|
||||
var mutexMap sync.Mutex
|
||||
var mutexes map[string]*sync.Mutex = make(map[string]*sync.Mutex)
|
||||
|
||||
var loadedModels map[string]interface{} = map[string]interface{}{}
|
||||
var muModels sync.Mutex
|
||||
|
||||
func backendLoader(backendString string, loader *model.ModelLoader, modelFile string, llamaOpts []llama.ModelOption, threads uint32) (model interface{}, err error) {
|
||||
switch strings.ToLower(backendString) {
|
||||
case "llama":
|
||||
return loader.LoadLLaMAModel(modelFile, llamaOpts...)
|
||||
case "stablelm":
|
||||
return loader.LoadStableLMModel(modelFile)
|
||||
case "gpt2":
|
||||
return loader.LoadGPT2Model(modelFile)
|
||||
case "gptj":
|
||||
return loader.LoadGPTJModel(modelFile)
|
||||
case "rwkv":
|
||||
return loader.LoadRWKV(modelFile, modelFile+tokenizerSuffix, threads)
|
||||
default:
|
||||
return nil, fmt.Errorf("backend unsupported: %s", backendString)
|
||||
}
|
||||
}
|
||||
|
||||
func greedyLoader(loader *model.ModelLoader, modelFile string, llamaOpts []llama.ModelOption, threads uint32) (model interface{}, err error) {
|
||||
updateModels := func(model interface{}) {
|
||||
muModels.Lock()
|
||||
defer muModels.Unlock()
|
||||
loadedModels[modelFile] = model
|
||||
}
|
||||
|
||||
muModels.Lock()
|
||||
m, exists := loadedModels[modelFile]
|
||||
if exists {
|
||||
muModels.Unlock()
|
||||
return m, nil
|
||||
}
|
||||
muModels.Unlock()
|
||||
|
||||
model, modelerr := loader.LoadLLaMAModel(modelFile, llamaOpts...)
|
||||
if modelerr == nil {
|
||||
updateModels(model)
|
||||
return model, nil
|
||||
} else {
|
||||
err = multierror.Append(err, modelerr)
|
||||
}
|
||||
|
||||
model, modelerr = loader.LoadGPTJModel(modelFile)
|
||||
if modelerr == nil {
|
||||
updateModels(model)
|
||||
return model, nil
|
||||
} else {
|
||||
err = multierror.Append(err, modelerr)
|
||||
}
|
||||
|
||||
model, modelerr = loader.LoadGPT2Model(modelFile)
|
||||
if modelerr == nil {
|
||||
updateModels(model)
|
||||
return model, nil
|
||||
} else {
|
||||
err = multierror.Append(err, modelerr)
|
||||
}
|
||||
|
||||
model, modelerr = loader.LoadStableLMModel(modelFile)
|
||||
if modelerr == nil {
|
||||
updateModels(model)
|
||||
return model, nil
|
||||
} else {
|
||||
err = multierror.Append(err, modelerr)
|
||||
}
|
||||
|
||||
model, modelerr = loader.LoadRWKV(modelFile, modelFile+tokenizerSuffix, threads)
|
||||
if modelerr == nil {
|
||||
updateModels(model)
|
||||
return model, nil
|
||||
} else {
|
||||
err = multierror.Append(err, modelerr)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("could not load model - all backends returned error: %s", err.Error())
|
||||
}
|
||||
|
||||
func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback func(string) bool) (func() (string, error), error) {
|
||||
supportStreams := false
|
||||
modelFile := c.Model
|
||||
|
||||
// Try to load the model
|
||||
func defaultLLamaOpts(c Config) []llama.ModelOption {
|
||||
llamaOpts := []llama.ModelOption{}
|
||||
if c.ContextSize != 0 {
|
||||
llamaOpts = append(llamaOpts, llama.SetContext(c.ContextSize))
|
||||
@@ -110,13 +31,234 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
|
||||
if c.F16 {
|
||||
llamaOpts = append(llamaOpts, llama.EnableF16Memory)
|
||||
}
|
||||
if c.Embeddings {
|
||||
llamaOpts = append(llamaOpts, llama.EnableEmbeddings)
|
||||
}
|
||||
|
||||
if c.NGPULayers != 0 {
|
||||
llamaOpts = append(llamaOpts, llama.SetGPULayers(c.NGPULayers))
|
||||
}
|
||||
|
||||
llamaOpts = append(llamaOpts, llama.SetMMap(c.MMap))
|
||||
llamaOpts = append(llamaOpts, llama.SetMainGPU(c.MainGPU))
|
||||
llamaOpts = append(llamaOpts, llama.SetTensorSplit(c.TensorSplit))
|
||||
if c.Batch != 0 {
|
||||
llamaOpts = append(llamaOpts, llama.SetNBatch(c.Batch))
|
||||
} else {
|
||||
llamaOpts = append(llamaOpts, llama.SetNBatch(512))
|
||||
}
|
||||
|
||||
if c.LowVRAM {
|
||||
llamaOpts = append(llamaOpts, llama.EnabelLowVRAM)
|
||||
}
|
||||
|
||||
return llamaOpts
|
||||
}
|
||||
|
||||
func ImageGeneration(height, width, mode, step, seed int, positive_prompt, negative_prompt, dst string, loader *model.ModelLoader, c Config, o *Option) (func() error, error) {
|
||||
if c.Backend != model.StableDiffusionBackend {
|
||||
return nil, fmt.Errorf("endpoint only working with stablediffusion models")
|
||||
}
|
||||
inferenceModel, err := loader.BackendLoader(c.Backend, c.ImageGenerationAssets, []llama.ModelOption{}, uint32(c.Threads), o.assetsDestination)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fn func() error
|
||||
switch model := inferenceModel.(type) {
|
||||
case *stablediffusion.StableDiffusion:
|
||||
fn = func() error {
|
||||
return model.GenerateImage(height, width, mode, step, seed, positive_prompt, negative_prompt, dst)
|
||||
}
|
||||
|
||||
default:
|
||||
fn = func() error {
|
||||
return fmt.Errorf("creation of images not supported by the backend")
|
||||
}
|
||||
}
|
||||
|
||||
return func() error {
|
||||
// This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
|
||||
mutexMap.Lock()
|
||||
l, ok := mutexes[c.Backend]
|
||||
if !ok {
|
||||
m := &sync.Mutex{}
|
||||
mutexes[c.Backend] = m
|
||||
l = m
|
||||
}
|
||||
mutexMap.Unlock()
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
return fn()
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, c Config, o *Option) (func() ([]float32, error), error) {
|
||||
if !c.Embeddings {
|
||||
return nil, fmt.Errorf("endpoint disabled for this model by API configuration")
|
||||
}
|
||||
|
||||
modelFile := c.Model
|
||||
|
||||
llamaOpts := defaultLLamaOpts(c)
|
||||
|
||||
var inferenceModel interface{}
|
||||
var err error
|
||||
if c.Backend == "" {
|
||||
inferenceModel, err = greedyLoader(loader, modelFile, llamaOpts, uint32(c.Threads))
|
||||
inferenceModel, err = loader.GreedyLoader(modelFile, llamaOpts, uint32(c.Threads), o.assetsDestination)
|
||||
} else {
|
||||
inferenceModel, err = backendLoader(c.Backend, loader, modelFile, llamaOpts, uint32(c.Threads))
|
||||
inferenceModel, err = loader.BackendLoader(c.Backend, modelFile, llamaOpts, uint32(c.Threads), o.assetsDestination)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fn func() ([]float32, error)
|
||||
switch model := inferenceModel.(type) {
|
||||
case *llama.LLama:
|
||||
fn = func() ([]float32, error) {
|
||||
predictOptions := buildLLamaPredictOptions(c, loader.ModelPath)
|
||||
if len(tokens) > 0 {
|
||||
return model.TokenEmbeddings(tokens, predictOptions...)
|
||||
}
|
||||
return model.Embeddings(s, predictOptions...)
|
||||
}
|
||||
// bert embeddings
|
||||
case *bert.Bert:
|
||||
fn = func() ([]float32, error) {
|
||||
if len(tokens) > 0 {
|
||||
return model.TokenEmbeddings(tokens, bert.SetThreads(c.Threads))
|
||||
}
|
||||
return model.Embeddings(s, bert.SetThreads(c.Threads))
|
||||
}
|
||||
default:
|
||||
fn = func() ([]float32, error) {
|
||||
return nil, fmt.Errorf("embeddings not supported by the backend")
|
||||
}
|
||||
}
|
||||
|
||||
return func() ([]float32, error) {
|
||||
// This is still needed, see: https://github.com/ggerganov/llama.cpp/discussions/784
|
||||
mutexMap.Lock()
|
||||
l, ok := mutexes[modelFile]
|
||||
if !ok {
|
||||
m := &sync.Mutex{}
|
||||
mutexes[modelFile] = m
|
||||
l = m
|
||||
}
|
||||
mutexMap.Unlock()
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
|
||||
embeds, err := fn()
|
||||
if err != nil {
|
||||
return embeds, err
|
||||
}
|
||||
// Remove trailing 0s
|
||||
for i := len(embeds) - 1; i >= 0; i-- {
|
||||
if embeds[i] == 0.0 {
|
||||
embeds = embeds[:i]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return embeds, nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
func buildLLamaPredictOptions(c Config, modelPath string) []llama.PredictOption {
|
||||
// Generate the prediction using the language model
|
||||
predictOptions := []llama.PredictOption{
|
||||
llama.SetTemperature(c.Temperature),
|
||||
llama.SetTopP(c.TopP),
|
||||
llama.SetTopK(c.TopK),
|
||||
llama.SetTokens(c.Maxtokens),
|
||||
llama.SetThreads(c.Threads),
|
||||
}
|
||||
|
||||
if c.PromptCacheAll {
|
||||
predictOptions = append(predictOptions, llama.EnablePromptCacheAll)
|
||||
}
|
||||
|
||||
if c.PromptCacheRO {
|
||||
predictOptions = append(predictOptions, llama.EnablePromptCacheRO)
|
||||
}
|
||||
|
||||
if c.PromptCachePath != "" {
|
||||
// Create parent directory
|
||||
p := filepath.Join(modelPath, c.PromptCachePath)
|
||||
os.MkdirAll(filepath.Dir(p), 0755)
|
||||
predictOptions = append(predictOptions, llama.SetPathPromptCache(p))
|
||||
}
|
||||
|
||||
if c.Mirostat != 0 {
|
||||
predictOptions = append(predictOptions, llama.SetMirostat(c.Mirostat))
|
||||
}
|
||||
|
||||
if c.MirostatETA != 0 {
|
||||
predictOptions = append(predictOptions, llama.SetMirostatETA(c.MirostatETA))
|
||||
}
|
||||
|
||||
if c.MirostatTAU != 0 {
|
||||
predictOptions = append(predictOptions, llama.SetMirostatTAU(c.MirostatTAU))
|
||||
}
|
||||
|
||||
if c.Debug {
|
||||
predictOptions = append(predictOptions, llama.Debug)
|
||||
}
|
||||
|
||||
predictOptions = append(predictOptions, llama.SetStopWords(c.StopWords...))
|
||||
|
||||
if c.RepeatPenalty != 0 {
|
||||
predictOptions = append(predictOptions, llama.SetPenalty(c.RepeatPenalty))
|
||||
}
|
||||
|
||||
if c.Keep != 0 {
|
||||
predictOptions = append(predictOptions, llama.SetNKeep(c.Keep))
|
||||
}
|
||||
|
||||
if c.Batch != 0 {
|
||||
predictOptions = append(predictOptions, llama.SetBatch(c.Batch))
|
||||
}
|
||||
|
||||
if c.F16 {
|
||||
predictOptions = append(predictOptions, llama.EnableF16KV)
|
||||
}
|
||||
|
||||
if c.IgnoreEOS {
|
||||
predictOptions = append(predictOptions, llama.IgnoreEOS)
|
||||
}
|
||||
|
||||
if c.Seed != 0 {
|
||||
predictOptions = append(predictOptions, llama.SetSeed(c.Seed))
|
||||
}
|
||||
|
||||
//predictOptions = append(predictOptions, llama.SetLogitBias(c.Seed))
|
||||
|
||||
predictOptions = append(predictOptions, llama.SetFrequencyPenalty(c.FrequencyPenalty))
|
||||
predictOptions = append(predictOptions, llama.SetMlock(c.MMlock))
|
||||
predictOptions = append(predictOptions, llama.SetMemoryMap(c.MMap))
|
||||
predictOptions = append(predictOptions, llama.SetPredictionMainGPU(c.MainGPU))
|
||||
predictOptions = append(predictOptions, llama.SetPredictionTensorSplit(c.TensorSplit))
|
||||
predictOptions = append(predictOptions, llama.SetTailFreeSamplingZ(c.TFZ))
|
||||
predictOptions = append(predictOptions, llama.SetTypicalP(c.TypicalP))
|
||||
|
||||
return predictOptions
|
||||
}
|
||||
|
||||
func ModelInference(s string, loader *model.ModelLoader, c Config, o *Option, tokenCallback func(string) bool) (func() (string, error), error) {
|
||||
supportStreams := false
|
||||
modelFile := c.Model
|
||||
|
||||
llamaOpts := defaultLLamaOpts(c)
|
||||
|
||||
var inferenceModel interface{}
|
||||
var err error
|
||||
if c.Backend == "" {
|
||||
inferenceModel, err = loader.GreedyLoader(modelFile, llamaOpts, uint32(c.Threads), o.assetsDestination)
|
||||
} else {
|
||||
inferenceModel, err = loader.BackendLoader(c.Backend, modelFile, llamaOpts, uint32(c.Threads), o.assetsDestination)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -129,33 +271,36 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
|
||||
supportStreams = true
|
||||
|
||||
fn = func() (string, error) {
|
||||
//model.ProcessInput("You are a chatbot that is very good at chatting. blah blah blah")
|
||||
stopWord := "\n"
|
||||
if len(c.StopWords) > 0 {
|
||||
stopWord = c.StopWords[0]
|
||||
}
|
||||
|
||||
if err := model.ProcessInput(s); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
response := model.GenerateResponse(c.Maxtokens, stopWord, float32(c.Temperature), float32(c.TopP), tokenCallback)
|
||||
|
||||
return response, nil
|
||||
}
|
||||
case *gpt2.StableLM:
|
||||
case *transformers.GPTNeoX:
|
||||
fn = func() (string, error) {
|
||||
// Generate the prediction using the language model
|
||||
predictOptions := []gpt2.PredictOption{
|
||||
gpt2.SetTemperature(c.Temperature),
|
||||
gpt2.SetTopP(c.TopP),
|
||||
gpt2.SetTopK(c.TopK),
|
||||
gpt2.SetTokens(c.Maxtokens),
|
||||
gpt2.SetThreads(c.Threads),
|
||||
predictOptions := []transformers.PredictOption{
|
||||
transformers.SetTemperature(c.Temperature),
|
||||
transformers.SetTopP(c.TopP),
|
||||
transformers.SetTopK(c.TopK),
|
||||
transformers.SetTokens(c.Maxtokens),
|
||||
transformers.SetThreads(c.Threads),
|
||||
}
|
||||
|
||||
if c.Batch != 0 {
|
||||
predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
|
||||
predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
|
||||
}
|
||||
|
||||
if c.Seed != 0 {
|
||||
predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
|
||||
predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
|
||||
}
|
||||
|
||||
return model.Predict(
|
||||
@@ -163,23 +308,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
|
||||
predictOptions...,
|
||||
)
|
||||
}
|
||||
case *gpt2.GPT2:
|
||||
case *transformers.Replit:
|
||||
fn = func() (string, error) {
|
||||
// Generate the prediction using the language model
|
||||
predictOptions := []gpt2.PredictOption{
|
||||
gpt2.SetTemperature(c.Temperature),
|
||||
gpt2.SetTopP(c.TopP),
|
||||
gpt2.SetTopK(c.TopK),
|
||||
gpt2.SetTokens(c.Maxtokens),
|
||||
gpt2.SetThreads(c.Threads),
|
||||
predictOptions := []transformers.PredictOption{
|
||||
transformers.SetTemperature(c.Temperature),
|
||||
transformers.SetTopP(c.TopP),
|
||||
transformers.SetTopK(c.TopK),
|
||||
transformers.SetTokens(c.Maxtokens),
|
||||
transformers.SetThreads(c.Threads),
|
||||
}
|
||||
|
||||
if c.Batch != 0 {
|
||||
predictOptions = append(predictOptions, gpt2.SetBatch(c.Batch))
|
||||
predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
|
||||
}
|
||||
|
||||
if c.Seed != 0 {
|
||||
predictOptions = append(predictOptions, gpt2.SetSeed(c.Seed))
|
||||
predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
|
||||
}
|
||||
|
||||
return model.Predict(
|
||||
@@ -187,23 +332,23 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
|
||||
predictOptions...,
|
||||
)
|
||||
}
|
||||
case *gptj.GPTJ:
|
||||
case *transformers.Starcoder:
|
||||
fn = func() (string, error) {
|
||||
// Generate the prediction using the language model
|
||||
predictOptions := []gptj.PredictOption{
|
||||
gptj.SetTemperature(c.Temperature),
|
||||
gptj.SetTopP(c.TopP),
|
||||
gptj.SetTopK(c.TopK),
|
||||
gptj.SetTokens(c.Maxtokens),
|
||||
gptj.SetThreads(c.Threads),
|
||||
predictOptions := []transformers.PredictOption{
|
||||
transformers.SetTemperature(c.Temperature),
|
||||
transformers.SetTopP(c.TopP),
|
||||
transformers.SetTopK(c.TopK),
|
||||
transformers.SetTokens(c.Maxtokens),
|
||||
transformers.SetThreads(c.Threads),
|
||||
}
|
||||
|
||||
if c.Batch != 0 {
|
||||
predictOptions = append(predictOptions, gptj.SetBatch(c.Batch))
|
||||
predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
|
||||
}
|
||||
|
||||
if c.Seed != 0 {
|
||||
predictOptions = append(predictOptions, gptj.SetSeed(c.Seed))
|
||||
predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
|
||||
}
|
||||
|
||||
return model.Predict(
|
||||
@@ -211,6 +356,176 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
|
||||
predictOptions...,
|
||||
)
|
||||
}
|
||||
case *transformers.MPT:
|
||||
    fn = func() (string, error) {
        // Generate the prediction using the language model
        predictOptions := []transformers.PredictOption{
            transformers.SetTemperature(c.Temperature),
            transformers.SetTopP(c.TopP),
            transformers.SetTopK(c.TopK),
            transformers.SetTokens(c.Maxtokens),
            transformers.SetThreads(c.Threads),
        }

        if c.Batch != 0 {
            predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
        }

        if c.Seed != 0 {
            predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
        }

        return model.Predict(
            s,
            predictOptions...,
        )
    }
case *bloomz.Bloomz:
    fn = func() (string, error) {
        // Generate the prediction using the language model
        predictOptions := []bloomz.PredictOption{
            bloomz.SetTemperature(c.Temperature),
            bloomz.SetTopP(c.TopP),
            bloomz.SetTopK(c.TopK),
            bloomz.SetTokens(c.Maxtokens),
            bloomz.SetThreads(c.Threads),
        }

        if c.Seed != 0 {
            predictOptions = append(predictOptions, bloomz.SetSeed(c.Seed))
        }

        return model.Predict(
            s,
            predictOptions...,
        )
    }
case *transformers.Falcon:
    fn = func() (string, error) {
        // Generate the prediction using the language model
        predictOptions := []transformers.PredictOption{
            transformers.SetTemperature(c.Temperature),
            transformers.SetTopP(c.TopP),
            transformers.SetTopK(c.TopK),
            transformers.SetTokens(c.Maxtokens),
            transformers.SetThreads(c.Threads),
        }

        if c.Batch != 0 {
            predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
        }

        if c.Seed != 0 {
            predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
        }

        return model.Predict(
            s,
            predictOptions...,
        )
    }
case *transformers.GPTJ:
    fn = func() (string, error) {
        // Generate the prediction using the language model
        predictOptions := []transformers.PredictOption{
            transformers.SetTemperature(c.Temperature),
            transformers.SetTopP(c.TopP),
            transformers.SetTopK(c.TopK),
            transformers.SetTokens(c.Maxtokens),
            transformers.SetThreads(c.Threads),
        }

        if c.Batch != 0 {
            predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
        }

        if c.Seed != 0 {
            predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
        }

        return model.Predict(
            s,
            predictOptions...,
        )
    }
case *transformers.Dolly:
    fn = func() (string, error) {
        // Generate the prediction using the language model
        predictOptions := []transformers.PredictOption{
            transformers.SetTemperature(c.Temperature),
            transformers.SetTopP(c.TopP),
            transformers.SetTopK(c.TopK),
            transformers.SetTokens(c.Maxtokens),
            transformers.SetThreads(c.Threads),
        }

        if c.Batch != 0 {
            predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
        }

        if c.Seed != 0 {
            predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
        }

        return model.Predict(
            s,
            predictOptions...,
        )
    }
case *transformers.GPT2:
    fn = func() (string, error) {
        // Generate the prediction using the language model
        predictOptions := []transformers.PredictOption{
            transformers.SetTemperature(c.Temperature),
            transformers.SetTopP(c.TopP),
            transformers.SetTopK(c.TopK),
            transformers.SetTokens(c.Maxtokens),
            transformers.SetThreads(c.Threads),
        }

        if c.Batch != 0 {
            predictOptions = append(predictOptions, transformers.SetBatch(c.Batch))
        }

        if c.Seed != 0 {
            predictOptions = append(predictOptions, transformers.SetSeed(c.Seed))
        }

        return model.Predict(
            s,
            predictOptions...,
        )
    }
case *gpt4all.Model:
    supportStreams = true

    fn = func() (string, error) {
        if tokenCallback != nil {
            model.SetTokenCallback(tokenCallback)
        }

        // Generate the prediction using the language model
        predictOptions := []gpt4all.PredictOption{
            gpt4all.SetTemperature(c.Temperature),
            gpt4all.SetTopP(c.TopP),
            gpt4all.SetTopK(c.TopK),
            gpt4all.SetTokens(c.Maxtokens),
        }

        if c.Batch != 0 {
            predictOptions = append(predictOptions, gpt4all.SetBatch(c.Batch))
        }

        str, er := model.Predict(
            s,
            predictOptions...,
        )
        // Seems that if we don't free the callback explicitly we leave functions registered (that might try to send on closed channels)
        // For instance otherwise the API returns: {"error":{"code":500,"message":"send on closed channel","type":""}}
        // after a stream event has occurred
        model.SetTokenCallback(nil)
        return str, er
    }
case *llama.LLama:
    supportStreams = true
    fn = func() (string, error) {
@@ -219,49 +534,34 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
            model.SetTokenCallback(tokenCallback)
        }

-       // Generate the prediction using the language model
-       predictOptions := []llama.PredictOption{
-           llama.SetTemperature(c.Temperature),
-           llama.SetTopP(c.TopP),
-           llama.SetTopK(c.TopK),
-           llama.SetTokens(c.Maxtokens),
-           llama.SetThreads(c.Threads),
-       }
+       predictOptions := buildLLamaPredictOptions(c, loader.ModelPath)

        if c.Debug {
            predictOptions = append(predictOptions, llama.Debug)
        }

        predictOptions = append(predictOptions, llama.SetStopWords(c.StopWords...))

        if c.RepeatPenalty != 0 {
            predictOptions = append(predictOptions, llama.SetPenalty(c.RepeatPenalty))
        }

        if c.Keep != 0 {
            predictOptions = append(predictOptions, llama.SetNKeep(c.Keep))
        }

        if c.Batch != 0 {
            predictOptions = append(predictOptions, llama.SetBatch(c.Batch))
        }

        if c.F16 {
            predictOptions = append(predictOptions, llama.EnableF16KV)
        }

        if c.IgnoreEOS {
            predictOptions = append(predictOptions, llama.IgnoreEOS)
        }

        if c.Seed != 0 {
            predictOptions = append(predictOptions, llama.SetSeed(c.Seed))
        }

-       return model.Predict(
+       str, er := model.Predict(
            s,
            predictOptions...,
        )
+       // Seems that if we don't free the callback explicitly we leave functions registered (that might try to send on closed channels)
+       // For instance otherwise the API returns: {"error":{"code":500,"message":"send on closed channel","type":""}}
+       // after a stream event has occurred
+       model.SetTokenCallback(nil)
+       return str, er
    }
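Note: the hunk above swaps the inline option construction for a `buildLLamaPredictOptions` helper that is defined outside this hunk. A minimal sketch of what such a helper could look like, assuming it simply gathers the same config-driven base options that used to be built inline (the signature and option names are inferred from the surrounding diff, not confirmed by it):

```go
// Hypothetical sketch for illustration only; the actual helper is
// defined elsewhere in this commit and may do more (for instance with
// the modelPath argument, which is unused in this sketch).
func buildLLamaPredictOptions(c Config, modelPath string) []llama.PredictOption {
	// Base sampling options taken from the model config; the caller
	// appends the conditional options (Debug, StopWords, Seed, ...)
	// exactly as shown in the hunk above.
	return []llama.PredictOption{
		llama.SetTemperature(c.Temperature),
		llama.SetTopP(c.TopP),
		llama.SetTopK(c.TopK),
		llama.SetTokens(c.Maxtokens),
		llama.SetThreads(c.Threads),
	}
}
```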
case *langchain.HuggingFace:
    fn = func() (string, error) {

        // Generate the prediction using the language model
        predictOptions := []langchain.PredictOption{
            langchain.SetModel(c.Model),
            langchain.SetMaxTokens(c.Maxtokens),
            langchain.SetTemperature(c.Temperature),
            langchain.SetStopWords(c.StopWords),
        }

        pred, er := model.PredictHuggingFace(s, predictOptions...)
        if er != nil {
            return "", er
        }
        return pred.Completion, nil
    }
}

@@ -286,7 +586,7 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
    }, nil
}

-func ComputeChoices(predInput string, input *OpenAIRequest, config *Config, loader *model.ModelLoader, cb func(string, *[]Choice), tokenCallback func(string) bool) ([]Choice, error) {
+func ComputeChoices(predInput string, input *OpenAIRequest, config *Config, o *Option, loader *model.ModelLoader, cb func(string, *[]Choice), tokenCallback func(string) bool) ([]Choice, error) {
    result := []Choice{}

    n := input.N
@@ -296,7 +596,7 @@ func ComputeChoices(predInput string, input *OpenAIRequest, config *Config, load
    }

    // get the model function to call for the result
-   predFunc, err := ModelInference(predInput, loader, *config, tokenCallback)
+   predFunc, err := ModelInference(predInput, loader, *config, o, tokenCallback)
    if err != nil {
        return result, err
    }
assets.go (new file, +6)
@@ -0,0 +1,6 @@
package main

import "embed"

//go:embed backend-assets/*
var backendAssets embed.FS
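The new `assets.go` embeds everything under `backend-assets/` into the binary. To be usable by external backend processes, an `embed.FS` like this typically has to be written back out to disk at startup; a rough sketch of how that could be done (the helper name and destination layout are assumptions, not part of this commit):

```go
// extractAssets walks the embedded filesystem and writes every file
// under dst, preserving relative paths. Hypothetical helper for
// illustration; the real project may organize this differently.
func extractAssets(dst string) error {
	return fs.WalkDir(backendAssets, "backend-assets", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		target := filepath.Join(dst, path)
		if d.IsDir() {
			return os.MkdirAll(target, 0o755)
		}
		data, err := backendAssets.ReadFile(path)
		if err != nil {
			return err
		}
		return os.WriteFile(target, data, 0o644)
	})
}
```

(This assumes the standard-library imports `io/fs`, `os`, and `path/filepath`.)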
@@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: .
-     dockerfile: Dockerfile.dev
+     dockerfile: Dockerfile
    ports:
      - 8080:8080
    env_file:

@@ -1,7 +1,11 @@
#!/bin/bash
set -e

cd /build

-make build
+if [ "$REBUILD" != "false" ]; then
+    rm -rf ./local-ai
+    make build
+fi

./local-ai "$@"
@@ -2,12 +2,135 @@

Here is a list of projects that can easily be integrated with the LocalAI backend.

-## Projects
+### Projects

- [chatbot-ui](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/) (by [@mkellerman](https://github.com/mkellerman))
- [discord-bot](https://github.com/go-skynet/LocalAI/tree/master/examples/discord-bot/) (by [@mudler](https://github.com/mudler))
- [langchain](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain/) (by [@dave-gray101](https://github.com/dave-gray101))
- [slack-bot](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-bot/) (by [@mudler](https://github.com/mudler))

### AutoGPT

_by [@mudler](https://github.com/mudler)_

This example shows how to use AutoGPT with LocalAI.

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/autoGPT/)

### Chatbot-UI

_by [@mkellerman](https://github.com/mkellerman)_

![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)

This integration shows how to use LocalAI with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui/)

There is also a separate example to show how to manually set up a model: [example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui-manual/)

### K8sGPT

_by [@mudler](https://github.com/mudler)_

This example shows how to use LocalAI inside Kubernetes with [k8sgpt](https://k8sgpt.ai).

![Screenshot from 2023-06-19 23-58-47](https://github.com/go-skynet/go-ggml-transformers.cpp/assets/2420543/cab87899-ee0a-44ce-bf22-7dd2ceb01939)

### Flowise

_by [@mudler](https://github.com/mudler)_

This example shows how to use [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise) with LocalAI.

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/flowise/)

### Discord bot

_by [@mudler](https://github.com/mudler)_

Run a Discord bot which lets you talk directly with a model

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/discord-bot/), or for a live demo you can talk with our bot in #random-bot in our Discord server.

### Langchain

_by [@dave-gray101](https://github.com/dave-gray101)_

A ready-to-use example showing end-to-end how to integrate LocalAI with langchain

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain/)

### Langchain Python

_by [@mudler](https://github.com/mudler)_

A ready-to-use example showing end-to-end how to integrate LocalAI with langchain

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-python/)

### LocalAI WebUI

_by [@dhruvgera](https://github.com/dhruvgera)_

![image](https://user-images.githubusercontent.com/42107491/235344183-44b5967d-ba22-4331-804c-8da7004a5d35.png)

A light, community-maintained web interface for LocalAI

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/localai-webui/)

### How to run rwkv models

_by [@mudler](https://github.com/mudler)_

A full example on how to run RWKV models with LocalAI

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv/)

### PrivateGPT

_by [@mudler](https://github.com/mudler)_

A full example on how to run PrivateGPT with LocalAI

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/privateGPT/)

### Slack bot

_by [@mudler](https://github.com/mudler)_

Run a Slack bot which lets you talk directly with a model

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/slack-bot/)

### Question answering on documents with llama-index

_by [@mudler](https://github.com/mudler)_

Shows how to integrate with [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents.

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/query_data/)

### Question answering on documents with langchain and chroma

_by [@mudler](https://github.com/mudler)_

Shows how to integrate with `Langchain` and `Chroma` to enable question answering on a set of documents.

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/langchain-chroma/)

### Telegram bot

_by [@mudler](https://github.com/mudler)_

![Screenshot from 2023-06-09 00-36-26](https://github.com/go-skynet/LocalAI/assets/2420543/e98b4305-fa2d-41cf-9d2f-1bb2d75ca902)

Use LocalAI to power a Telegram bot assistant, with image generation and audio support!

[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/telegram-bot/)

### Template for Runpod.io

_by [@fHachenberg](https://github.com/fHachenberg)_

Allows you to run any LocalAI-compatible model as a backend on the servers of https://runpod.io

[Check it out here](https://runpod.io/gsc?template=uv9mtqnrd0&ref=984wlcra)

## Want to contribute?
examples/autoGPT/.env (new file, +5)
@@ -0,0 +1,5 @@
OPENAI_API_KEY=sk---anystringhere
OPENAI_API_BASE=http://api:8080/v1
# Models to preload at start
# Here we configure gpt4all as gpt-3.5-turbo and bert as embeddings
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]
examples/autoGPT/README.md (new file, +32)
@@ -0,0 +1,32 @@
# AutoGPT

Example of integration with [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT).

## Run

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/autoGPT

docker-compose run --rm auto-gpt
```

Note: The example automatically downloads the `gpt4all` model as it is under a permissive license. The GPT4All model does not seem to be enough to run AutoGPT; WizardLM-7b-uncensored seems to perform better (with `f16: true`).

See the `.env` configuration file to set a different model with the [model-gallery](https://github.com/go-skynet/model-gallery) by editing `PRELOAD_MODELS`.

## Without docker

Run AutoGPT with `OPENAI_API_BASE` pointing to the LocalAI endpoint. If you run it locally, for instance:

```
OPENAI_API_BASE=http://localhost:8080 python ...
```

Note: you need models named `gpt-3.5-turbo` and `text-embedding-ada-002`. You can preload them in LocalAI at start by setting the following in the env:

```
PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]
```
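After the stack is up, it can help to verify that both preloaded models were registered before launching AutoGPT. A small sketch that lists the models via the OpenAI-compatible endpoint used in this example (the program itself is illustrative, not part of the example):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// Lists the models LocalAI has registered; after preloading you should
// see gpt-3.5-turbo and text-embedding-ada-002 in the output.
func main() {
	resp, err := http.Get("http://localhost:8080/v1/models")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}
```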
examples/autoGPT/docker-compose.yaml (new file, +42)
@@ -0,0 +1,42 @@
version: "3.9"
services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    ports:
      - 8080:8080
    env_file:
      - .env
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
  auto-gpt:
    image: significantgravitas/auto-gpt
    depends_on:
      api:
        condition: service_healthy
      redis:
        condition: service_started
    env_file:
      - .env
    environment:
      MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
      REDIS_HOST: ${REDIS_HOST:-redis}
    profiles: ["exclude-from-up"]
    volumes:
      - ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace
      - ./data:/app/data
      ## allow auto-gpt to write logs to disk
      - ./logs:/app/logs
      ## uncomment following lines if you want to make use of these files
      ## you must have them existing in the same folder as this docker-compose.yml
      #- type: bind
      #  source: ./azure.yaml
      #  target: /app/azure.yaml
      #- type: bind
      #  source: ./ai_settings.yaml
      #  target: /app/ai_settings.yaml
  redis:
    image: "redis/redis-stack-server:latest"
examples/chatbot-ui-manual/README.md (new file, +48)
@@ -0,0 +1,48 @@
# chatbot-ui

Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywrigley/chatbot-ui).

![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)

## Setup

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/chatbot-ui-manual

# (optional) Checkout a specific LocalAI tag
# git checkout -b build <TAG>

# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j

# start with docker-compose
docker-compose up -d --pull always
# or you can build the images with:
# docker-compose up -d --build
```

## Pointing chatbot-ui to a separately managed LocalAI service

If you want to use the [chatbot-ui example](https://github.com/go-skynet/LocalAI/tree/master/examples/chatbot-ui) with an externally managed LocalAI service, you can alter the `docker-compose` file so that it looks like the one below. You will notice the file is smaller, because we have removed the section that would normally start the LocalAI service. Take care to update the IP address (or FQDN) that the chatbot-ui service tries to access (marked `<<LOCALAI_IP>>` below):

```
version: '3.6'

services:
  chatgpt:
    image: ghcr.io/mckaywrigley/chatbot-ui:main
    ports:
      - 3000:3000
    environment:
      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
      - 'OPENAI_API_HOST=http://<<LOCALAI_IP>>:8080'
```

Once you've edited the docker-compose file, you can start it with `docker compose up`, then browse to `http://localhost:3000`.

## Accessing chatbot-ui

Open http://localhost:3000 for the Web UI.
examples/chatbot-ui-manual/docker-compose.yaml (new file, +24)
@@ -0,0 +1,24 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]

  chatgpt:
    image: ghcr.io/mckaywrigley/chatbot-ui:main
    ports:
      - 3000:3000
    environment:
      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
      - 'OPENAI_API_HOST=http://api:8080'

@@ -5,7 +5,6 @@ parameters:
  temperature: 0.2
  top_p: 0.7
context_size: 1024
-threads: 14
stopwords:
- "HUMAN:"
- "GPT:"
@@ -4,22 +4,20 @@ Example of integration with [mckaywrigley/chatbot-ui](https://github.com/mckaywr

![Screenshot from 2023-04-26 23-59-55](https://user-images.githubusercontent.com/2420543/234715439-98d12e03-d3ce-4f94-ab54-2b256808e05e.png)

-## Setup
+## Run

+In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`.
```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/chatbot-ui

-# (optional) Checkout a specific LocalAI tag
-# git checkout -b build <TAG>
-
-# Download gpt4all-j to models/
-wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j
-
# start with docker-compose
-docker-compose up -d --build
+docker-compose up --pull always
+
+# or you can build the images with:
+# docker-compose up -d --build
```

## Pointing chatbot-ui to a separately managed LocalAI service
@@ -3,19 +3,32 @@ version: '3.6'
services:
  api:
    image: quay.io/go-skynet/local-ai:latest
+   # As initially LocalAI will download the models defined in PRELOAD_MODELS
+   # you might need to tweak the healthcheck values here according to your network connection.
+   # Here we give a timespan of 20m to download all the required files.
+   healthcheck:
+     test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
+     interval: 1m
+     timeout: 20m
+     retries: 20
    build:
      context: ../../
-     dockerfile: Dockerfile.dev
+     dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
+     # You can preload different models here as well.
+     # See: https://github.com/go-skynet/model-gallery
+     - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]

  chatgpt:
+   depends_on:
+     api:
+       condition: service_healthy
    image: ghcr.io/mckaywrigley/chatbot-ui:main
    ports:
      - 3000:3000
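The healthcheck above polls `/readyz` because preloading the model can take a long time on a slow connection. The same wait can be scripted outside of compose; a minimal sketch (the interval and endpoint mirror the compose file, everything else is arbitrary):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// Polls the LocalAI readiness endpoint until it answers 200 OK,
// mirroring the compose healthcheck above.
func main() {
	for {
		resp, err := http.Get("http://localhost:8080/readyz")
		if err == nil && resp.StatusCode == http.StatusOK {
			resp.Body.Close()
			fmt.Println("LocalAI is ready")
			return
		}
		if resp != nil {
			resp.Body.Close()
		}
		fmt.Println("waiting for LocalAI...")
		time.Sleep(30 * time.Second)
	}
}
```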
@@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
-     dockerfile: Dockerfile.dev
+     dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
examples/flowise/README.md (new file, +30)
@@ -0,0 +1,30 @@
# flowise

Example of integration with [FlowiseAI/Flowise](https://github.com/FlowiseAI/Flowise).

![Screenshot from 2023-05-30 18-01-03](https://github.com/go-skynet/LocalAI/assets/2420543/02458782-0549-4131-971c-95ee56ec1af8)

You can check a demo video in the Flowise PR: https://github.com/FlowiseAI/Flowise/pull/123

## Run

In this example LocalAI will download the gpt4all model and set it up as "gpt-3.5-turbo". See the `docker-compose.yaml`.

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/flowise

# start with docker-compose
docker-compose up --pull always
```

## Accessing flowise

Open http://localhost:3000.

## Using LocalAI

Search for LocalAI among the integrations, and use `http://api:8080/` as the URL.
examples/flowise/docker-compose.yaml (new file, +37)
@@ -0,0 +1,37 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    # As initially LocalAI will download the models defined in PRELOAD_MODELS
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
      # You can preload different models here as well.
      # See: https://github.com/go-skynet/model-gallery
      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
  flowise:
    depends_on:
      api:
        condition: service_healthy
    image: flowiseai/flowise
    ports:
      - 3000:3000
    volumes:
      - ~/.flowise:/root/.flowise
    command: /bin/sh -c "sleep 3; flowise start"
examples/k8sgpt/README.md (new file, +70)
@@ -0,0 +1,70 @@
# k8sgpt example

This example shows how to use LocalAI with k8sgpt.

![Screenshot from 2023-06-19 23-58-47](https://github.com/go-skynet/go-ggml-transformers.cpp/assets/2420543/cab87899-ee0a-44ce-bf22-7dd2ceb01939)

## Create the cluster locally with Kind (optional)

If you want to test this locally without a remote Kubernetes cluster, you can use kind.

Install [kind](https://kind.sigs.k8s.io/) and create a cluster:

```
kind create cluster
```

## Setup LocalAI

We will use [helm](https://helm.sh/docs/intro/install/):

```
helm repo add go-skynet https://go-skynet.github.io/helm-charts/
helm repo update

# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/k8sgpt

# modify values.yaml preload_models with the models you want to install.
# CHANGE the URL to a model in huggingface.
helm install local-ai go-skynet/local-ai --create-namespace --namespace local-ai --values values.yaml
```

## Setup K8sGPT

```
# Install k8sgpt
helm repo add k8sgpt https://charts.k8sgpt.ai/
helm repo update
helm install release k8sgpt/k8sgpt-operator -n k8sgpt-operator-system --create-namespace
```

Apply the k8sgpt-operator configuration:

```
kubectl apply -f - << EOF
apiVersion: core.k8sgpt.ai/v1alpha1
kind: K8sGPT
metadata:
  name: k8sgpt-local-ai
  namespace: default
spec:
  backend: localai
  baseUrl: http://local-ai.local-ai.svc.cluster.local:8080/v1
  model: gpt-3.5-turbo
  noCache: false
  version: v0.3.0
  enableAI: true
EOF
```

## Test

Apply a broken pod:

```
kubectl apply -f broken-pod.yaml
```
examples/k8sgpt/broken-pod.yaml (new file, +14)
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
  name: broken-pod
spec:
  containers:
    - name: broken-pod
      image: nginx:1.a.b.c
      livenessProbe:
        httpGet:
          path: /
          port: 90
        initialDelaySeconds: 3
        periodSeconds: 3
examples/k8sgpt/values.yaml (new file, +95)
@@ -0,0 +1,95 @@
replicaCount: 1

deployment:
  # https://quay.io/repository/go-skynet/local-ai?tab=tags
  image: quay.io/go-skynet/local-ai:latest
  env:
    threads: 4
    debug: "true"
    context_size: 512
    preload_models: '[{ "url": "github:go-skynet/model-gallery/wizard.yaml", "name": "gpt-3.5-turbo", "overrides": { "parameters": { "model": "WizardLM-7B-uncensored.ggmlv3.q5_1" }},"files": [ { "uri": "https://huggingface.co//WizardLM-7B-uncensored-GGML/resolve/main/WizardLM-7B-uncensored.ggmlv3.q5_1.bin", "sha256": "d92a509d83a8ea5e08ba4c2dbaf08f29015932dc2accd627ce0665ac72c2bb2b", "filename": "WizardLM-7B-uncensored.ggmlv3.q5_1" }]}]'
  modelsPath: "/models"

resources:
  {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

# Prompt templates to include
# Note: the keys of this map will be the names of the prompt template files
promptTemplates:
  {}
  # ggml-gpt4all-j.tmpl: |
  #   The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
  #   ### Prompt:
  #   {{.Input}}
  #   ### Response:

# Models to download at runtime
models:
  # Whether to force download models even if they already exist
  forceDownload: false

  # The list of URLs to download models from
  # Note: the name of the file will be the name of the loaded model
  list:
    #- url: "https://gpt4all.io/models/ggml-gpt4all-j.bin"
    #  basicAuth: base64EncodedCredentials

  # Persistent storage for models and prompt templates.
  # PVC and HostPath are mutually exclusive. If both are enabled,
  # PVC configuration takes precedence. If neither are enabled, ephemeral
  # storage is used.
  persistence:
    pvc:
      enabled: false
      size: 6Gi
      accessModes:
        - ReadWriteOnce

      annotations: {}

      # Optional
      storageClass: ~

    hostPath:
      enabled: false
      path: "/models"

service:
  type: ClusterIP
  port: 8080
  annotations: {}
  # If using an AWS load balancer, you'll need to override the default 60s load balancer idle timeout
  # service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1200"

ingress:
  enabled: false
  className: ""
  annotations:
    {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  # - secretName: chart-example-tls
  #   hosts:
  #     - chart-example.local

nodeSelector: {}

tolerations: []

affinity: {}
examples/langchain-chroma/.env.example (new file, +5)
@@ -0,0 +1,5 @@
THREADS=4
CONTEXT_SIZE=512
MODELS_PATH=/models
DEBUG=true
# BUILD_TYPE=generic
examples/langchain-chroma/.gitignore (new file, vendored, +4)
@@ -0,0 +1,4 @@
db/
state_of_the_union.txt
models/bert
models/ggml-gpt4all-j
examples/langchain-chroma/README.md (new file, +63)
@@ -0,0 +1,63 @@
# Data query example

This example makes use of [langchain and chroma](https://blog.langchain.dev/langchain-chroma/) to enable question answering on a set of documents.

## Setup

Download the models and start the API:

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/langchain-chroma

wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O models/bert
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j

# configure your .env
# NOTE: ensure that THREADS does not exceed your machine's CPU cores
mv .env.example .env

# start with docker-compose
docker-compose up -d --build

# tail the logs & wait until the build completes
docker logs -f langchain-chroma-api-1
```

### Python requirements

```
pip install -r requirements.txt
```

### Create a storage

In this step we will create a local vector database from our document set, so later we can ask questions on it with the LLM.

Note: **OPENAI_API_KEY** is not required. However, the library might fail if no API key is passed in, so an arbitrary string can be used.

```bash
export OPENAI_API_BASE=http://localhost:8080/v1
export OPENAI_API_KEY=sk-

wget https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt
python store.py
```

After it finishes, a directory "db" will be created with the vector index database.

## Query

We can now query the dataset.

```bash
export OPENAI_API_BASE=http://localhost:8080/v1
export OPENAI_API_KEY=sk-

python query.py
# President Trump recently stated during a press conference regarding tax reform legislation that "we're getting rid of all these loopholes." He also mentioned that he wants to simplify the system further through changes such as increasing the standard deduction amount and making other adjustments aimed at reducing taxpayers' overall burden.
```

Keep in mind that results can be hit or miss!
examples/langchain-chroma/docker-compose.yml (new file, +15)
@@ -0,0 +1,15 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    env_file:
      - ../../.env
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
examples/langchain-chroma/models/embeddings.yaml (new file, +6)
@@ -0,0 +1,6 @@
name: text-embedding-ada-002
parameters:
  model: bert
threads: 4
backend: bert-embeddings
embeddings: true
examples/langchain-chroma/models/gpt-3.5-turbo.yaml (new file, +16)
@@ -0,0 +1,16 @@
name: gpt-3.5-turbo
parameters:
  model: ggml-gpt4all-j
  top_k: 80
  temperature: 0.2
  top_p: 0.7
context_size: 1024
stopwords:
- "HUMAN:"
- "GPT:"
roles:
  user: " "
  system: " "
template:
  completion: completion
  chat: gpt4all
examples/langchain-chroma/query.py (new file, +23)
@@ -0,0 +1,23 @@
import os
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.vectorstores.base import VectorStoreRetriever

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

# Load and process the text
embedding = OpenAIEmbeddings()
persist_directory = 'db'

# Now we can load the persisted database from disk, and use it as normal.
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path)
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
retriever = VectorStoreRetriever(vectorstore=vectordb)
qa = RetrievalQA.from_llm(llm=llm, retriever=retriever)

query = "What the president said about taxes ?"
print(qa.run(query))
examples/langchain-chroma/requirements.txt (new file, +4)
@@ -0,0 +1,4 @@
langchain==0.0.160
openai==0.27.6
chromadb==0.3.21
llama-index==0.6.2
examples/langchain-chroma/store.py (new executable file, +25)
@@ -0,0 +1,25 @@
import os
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import TextLoader

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

# Load and process the text
loader = TextLoader('state_of_the_union.txt')
documents = loader.load()

text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=70)
texts = text_splitter.split_documents(documents)

# Embed and store the texts
# Supplying a persist_directory will store the embeddings on disk
persist_directory = 'db'

embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
vectordb = Chroma.from_documents(documents=texts, embedding=embedding, persist_directory=persist_directory)

vectordb.persist()
vectordb = None
examples/langchain-huggingface/README.md (new file, +68)
@@ -0,0 +1,68 @@
# Data query example

Example of integration with the HuggingFace Inference API with help of [langchaingo](https://github.com/tmc/langchaingo).

## Setup

Download LocalAI and start the API:

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/langchain-huggingface

docker-compose up -d
```

Note: ensure you've set the `HUGGINGFACEHUB_API_TOKEN` environment variable; you can generate it
on the [Settings / Access Tokens](https://huggingface.co/settings/tokens) page of the HuggingFace site.

This is an example `.env` file for LocalAI:

```ini
MODELS_PATH=/models
CONTEXT_SIZE=512
HUGGINGFACEHUB_API_TOKEN=hg_123456
```

## Using remote models

Now you can use any remote model available via the HuggingFace API. For example, let's enable the
[gpt2](https://huggingface.co/gpt2) model in the `gpt-3.5-turbo.yaml` config:

```yml
name: gpt-3.5-turbo
parameters:
  model: gpt2
  top_k: 80
  temperature: 0.2
  top_p: 0.7
context_size: 1024
backend: "langchain-huggingface"
stopwords:
- "HUMAN:"
- "GPT:"
roles:
  user: " "
  system: " "
template:
  completion: completion
  chat: gpt4all
```

Here you can see that `parameters.model` is set to `gpt2` and `backend` to `langchain-huggingface`.

## How to use

```shell
# Now API is accessible at localhost:8080
curl http://localhost:8080/v1/models
# {"object":"list","data":[{"id":"gpt-3.5-turbo","object":"model"}]}

curl http://localhost:8080/v1/completions -H "Content-Type: application/json" -d '{
  "model": "gpt-3.5-turbo",
  "prompt": "A long time ago in a galaxy far, far away",
  "temperature": 0.7
}'
```
examples/langchain-huggingface/docker-compose.yml (new file, +15)
@@ -0,0 +1,15 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    env_file:
      - ../../.env
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
examples/langchain-huggingface/models/completion.tmpl (new file, +1)
@@ -0,0 +1 @@
{{.Input}}
examples/langchain-huggingface/models/gpt-3.5-turbo.yaml (new file, +17)
@@ -0,0 +1,17 @@
name: gpt-3.5-turbo
parameters:
  model: gpt2
  top_k: 80
  temperature: 0.2
  top_p: 0.7
context_size: 1024
backend: "langchain-huggingface"
stopwords:
- "HUMAN:"
- "GPT:"
roles:
  user: " "
  system: " "
template:
  completion: completion
  chat: gpt4all
examples/langchain-huggingface/models/gpt4all.tmpl (new file, +4)
@@ -0,0 +1,4 @@
The prompt below is a question to answer, a task to complete, or a conversation to respond to; decide which and write an appropriate response.
### Prompt:
{{.Input}}
### Response:
examples/langchain-python/README.md (new file, +29)
@@ -0,0 +1,29 @@
## Langchain-python

Langchain example from the [quickstart](https://python.langchain.com/en/latest/getting_started/getting_started.html).

To interact with langchain, you can just set the `OPENAI_API_BASE` URL and provide an arbitrary string as the API token.

See the example below:

```
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/langchain-python

# start with docker-compose
docker-compose up --pull always

pip install langchain
pip install openai

export OPENAI_API_BASE=http://localhost:8080
# Note: **OPENAI_API_KEY** is not required. However, the library might fail if no API key is passed in, so an arbitrary string can be used.
export OPENAI_API_KEY=sk-

python test.py
# A good company name for a company that makes colorful socks would be "Colorsocks".

python agent.py
```
examples/langchain-python/agent.py (new file, +44)
@@ -0,0 +1,44 @@
## Forked from / based on https://gist.github.com/wiseman/4a706428eaabf4af1002a07a114f61d6

from io import StringIO
import sys
import os
from typing import Dict, Optional

from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents.tools import Tool
from langchain.llms import OpenAI

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo')

class PythonREPL:
    """Simulates a standalone Python REPL."""

    def __init__(self):
        pass

    def run(self, command: str) -> str:
        """Run command and returns anything printed."""
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        try:
            exec(command, globals())
            sys.stdout = old_stdout
            output = mystdout.getvalue()
        except Exception as e:
            sys.stdout = old_stdout
            output = str(e)
        return output

llm = OpenAI(temperature=0.0, openai_api_base=base_path, model_name=model_name)
python_repl = Tool(
    "Python REPL",
    PythonREPL().run,
    """A Python shell. Use this to execute python commands. Input should be a valid python command.
    If you expect output it should be printed out.""",
)
tools = [python_repl]
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
agent.run("What is the 10th fibonacci number?")
examples/langchain-python/docker-compose.yaml (new file, +27)
@@ -0,0 +1,27 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    # As initially LocalAI will download the models defined in PRELOAD_MODELS
    # you might need to tweak the healthcheck values here according to your network connection.
    # Here we give a timespan of 20m to download all the required files.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080/readyz"]
      interval: 1m
      timeout: 20m
      retries: 20
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:
      - DEBUG=true
      - MODELS_PATH=/models
      # You can preload different models here as well.
      # See: https://github.com/go-skynet/model-gallery
      - 'PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]'
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
examples/langchain-python/test.py (new file, +6)
@@ -0,0 +1,6 @@
from langchain.llms import OpenAI

llm = OpenAI(temperature=0.9, model_name="gpt-3.5-turbo")
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
examples/langchain/PY.Dockerfile (new file, +5)
@@ -0,0 +1,5 @@
FROM python:3.10-bullseye
COPY ./langchainpy-localai-example /app
WORKDIR /app
RUN pip install --no-cache-dir -r requirements.txt
ENTRYPOINT [ "python", "./full_demo.py" ]
@@ -1,10 +1,6 @@
# langchain

-Example of using langchain in TypeScript, with the standard OpenAI llm module, and LocalAI.
-
-Example for python langchain to follow at a later date
-
-Set up to make it easy to modify the `index.mts` file to look like any langchain example file.
+Example of using langchain, with the standard OpenAI llm module, and LocalAI. Has docker compose profiles for both the Typescript and Python versions.

**Please Note** - This is a tech demo example at this time. ggml-gpt4all-j has pretty terrible results for most langchain applications with the settings used in this example.

@@ -22,10 +18,13 @@ cd LocalAI/examples/langchain
# Download gpt4all-j to models/
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j

-# start with docker-compose
-docker-compose up --build
+# start with docker-compose for typescript!
+docker-compose --profile ts up --build
+
+# or start with docker-compose for python!
+docker-compose --profile py up --build
```

## Copyright

-Some of the example code in index.mts is adapted from the langchainjs project and is Copyright (c) Harrison Chase. Used under the terms of the MIT license, as is the remainder of this code.
+Some of the example code in index.mts and full_demo.py is adapted from the langchainjs project and is Copyright (c) Harrison Chase. Used under the terms of the MIT license, as is the remainder of this code.
@@ -5,7 +5,7 @@ services:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
-     dockerfile: Dockerfile.dev
+     dockerfile: Dockerfile
    ports:
      - 8080:8080
    environment:

@@ -15,11 +15,29 @@ services:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]

-  langchainjs:
+  js:
    build:
      context: .
      dockerfile: JS.Dockerfile
    profiles:
      - js
      - ts
    depends_on:
      - "api"
    environment:
      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
      - 'OPENAI_API_HOST=http://api:8080/v1'
      - 'OPENAI_API_BASE=http://api:8080/v1'
      - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g'

  py:
    build:
      context: .
      dockerfile: PY.Dockerfile
    profiles:
      - py
    depends_on:
      - "api"
    environment:
      - 'OPENAI_API_KEY=sk-XXXXXXXXXXXXXXXXXXXX'
      - 'OPENAI_API_BASE=http://api:8080/v1'
      - 'MODEL_NAME=gpt-3.5-turbo' #gpt-3.5-turbo' # ggml-gpt4all-j' # ggml-koala-13B-4bit-128g'
@@ -13,7 +13,7 @@
    "typeorm": "^0.3.15"
  },
  "devDependencies": {
-   "@types/node": "^18.16.3",
+   "@types/node": "^18.16.4",
    "typescript": "^5.0.4"
  }
},
@@ -45,9 +45,9 @@
    "integrity": "sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw=="
  },
  "node_modules/@types/node": {
-   "version": "18.16.3",
-   "resolved": "https://registry.npmjs.org/@types/node/-/node-18.16.3.tgz",
-   "integrity": "sha512-OPs5WnnT1xkCBiuQrZA4+YAV4HEJejmHneyraIaxsbev5yCEr6KMwINNFP9wQeFIw8FWcoTqF3vQsa5CDaI+8Q==",
+   "version": "18.16.4",
+   "resolved": "https://registry.npmjs.org/@types/node/-/node-18.16.4.tgz",
+   "integrity": "sha512-LUhvPmAKAbgm+p/K11IWszLZVoZDlMF4NRmqbhEzDz/CnCuehPkZXwZbBCKGJsgjnuVejotBwM7B3Scrq4EqDw==",
    "dev": true
  },
  "node_modules/@types/retry": {
@@ -137,13 +137,6 @@
    "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz",
    "integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA=="
  },
- "node_modules/boolbase": {
-   "version": "1.0.0",
-   "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
-   "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==",
-   "optional": true,
-   "peer": true
- },
  "node_modules/brace-expansion": {
    "version": "2.0.1",
    "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
@@ -209,73 +202,6 @@
      "url": "https://github.com/chalk/ansi-styles?sponsor=1"
    }
  },
- "node_modules/cheerio": {
-   "version": "1.0.0-rc.12",
-   "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz",
-   "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "cheerio-select": "^2.1.0",
-     "dom-serializer": "^2.0.0",
-     "domhandler": "^5.0.3",
-     "domutils": "^3.0.1",
-     "htmlparser2": "^8.0.1",
-     "parse5": "^7.0.0",
-     "parse5-htmlparser2-tree-adapter": "^7.0.0"
-   },
-   "engines": {
-     "node": ">= 6"
-   },
-   "funding": {
-     "url": "https://github.com/cheeriojs/cheerio?sponsor=1"
-   }
- },
- "node_modules/cheerio-select": {
-   "version": "2.1.0",
-   "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz",
-   "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "boolbase": "^1.0.0",
-     "css-select": "^5.1.0",
-     "css-what": "^6.1.0",
-     "domelementtype": "^2.3.0",
-     "domhandler": "^5.0.3",
-     "domutils": "^3.0.1"
-   },
-   "funding": {
-     "url": "https://github.com/sponsors/fb55"
-   }
- },
- "node_modules/cheerio/node_modules/parse5": {
-   "version": "7.1.2",
-   "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz",
-   "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "entities": "^4.4.0"
-   },
-   "funding": {
-     "url": "https://github.com/inikulin/parse5?sponsor=1"
-   }
- },
- "node_modules/cheerio/node_modules/parse5-htmlparser2-tree-adapter": {
-   "version": "7.0.0",
-   "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz",
-   "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "domhandler": "^5.0.2",
-     "parse5": "^7.0.0"
-   },
-   "funding": {
-     "url": "https://github.com/inikulin/parse5?sponsor=1"
-   }
- },
  "node_modules/cli-highlight": {
    "version": "2.1.11",
    "resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz",
@@ -379,36 +305,6 @@
      "node-fetch": "2.6.7"
    }
  },
- "node_modules/css-select": {
-   "version": "5.1.0",
-   "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz",
-   "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "boolbase": "^1.0.0",
-     "css-what": "^6.1.0",
-     "domhandler": "^5.0.2",
-     "domutils": "^3.0.1",
-     "nth-check": "^2.0.1"
-   },
-   "funding": {
-     "url": "https://github.com/sponsors/fb55"
-   }
- },
- "node_modules/css-what": {
-   "version": "6.1.0",
-   "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz",
-   "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==",
-   "optional": true,
-   "peer": true,
-   "engines": {
-     "node": ">= 6"
-   },
-   "funding": {
-     "url": "https://github.com/sponsors/fb55"
-   }
- },
  "node_modules/debug": {
    "version": "4.3.4",
    "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
@@ -433,65 +329,6 @@
      "node": ">=0.4.0"
    }
  },
- "node_modules/dom-serializer": {
-   "version": "2.0.0",
-   "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz",
-   "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "domelementtype": "^2.3.0",
-     "domhandler": "^5.0.2",
-     "entities": "^4.2.0"
-   },
-   "funding": {
-     "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
-   }
- },
- "node_modules/domelementtype": {
-   "version": "2.3.0",
-   "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
-   "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==",
-   "funding": [
-     {
-       "type": "github",
-       "url": "https://github.com/sponsors/fb55"
-     }
-   ],
-   "optional": true,
-   "peer": true
- },
- "node_modules/domhandler": {
-   "version": "5.0.3",
-   "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz",
-   "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "domelementtype": "^2.3.0"
-   },
-   "engines": {
-     "node": ">= 4"
-   },
-   "funding": {
-     "url": "https://github.com/fb55/domhandler?sponsor=1"
-   }
- },
- "node_modules/domutils": {
-   "version": "3.1.0",
-   "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz",
-   "integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "dom-serializer": "^2.0.0",
-     "domelementtype": "^2.3.0",
-     "domhandler": "^5.0.3"
-   },
-   "funding": {
-     "url": "https://github.com/fb55/domutils?sponsor=1"
-   }
- },
  "node_modules/dotenv": {
    "version": "16.0.3",
    "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz",
@@ -505,19 +342,6 @@
    "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
    "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
  },
- "node_modules/entities": {
-   "version": "4.5.0",
-   "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
-   "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
-   "optional": true,
-   "peer": true,
-   "engines": {
-     "node": ">=0.12"
-   },
-   "funding": {
-     "url": "https://github.com/fb55/entities?sponsor=1"
-   }
- },
  "node_modules/escalade": {
    "version": "3.1.1",
    "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
@@ -623,26 +447,6 @@
      "node": "*"
    }
  },
- "node_modules/htmlparser2": {
-   "version": "8.0.2",
-   "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz",
-   "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==",
-   "funding": [
-     "https://github.com/fb55/htmlparser2?sponsor=1",
-     {
-       "type": "github",
-       "url": "https://github.com/sponsors/fb55"
-     }
-   ],
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "domelementtype": "^2.3.0",
-     "domhandler": "^5.0.3",
-     "domutils": "^3.0.1",
-     "entities": "^4.4.0"
-   }
- },
  "node_modules/ieee754": {
    "version": "1.2.1",
    "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
@@ -962,19 +766,6 @@
      }
    }
  },
- "node_modules/nth-check": {
-   "version": "2.1.1",
-   "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
-   "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
-   "optional": true,
-   "peer": true,
-   "dependencies": {
-     "boolbase": "^1.0.0"
-   },
-   "funding": {
-     "url": "https://github.com/fb55/nth-check?sponsor=1"
-   }
- },
  "node_modules/num-sort": {
    "version": "2.1.0",
    "resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz",
@@ -1457,9 +1248,9 @@
    "integrity": "sha512-Uy0+khmZqUrUGm5dmMqVlnvufZRSK0FbYzVgp0UMstm+F5+W2/jnEEQyc9vo1ZR/E5ZI/B1WjjoTqBqwJL6Krw=="
  },
  "@types/node": {
-   "version": "18.16.3",
-   "resolved": "https://registry.npmjs.org/@types/node/-/node-18.16.3.tgz",
-   "integrity": "sha512-OPs5WnnT1xkCBiuQrZA4+YAV4HEJejmHneyraIaxsbev5yCEr6KMwINNFP9wQeFIw8FWcoTqF3vQsa5CDaI+8Q==",
+   "version": "18.16.4",
+   "resolved": "https://registry.npmjs.org/@types/node/-/node-18.16.4.tgz",
+   "integrity": "sha512-LUhvPmAKAbgm+p/K11IWszLZVoZDlMF4NRmqbhEzDz/CnCuehPkZXwZbBCKGJsgjnuVejotBwM7B3Scrq4EqDw==",
    "dev": true
  },
  "@types/retry": {
@@ -1520,13 +1311,6 @@
    "resolved": "https://registry.npmjs.org/binary-search/-/binary-search-1.3.6.tgz",
    "integrity": "sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA=="
  },
- "boolbase": {
-   "version": "1.0.0",
-   "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
-   "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==",
-   "optional": true,
-   "peer": true
- },
  "brace-expansion": {
    "version": "2.0.1",
    "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
@@ -1568,60 +1352,6 @@
      }
    }
  },
- "cheerio": {
-   "version": "1.0.0-rc.12",
-   "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0-rc.12.tgz",
-   "integrity": "sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==",
-   "optional": true,
-   "peer": true,
-   "requires": {
-     "cheerio-select": "^2.1.0",
-     "dom-serializer": "^2.0.0",
-     "domhandler": "^5.0.3",
-     "domutils": "^3.0.1",
-     "htmlparser2": "^8.0.1",
-     "parse5": "^7.0.0",
-     "parse5-htmlparser2-tree-adapter": "^7.0.0"
-   },
-   "dependencies": {
-     "parse5": {
-       "version": "7.1.2",
-       "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.1.2.tgz",
-       "integrity": "sha512-Czj1WaSVpaoj0wbhMzLmWD69anp2WH7FXMB9n1Sy8/ZFF9jolSQVMu1Ij5WIyGmcBmhk7EOndpO4mIpihVqAXw==",
-       "optional": true,
-       "peer": true,
-       "requires": {
-         "entities": "^4.4.0"
-       }
-     },
-     "parse5-htmlparser2-tree-adapter": {
-       "version": "7.0.0",
-       "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.0.0.tgz",
-       "integrity": "sha512-B77tOZrqqfUfnVcOrUvfdLbz4pu4RopLD/4vmu3HUPswwTA8OH0EMW9BlWR2B0RCoiZRAHEUu7IxeP1Pd1UU+g==",
-       "optional": true,
-       "peer": true,
-       "requires": {
-         "domhandler": "^5.0.2",
-         "parse5": "^7.0.0"
-       }
-     }
-   }
- },
- "cheerio-select": {
-   "version": "2.1.0",
-   "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz",
-   "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==",
-   "optional": true,
-   "peer": true,
-   "requires": {
-     "boolbase": "^1.0.0",
-     "css-select": "^5.1.0",
-     "css-what": "^6.1.0",
-     "domelementtype": "^2.3.0",
-     "domhandler": "^5.0.3",
-     "domutils": "^3.0.1"
-   }
- },
  "cli-highlight": {
    "version": "2.1.11",
    "resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz",
@@ -1705,27 +1435,6 @@
      "node-fetch": "2.6.7"
    }
  },
- "css-select": {
-   "version": "5.1.0",
-   "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz",
-   "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==",
-   "optional": true,
-   "peer": true,
-   "requires": {
-     "boolbase": "^1.0.0",
-     "css-what": "^6.1.0",
-     "domhandler": "^5.0.2",
-     "domutils": "^3.0.1",
-     "nth-check": "^2.0.1"
-   }
- },
- "css-what": {
-   "version": "6.1.0",
-   "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz",
-   "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==",
-   "optional": true,
-   "peer": true
- },
  "debug": {
    "version": "4.3.4",
    "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
|
||||
@@ -1739,47 +1448,6 @@
|
||||
"resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
|
||||
"integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ=="
|
||||
},
|
||||
"dom-serializer": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz",
|
||||
"integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
"domelementtype": "^2.3.0",
|
||||
"domhandler": "^5.0.2",
|
||||
"entities": "^4.2.0"
|
||||
}
|
||||
},
|
||||
"domelementtype": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
|
||||
"integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==",
|
||||
"optional": true,
|
||||
"peer": true
|
||||
},
|
||||
"domhandler": {
|
||||
"version": "5.0.3",
|
||||
"resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz",
|
||||
"integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
"domelementtype": "^2.3.0"
|
||||
}
|
||||
},
|
||||
"domutils": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/domutils/-/domutils-3.1.0.tgz",
|
||||
"integrity": "sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
"dom-serializer": "^2.0.0",
|
||||
"domelementtype": "^2.3.0",
|
||||
"domhandler": "^5.0.3"
|
||||
}
|
||||
},
|
||||
"dotenv": {
|
||||
"version": "16.0.3",
|
||||
"resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.0.3.tgz",
|
||||
@@ -1790,13 +1458,6 @@
|
||||
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
|
||||
"integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
|
||||
},
|
||||
"entities": {
|
||||
"version": "4.5.0",
|
||||
"resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
|
||||
"integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
|
||||
"optional": true,
|
||||
"peer": true
|
||||
},
|
||||
"escalade": {
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
|
||||
@@ -1864,19 +1525,6 @@
|
||||
"resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz",
|
||||
"integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A=="
|
||||
},
|
||||
"htmlparser2": {
|
||||
"version": "8.0.2",
|
||||
"resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz",
|
||||
"integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
"domelementtype": "^2.3.0",
|
||||
"domhandler": "^5.0.3",
|
||||
"domutils": "^3.0.1",
|
||||
"entities": "^4.4.0"
|
||||
}
|
||||
},
|
||||
"ieee754": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz",
|
||||
@@ -2024,16 +1672,6 @@
|
||||
"whatwg-url": "^5.0.0"
|
||||
}
|
||||
},
|
||||
"nth-check": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
|
||||
"integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
|
||||
"optional": true,
|
||||
"peer": true,
|
||||
"requires": {
|
||||
"boolbase": "^1.0.0"
|
||||
}
|
||||
},
|
||||
"num-sort": {
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/num-sort/-/num-sort-2.1.0.tgz",
|
||||
|
||||
@@ -11,7 +11,7 @@
   "author": "dave@gray101.com",
   "license": "MIT",
   "devDependencies": {
-    "@types/node": "^18.16.3",
+    "@types/node": "^18.16.4",
     "typescript": "^5.0.4"
   },
   "dependencies": {
@@ -4,7 +4,7 @@ import { Document } from "langchain/document";
 import { initializeAgentExecutorWithOptions } from "langchain/agents";
 import {Calculator} from "langchain/tools/calculator";

-const pathToLocalAi = process.env['OPENAI_API_HOST'] || 'http://api:8080/v1';
+const pathToLocalAi = process.env['OPENAI_API_BASE'] || 'http://api:8080/v1';
 const fakeApiKey = process.env['OPENAI_API_KEY'] || '-';
 const modelName = process.env['MODEL_NAME'] || 'gpt-3.5-turbo';
24 examples/langchain/langchainpy-localai-example/.vscode/launch.json vendored Normal file
@@ -0,0 +1,24 @@
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python: Current File",
            "type": "python",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal",
            "redirectOutput": true,
            "justMyCode": false
        },
        {
            "name": "Python: Attach to Port 5678",
            "type": "python",
            "request": "attach",
            "connect": {
                "host": "localhost",
                "port": 5678
            },
            "justMyCode": false
        }
    ]
}
3 examples/langchain/langchainpy-localai-example/.vscode/settings.json vendored Normal file
@@ -0,0 +1,3 @@
{
    "python.defaultInterpreterPath": "${workspaceFolder}/.venv/Scripts/python"
}
46 examples/langchain/langchainpy-localai-example/full_demo.py Normal file
@@ -0,0 +1,46 @@
import os
import logging

from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate, LLMChain
from langchain.prompts.chat import (
    ChatPromptTemplate,
    SystemMessagePromptTemplate,
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import (
    AIMessage,
    HumanMessage,
    SystemMessage
)

# This logging incantation makes it easy to see that you're actually reaching your LocalAI instance rather than OpenAI.
logging.basicConfig(level=logging.DEBUG)

print('Langchain + LocalAI PYTHON Tests')

base_path = os.environ.get('OPENAI_API_BASE', 'http://api:8080/v1')
key = os.environ.get('OPENAI_API_KEY', '-')
model_name = os.environ.get('MODEL_NAME', 'gpt-3.5-turbo')

chat = ChatOpenAI(temperature=0, openai_api_base=base_path, openai_api_key=key, model_name=model_name, max_tokens=100)

print("Created ChatOpenAI for ", chat.model_name)

template = "You are a helpful assistant that translates {input_language} to {output_language}. The next message will be a sentence in {input_language}. Respond ONLY with the translation in {output_language}. Do not respond in {input_language}!"
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)

chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])

print("ABOUT to execute")

# get a chat completion from the formatted messages
response = chat(chat_prompt.format_prompt(input_language="English", output_language="French", text="I love programming.").to_messages())

print(response)

print(".")
@@ -0,0 +1,32 @@
aiohttp==3.8.4
aiosignal==1.3.1
async-timeout==4.0.2
attrs==23.1.0
certifi==2022.12.7
charset-normalizer==3.1.0
colorama==0.4.6
dataclasses-json==0.5.7
debugpy==1.6.7
frozenlist==1.3.3
greenlet==2.0.2
idna==3.4
langchain==0.0.159
marshmallow==3.19.0
marshmallow-enum==1.5.1
multidict==6.0.4
mypy-extensions==1.0.0
numexpr==2.8.4
numpy==1.24.3
openai==0.27.6
openapi-schema-pydantic==1.2.4
packaging==23.1
pydantic==1.10.7
PyYAML==6.0
requests==2.29.0
SQLAlchemy==2.0.12
tenacity==8.2.2
tqdm==4.65.0
typing-inspect==0.8.0
typing_extensions==4.5.0
urllib3==1.26.15
yarl==1.9.2
@@ -0,0 +1,6 @@

from langchain.llms import OpenAI

llm = OpenAI(temperature=0.9, model_name="gpt-3.5-turbo")
text = "What would be a good company name for a company that makes colorful socks?"
print(llm(text))
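This six-line example relies on the `OPENAI_API_BASE` / `OPENAI_API_KEY` environment variables to find LocalAI. For clarity, here is a hedged sketch of the same call with the endpoint passed explicitly; `openai_api_base` is the same keyword used by the query_data scripts later in this changeset, and the URL and key defaults are placeholder assumptions for a local setup:

```python
import os
from langchain.llms import OpenAI

# Same call, but pointing at LocalAI explicitly instead of relying on
# OPENAI_API_BASE being exported in the shell. Defaults below are placeholders.
llm = OpenAI(
    temperature=0.9,
    model_name="gpt-3.5-turbo",
    openai_api_base=os.environ.get("OPENAI_API_BASE", "http://localhost:8080/v1"),
    openai_api_key=os.environ.get("OPENAI_API_KEY", "-"),  # LocalAI does not validate the key
)
print(llm("What would be a good company name for a company that makes colorful socks?"))
```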
@@ -5,13 +5,13 @@ parameters:
   temperature: 0.2
   top_p: 0.7
 context_size: 1024
 threads: 4
 stopwords:
 - "HUMAN:"
 - "GPT:"
 roles:
   user: " "
   system: " "
 backend: "gptj"
 template:
   completion: completion
-  chat: completion # gpt4all
+  chat: gpt4all
26 examples/localai-webui/README.md Normal file
@@ -0,0 +1,26 @@
# localai-webui

Example of integration with [dhruvgera/localai-frontend](https://github.com/Dhruvgera/LocalAI-frontend).



## Setup

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/localai-webui

# (optional) Checkout a specific LocalAI tag
# git checkout -b build <TAG>

# Download any desired models to models/ in the parent LocalAI project dir
# For example: wget https://gpt4all.io/models/ggml-gpt4all-j.bin

# start with docker-compose
docker-compose up -d --build
```

Open http://localhost:3000 for the Web UI.
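Before opening the UI, it can help to confirm that the API container is answering. A minimal smoke test, assuming the stack above is running and LocalAI's OpenAI-compatible `/v1/models` route is exposed on port 8080:

```python
import json
import urllib.request

# Query the OpenAI-compatible model listing served by the api container;
# it should enumerate whatever files you placed in ./models.
with urllib.request.urlopen("http://localhost:8080/v1/models") as resp:
    print(json.dumps(json.load(resp), indent=2))
```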
20 examples/localai-webui/docker-compose.yml Normal file
@@ -0,0 +1,20 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    env_file:
      - .env
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]

  frontend:
    image: quay.io/go-skynet/localai-frontend:master
    ports:
      - 3000:3000
25 examples/privateGPT/README.md Normal file
@@ -0,0 +1,25 @@
# privateGPT

This example is a re-adaptation of https://github.com/imartinez/privateGPT to work with LocalAI and OpenAI endpoints. The changes required live in our fork at https://github.com/go-skynet/privateGPT (PR: https://github.com/imartinez/privateGPT/pull/408).

Follow the instructions in https://github.com/go-skynet/privateGPT:

```bash
git clone git@github.com:go-skynet/privateGPT.git
cd privateGPT
pip install -r requirements.txt
```

Rename `example.env` to `.env` and edit the variables appropriately.

This is an example `.env` file for LocalAI:

```
PERSIST_DIRECTORY=db
# Set to OpenAI here
MODEL_TYPE=OpenAI
EMBEDDINGS_MODEL_NAME=all-MiniLM-L6-v2
MODEL_N_CTX=1000
# LocalAI URL
OPENAI_API_BASE=http://localhost:8080/v1
```
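With `MODEL_TYPE=OpenAI` and `OPENAI_API_BASE` set as above, privateGPT's OpenAI calls are transparently redirected to LocalAI. A hedged sketch of that same redirection using the bare `openai` 0.27.x client pinned elsewhere in this changeset; the model name is an assumption and must match one configured in your LocalAI models directory:

```python
import os
import openai

# Point the stock OpenAI client at LocalAI, exactly as the .env above does.
openai.api_base = os.environ.get("OPENAI_API_BASE", "http://localhost:8080/v1")
openai.api_key = os.environ.get("OPENAI_API_KEY", "-")  # any non-empty string works

resp = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",  # assumed model name, configured in LocalAI
    messages=[{"role": "user", "content": "Say hello from LocalAI"}],
)
print(resp.choices[0].message.content)
```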
1 examples/query_data/.gitignore vendored Normal file
@@ -0,0 +1 @@
storage/
69 examples/query_data/README.md Normal file
@@ -0,0 +1,69 @@
# Data query example

This example makes use of [Llama-Index](https://gpt-index.readthedocs.io/en/stable/getting_started/installation.html) to enable question answering on a set of documents.

It loosely follows [the quickstart](https://gpt-index.readthedocs.io/en/stable/guides/primer/usage_pattern.html).

Summary of the steps:

- prepare the dataset (and store it into `data`)
- prepare a vector index database to run queries on
- run queries

## Requirements

You will need a training data set. Copy it into the `data` directory.

## Setup

Start the API:

```bash
# Clone LocalAI
git clone https://github.com/go-skynet/LocalAI

cd LocalAI/examples/query_data

wget https://huggingface.co/skeskinen/ggml/resolve/main/all-MiniLM-L6-v2/ggml-model-q4_0.bin -O models/bert
wget https://gpt4all.io/models/ggml-gpt4all-j.bin -O models/ggml-gpt4all-j

# start with docker-compose
docker-compose up -d --build
```

### Create a storage

In this step we create a local vector database from our document set, so that we can later ask the LLM questions about it.

Note: **OPENAI_API_KEY** is not required. However, the library might fail if no API key is passed at all, so an arbitrary string can be used.

```bash
export OPENAI_API_BASE=http://localhost:8080/v1
export OPENAI_API_KEY=sk-

python store.py
```

After it finishes, a directory "storage" will be created with the vector index database.

## Query

We can now query the dataset.

```bash
export OPENAI_API_BASE=http://localhost:8080/v1
export OPENAI_API_KEY=sk-

python query.py
```

## Update

To update our vector database, run `update.py`:

```bash
export OPENAI_API_BASE=http://localhost:8080/v1
export OPENAI_API_KEY=sk-

python update.py
```
0 examples/query_data/data/.keep Normal file
15 examples/query_data/docker-compose.yml Normal file
@@ -0,0 +1,15 @@
version: '3.6'

services:
  api:
    image: quay.io/go-skynet/local-ai:latest
    build:
      context: ../../
      dockerfile: Dockerfile
    ports:
      - 8080:8080
    env_file:
      - .env
    volumes:
      - ./models:/models:cached
    command: ["/usr/bin/local-ai"]
1 examples/query_data/models/completion.tmpl Normal file
@@ -0,0 +1 @@
{{.Input}}
6 examples/query_data/models/embeddings.yaml Normal file
@@ -0,0 +1,6 @@
name: text-embedding-ada-002
parameters:
  model: bert
  threads: 14
backend: bert-embeddings
embeddings: true
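This config exposes the local bert model under the standard OpenAI embeddings name, which is what lets Llama-Index use it unmodified. A hedged sketch of calling it directly with the `openai` 0.27.x client pinned in this changeset; the endpoint and key defaults are assumptions for a local setup:

```python
import os
import openai

openai.api_base = os.environ.get("OPENAI_API_BASE", "http://localhost:8080/v1")
openai.api_key = os.environ.get("OPENAI_API_KEY", "-")

# The model name matches the `name:` field declared in embeddings.yaml above.
resp = openai.Embedding.create(
    model="text-embedding-ada-002",
    input="LocalAI can serve bert embeddings",
)
print(len(resp["data"][0]["embedding"]))  # dimensionality of the returned vector
```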
16 examples/query_data/models/gpt-3.5-turbo.yaml Normal file
@@ -0,0 +1,16 @@
name: gpt-3.5-turbo
parameters:
  model: ggml-gpt4all-j
  top_k: 80
  temperature: 0.2
  top_p: 0.7
context_size: 1024
stopwords:
- "HUMAN:"
- "GPT:"
roles:
  user: " "
  system: " "
template:
  completion: completion
  chat: gpt4all
35 examples/query_data/query.py Normal file
@@ -0,0 +1,35 @@
import os

# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add the corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""

from llama_index import LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

# This example uses gpt-3.5-turbo by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))

# Configure prompt parameters and initialise helper
max_input_size = 500
num_output = 256
max_chunk_overlap = 20

prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)

# Build the service context from the predictor and prompt helper
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)

# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')

# load index
index = load_index_from_storage(storage_context, service_context=service_context)

query_engine = index.as_query_engine()

data = input("Question: ")
response = query_engine.query(data)
print(response)
27 examples/query_data/store.py Normal file
@@ -0,0 +1,27 @@
import os

# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add the corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""

from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, LLMPredictor, PromptHelper, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

# This example uses gpt-3.5-turbo by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))

# Configure prompt parameters and initialise helper
max_input_size = 400
num_output = 400
max_chunk_overlap = 30

prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)

# Load documents from the 'data' directory, build the vector index, and persist it
documents = SimpleDirectoryReader('data').load_data()
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper, chunk_size_limit=400)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir="./storage")
32 examples/query_data/update.py Normal file
@@ -0,0 +1,32 @@
import os

# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add the corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""

from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage

base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')

# This example uses gpt-3.5-turbo by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))

# Configure prompt parameters and initialise helper
max_input_size = 512
num_output = 256
max_chunk_overlap = 20

prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)

# Build the service context from the predictor and prompt helper
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)

# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')

# load the existing index, refresh it with the current documents, and persist it again
index = load_index_from_storage(storage_context, service_context=service_context)
documents = SimpleDirectoryReader('data').load_data()
index.refresh(documents)
index.storage_context.persist(persist_dir="./storage")
2 examples/rwkv/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
models/rwkv
models/rwkv.tokenizer.json
Some files were not shown because too many files have changed in this diff.