Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 03:02:38 -05:00)

Compare commits (37 commits)
| SHA1 |
|---|
| 425beea6c5 |
| cdfb930a69 |
| 09641b9790 |
| aac9a57500 |
| 59f7953249 |
| 217dbb448e |
| 76c881043e |
| 835a20610b |
| 74e808b8c3 |
| 53c83f2fae |
| 62365fa31d |
| a44c8e9b4e |
| 320e430c7f |
| 8615646827 |
| 925d7c3057 |
| e350924ac1 |
| e891a46740 |
| cd9285bbe6 |
| 917ff13c86 |
| 2a40f44023 |
| c22d06c780 |
| babbd23744 |
| eee41cbe2b |
| bf54b78270 |
| 589dfae89f |
| c8cc197ddd |
| 76c561a908 |
| 04797a80e1 |
| 29583a5ea5 |
| d12c1f7a4a |
| 505572dae8 |
| 3ddea794e1 |
| 10e03bde35 |
| e969604d75 |
| c822e18f0d |
| 891af1c524 |
| 5807d0b766 |
```diff
@@ -3,9 +3,13 @@ ARG BUILD_TYPE=
 FROM golang:$GO_VERSION
 ENV REBUILD=true
 WORKDIR /build
-RUN apt-get update && apt-get install -y cmake libgomp1 libopenblas-dev libopenblas-base libopencv-dev libopencv-core-dev libopencv-core4.5 ca-certificates
+RUN apt-get update && apt-get install -y cmake curl libgomp1 libopenblas-dev libopenblas-base libopencv-dev libopencv-core-dev libopencv-core4.5 ca-certificates
 COPY . .
 RUN ln -s /usr/include/opencv4/opencv2/ /usr/include/opencv2
 RUN make build
+ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
+# Define the health check command
+HEALTHCHECK --interval=30s --timeout=360s --retries=10 \
+  CMD curl -f $HEALTHCHECK_ENDPOINT || exit 1
 EXPOSE 8080
 ENTRYPOINT [ "/build/entrypoint.sh" ]
```
```diff
@@ -11,6 +11,10 @@ RUN make build
 FROM debian:$DEBIAN_VERSION
 COPY --from=builder /build/local-ai /usr/bin/local-ai
-RUN apt-get update && apt-get install -y ca-certificates
+RUN apt-get update && apt-get install -y ca-certificates curl
+ENV HEALTHCHECK_ENDPOINT=http://localhost:8080/readyz
+# Define the health check command
+HEALTHCHECK --interval=30s --timeout=360s --retries=10 \
+  CMD curl -f $HEALTHCHECK_ENDPOINT || exit 1
 EXPOSE 8080
 ENTRYPOINT [ "/usr/bin/local-ai" ]
```
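As an aside, a minimal Go sketch (not part of this diff; endpoint and failure semantics taken from the Dockerfile lines above) of what `curl -f $HEALTHCHECK_ENDPOINT || exit 1` checks:

```go
// probe.go - standalone equivalent of the HEALTHCHECK command above.
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	endpoint := os.Getenv("HEALTHCHECK_ENDPOINT")
	if endpoint == "" {
		endpoint = "http://localhost:8080/readyz"
	}
	resp, err := http.Get(endpoint)
	if err != nil {
		fmt.Fprintln(os.Stderr, "unhealthy:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	// `curl -f` fails on HTTP errors (status >= 400); mirror that here.
	if resp.StatusCode >= 400 {
		fmt.Fprintln(os.Stderr, "unhealthy, status:", resp.Status)
		os.Exit(1)
	}
	fmt.Println("healthy")
}
```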
Makefile (19 changed lines)
```diff
@@ -3,13 +3,13 @@ GOTEST=$(GOCMD) test
 GOVET=$(GOCMD) vet
 BINARY_NAME=local-ai

-GOLLAMA_VERSION?=ccf23adfb278c0165d388389a5d60f3fe38e4854
+GOLLAMA_VERSION?=fbec625895ba0c458f783b62c8569135c5e80d79
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
-GPT4ALL_VERSION?=8119ff4df0a99bde44255db2b8c7290b5582ac2b
-GOGGMLTRANSFORMERS_VERSION?=14fd6c9
+GPT4ALL_VERSION?=73db20ba85fbbdc66a56e2619394c0eea40dc72b
+GOGGMLTRANSFORMERS_VERSION?=4f18e5eb75089dc1fc8f1c955bb8f73d18520a46
 RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
 RWKV_VERSION?=07166da10cb2a9e8854395a4f210464dcea76e47
-WHISPER_CPP_VERSION?=041be06d5881d3c759cc4ed45d655804361237cd
+WHISPER_CPP_VERSION?=9b926844e3ae0ca6a0d13573b2e0349be1a4b573
 BERT_VERSION?=cea1ed76a7f48ef386a8e369f6c82c48cdf2d551
 BLOOMZ_VERSION?=e9366e82abdfe70565644fbfae9651976714efd1
 BUILD_TYPE?=
@@ -18,7 +18,7 @@ CUDA_LIBPATH?=/usr/local/cuda/lib64/
 STABLEDIFFUSION_VERSION?=c0748eca3642d58bcf9521108bcee46959c647dc
 GO_TAGS?=
 BUILD_ID?=git
-
+LD_FLAGS=?=
 OPTIONAL_TARGETS?=

 OS := $(shell uname -s)
@@ -41,6 +41,11 @@ ifeq ($(BUILD_TYPE),cublas)
 	export LLAMA_CUBLAS=1
 endif

+# glibc-static or glibc-devel-static required
+ifeq ($(STATIC),true)
+	LD_FLAGS=-linkmode external -extldflags -static
+endif
+
 ifeq ($(GO_TAGS),stablediffusion)
 	OPTIONAL_TARGETS+=go-stable-diffusion/libstablediffusion.a
 endif
@@ -197,7 +202,7 @@ build: prepare ## Build the project
 	$(info ${GREEN}I local-ai build info:${RESET})
 	$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
 	$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
-	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -tags "$(GO_TAGS)" -x -o $(BINARY_NAME) ./
+	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -x -o $(BINARY_NAME) ./

 dist: build
 	mkdir -p release
@@ -223,7 +228,7 @@ test-models/testmodel:

 test: prepare test-models/testmodel
 	cp tests/models_fixtures/* test-models
-	C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo -v -r ./api ./pkg
+	C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} TEST_DIR=$(abspath ./)/test-dir/ FIXTURES=$(abspath ./)/tests/fixtures CONFIG_FILE=$(abspath ./)/test-models/config.yaml MODELS_PATH=$(abspath ./)/test-models $(GOCMD) run github.com/onsi/ginkgo/v2/ginkgo --flakeAttempts 5 -v -r ./api ./pkg

 ## Help:
 help: ## Show this help.
```
README.md (91 changed lines)
```diff
@@ -39,6 +39,7 @@ LocalAI uses C++ bindings for optimizing speed. It is based on [llama.cpp](https

 ## News

+- 23-05-2023: __v1.15.0__ released. `go-gpt2.cpp` backend got renamed to `go-ggml-transformers.cpp`, updated to include https://github.com/ggerganov/llama.cpp/pull/1508, which breaks compatibility with older models. This impacts RedPajama, GptNeoX, MPT (not `gpt4all-mpt`), Dolly, GPT2 and Starcoder based models. [Binary releases available](https://github.com/go-skynet/LocalAI/releases), various fixes, including https://github.com/go-skynet/LocalAI/pull/341.
 - 21-05-2023: __v1.14.0__ released. Minor updates to the `/models/apply` endpoint, `llama.cpp` backend updated to include https://github.com/ggerganov/llama.cpp/pull/1508, which breaks compatibility with older models. `gpt4all` is still compatible with the old format.
 - 19-05-2023: __v1.13.0__ released! 🔥🔥 Updates to the `gpt4all` and `llama` backends, consolidated CUDA support (https://github.com/go-skynet/LocalAI/pull/310, thanks to @bubthegreat and @Thireus), preliminary support for [installing models via API](https://github.com/go-skynet/LocalAI#advanced-prepare-models-using-the-api).
 - 17-05-2023: __v1.12.0__ released! 🔥🔥 Minor fixes, plus CUDA (https://github.com/go-skynet/LocalAI/pull/258) support for `llama.cpp`-compatible models and image generation (https://github.com/go-skynet/LocalAI/pull/272).
```
```diff
@@ -62,8 +63,9 @@ Now LocalAI can generate images too:

 Twitter: [@LocalAI_API](https://twitter.com/LocalAI_API) and [@mudler_it](https://twitter.com/mudler_it)

-### Blogs and articles
+### Blogs, articles, media

+- [LocalAI meets k8sgpt](https://www.youtube.com/watch?v=PKrDNuJ_dfE) - CNCF Webinar showcasing LocalAI and k8sgpt.
 - [Question Answering on Documents locally with LangChain, LocalAI, Chroma, and GPT4All](https://mudler.pm/posts/localai-question-answering/) by Ettore Di Giacinto
 - [Tutorial to use k8sgpt with LocalAI](https://medium.com/@tyler_97636/k8sgpt-localai-unlock-kubernetes-superpowers-for-free-584790de9b65) - an excellent use case for LocalAI, using AI to analyse Kubernetes clusters, by Tyler Gillson
```
```diff
@@ -129,7 +131,7 @@ Depending on the model you are attempting to run might need more RAM or CPU reso
 | [gpt4all-llama](https://github.com/nomic-ai/gpt4all) | Vicuna, Alpaca, LLaMa | yes | no | no | yes |
 | [gpt4all-mpt](https://github.com/nomic-ai/gpt4all) | MPT | yes | no | no | yes |
 | [gpt4all-j](https://github.com/nomic-ai/gpt4all) | GPT4ALL-J | yes | no | no | yes |
-| [gpt2](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-ggml-transformers.cpp)) | GPT/NeoX, Cerebras | yes | no | no | no |
+| [gpt2](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-ggml-transformers.cpp)) | GPT2, Cerebras | yes | no | no | no |
 | [dolly](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-ggml-transformers.cpp)) | Dolly | yes | no | no | no |
 | [gptj](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-ggml-transformers.cpp)) | GPTJ | yes | no | no | no |
 | [mpt](https://github.com/ggerganov/ggml) ([binding](https://github.com/go-skynet/go-ggml-transformers.cpp)) | MPT | yes | no | no | no |
```
````diff
@@ -607,47 +609,69 @@ It should work, however you need to make sure you give enough resources to the c
 LocalAI can be installed inside Kubernetes with helm.

 <details>
 By default, the helm chart will install LocalAI instance using the ggml-gpt4all-j model without persistent storage.

 1. Add the helm repo
    ```bash
    helm repo add go-skynet https://go-skynet.github.io/helm-charts/
    ```
-1. Create a values files with your settings:
-   ```bash
-   cat <<EOF > values.yaml
+2. Install the helm chart:
+   ```bash
+   helm repo update
+   helm install local-ai go-skynet/local-ai -f values.yaml
+   ```
+> **Note:** For further configuration options, see the [helm chart repository on GitHub](https://github.com/go-skynet/helm-charts).
+### Example values
+Deploy a single LocalAI pod with 6GB of persistent storage serving up a `ggml-gpt4all-j` model with custom prompt.
+```yaml
 ### values.yaml
 deployment:
   image: quay.io/go-skynet/local-ai:latest
   # Adjust the number of threads and context size for model inference
   env:
+    threads: 4
+    contextSize: 1024
     modelsPath: "/models"
+# Optionally create a PVC, mount the PV to the LocalAI Deployment,
+# and download a model to prepopulate the models directory
+modelsVolume:
+  enabled: true
+  url: "https://gpt4all.io/models/ggml-gpt4all-j.bin"
+  pvc:
-    threads: 14
-    contextSize: 512

 # Set the pod requests/limits
 resources:
   limits:
     cpu: 4000m
     memory: 7000Mi
   requests:
     cpu: 100m
     memory: 6000Mi

 # Add a custom prompt template for the ggml-gpt4all-j model
 promptTemplates:
   # The name of the model this template belongs to
   ggml-gpt4all-j.bin.tmpl: |
     This is my custom prompt template...
     ### Prompt:
     {{.Input}}
     ### Response:

 # Model configuration
 models:
   # Don't re-download models on pod creation
   forceDownload: false

   # List of models to download and serve
   list:
   - url: "https://gpt4all.io/models/ggml-gpt4all-j.bin"
     # Optional basic HTTP authentication
     basicAuth: base64EncodedCredentials

 # Enable 6Gb of persistent storage models and prompt templates
 persistence:
   enabled: true
   size: 6Gi
   accessModes:
   - ReadWriteOnce
 auth:
   # Optional value for HTTP basic access authentication header
   basic: "" # 'username:password' base64 encoded

 service:
   type: ClusterIP
   annotations: {}
   # If using an AWS load balancer, you'll need to override the default 60s load balancer idle timeout
   # service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "1200"
-EOF
 ```
-3. Install the helm chart:
-   ```bash
-   helm repo update
-   helm install local-ai go-skynet/local-ai -f values.yaml
-   ```
-
-Check out also the [helm chart repository on GitHub](https://github.com/go-skynet/helm-charts).

 </details>

 ## Supported OpenAI API endpoints
````
```diff
@@ -1001,7 +1025,7 @@ There is the availability of localai-webui and chatbot-ui in the examples sectio

 <details>

-AutoGPT currently doesn't allow to set a different API URL, but there is a PR open for it, so this should be possible soon!
+Yes, see the [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/)!

 </details>
```
```diff
@@ -1012,11 +1036,8 @@ Feel free to open up a PR to get your project listed!
 - [Kairos](https://github.com/kairos-io/kairos)
 - [k8sgpt](https://github.com/k8sgpt-ai/k8sgpt#running-local-models)
 - [Spark](https://github.com/cedriking/spark)
-
-## Blog posts and other articles
-
-- https://medium.com/@tyler_97636/k8sgpt-localai-unlock-kubernetes-superpowers-for-free-584790de9b65
-- https://kairos.io/docs/examples/localai/
+- [autogpt4all](https://github.com/aorumbayev/autogpt4all)
+- [Mods](https://github.com/charmbracelet/mods)

 ## Short-term roadmap
```
api/api.go (20 changed lines)
```diff
@@ -69,6 +69,18 @@ func App(opts ...AppOption) *fiber.App {
 	// Default middleware config
 	app.Use(recover.New())

+	if options.preloadJSONModels != "" {
+		if err := ApplyGalleryFromString(options.loader.ModelPath, options.preloadJSONModels, cm); err != nil {
+			return nil
+		}
+	}
+
+	if options.preloadModelsFromPath != "" {
+		if err := ApplyGalleryFromFile(options.loader.ModelPath, options.preloadModelsFromPath, cm); err != nil {
+			return nil
+		}
+	}
+
 	if options.cors {
 		if options.corsAllowOrigins == "" {
 			app.Use(cors.New())
```
```diff
@@ -114,6 +126,14 @@ func App(opts ...AppOption) *fiber.App {
 		app.Static("/generated-images", options.imageDir)
 	}

+	ok := func(c *fiber.Ctx) error {
+		return c.SendStatus(200)
+	}
+
+	// Kubernetes health checks
+	app.Get("/healthz", ok)
+	app.Get("/readyz", ok)
+
 	// models
 	app.Get("/v1/models", listModels(options.loader, cm))
 	app.Get("/models", listModels(options.loader, cm))
```
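For reference, the same health-check pattern as a self-contained Fiber program (a sketch assuming only `github.com/gofiber/fiber/v2`, mirroring the handlers added above):

```go
package main

import (
	"log"

	"github.com/gofiber/fiber/v2"
)

func main() {
	app := fiber.New()
	// One trivial handler serves both probes, as in the diff above.
	ok := func(c *fiber.Ctx) error { return c.SendStatus(200) }
	app.Get("/healthz", ok)
	app.Get("/readyz", ok)
	log.Fatal(app.Listen(":8080"))
}
```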
```diff
@@ -16,24 +16,28 @@ import (
 )

 type Config struct {
 	OpenAIRequest         `yaml:"parameters"`
 	Name                  string            `yaml:"name"`
 	StopWords             []string          `yaml:"stopwords"`
 	Cutstrings            []string          `yaml:"cutstrings"`
 	TrimSpace             []string          `yaml:"trimspace"`
 	ContextSize           int               `yaml:"context_size"`
 	F16                   bool              `yaml:"f16"`
 	Threads               int               `yaml:"threads"`
 	Debug                 bool              `yaml:"debug"`
 	Roles                 map[string]string `yaml:"roles"`
 	Embeddings            bool              `yaml:"embeddings"`
 	Backend               string            `yaml:"backend"`
 	TemplateConfig        TemplateConfig    `yaml:"template"`
 	MirostatETA           float64           `yaml:"mirostat_eta"`
 	MirostatTAU           float64           `yaml:"mirostat_tau"`
 	Mirostat              int               `yaml:"mirostat"`
 	NGPULayers            int               `yaml:"gpu_layers"`
 	ImageGenerationAssets string            `yaml:"asset_dir"`

+	PromptCachePath string `yaml:"prompt_cache_path"`
+	PromptCacheAll  bool   `yaml:"prompt_cache_all"`

 	PromptStrings, InputStrings []string
 	InputToken                  [][]int
 }
```
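A small sketch of how the two new prompt-cache keys decode from a model YAML config (assuming `gopkg.in/yaml.v2` semantics; the struct is a hypothetical stand-in for the relevant `Config` fields above):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// Hypothetical trimmed-down stand-in for the new Config fields.
type promptCacheConfig struct {
	PromptCachePath string `yaml:"prompt_cache_path"`
	PromptCacheAll  bool   `yaml:"prompt_cache_all"`
}

func main() {
	data := []byte(`
prompt_cache_path: "cache/gpt4all-j"
prompt_cache_all: true
`)
	var c promptCacheConfig
	if err := yaml.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)
}
```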
api/gallery.go (113 changed lines)
```diff
@@ -2,10 +2,12 @@ package api

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"net/http"
 	"net/url"
+	"os"
 	"strings"
 	"sync"
```
```diff
@@ -40,6 +42,43 @@ func newGalleryApplier(modelPath string) *galleryApplier {
 		statuses: make(map[string]*galleryOpStatus),
 	}
 }

+func applyGallery(modelPath string, req ApplyGalleryModelRequest, cm *ConfigMerger) error {
+	url, err := req.DecodeURL()
+	if err != nil {
+		return err
+	}
+
+	// Send a GET request to the URL
+	response, err := http.Get(url)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+
+	// Read the response body
+	body, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return err
+	}
+
+	// Unmarshal YAML data into a Config struct
+	var config gallery.Config
+	err = yaml.Unmarshal(body, &config)
+	if err != nil {
+		return err
+	}
+
+	config.Files = append(config.Files, req.AdditionalFiles...)
+
+	if err := gallery.Apply(modelPath, req.Name, &config, req.Overrides); err != nil {
+		return err
+	}
+
+	// Reload models
+	return cm.LoadConfigs(modelPath)
+}
+
 func (g *galleryApplier) updatestatus(s string, op *galleryOpStatus) {
 	g.Lock()
 	defer g.Unlock()
```
```diff
@@ -66,44 +105,7 @@ func (g *galleryApplier) start(c context.Context, cm *ConfigMerger) {
 					g.updatestatus(op.id, &galleryOpStatus{Error: e, Processed: true})
 				}

-				url, err := op.req.DecodeURL()
-				if err != nil {
-					updateError(err)
-					continue
-				}
-
-				// Send a GET request to the URL
-				response, err := http.Get(url)
-				if err != nil {
-					updateError(err)
-					continue
-				}
-				defer response.Body.Close()
-
-				// Read the response body
-				body, err := ioutil.ReadAll(response.Body)
-				if err != nil {
-					updateError(err)
-					continue
-				}
-
-				// Unmarshal YAML data into a Config struct
-				var config gallery.Config
-				err = yaml.Unmarshal(body, &config)
-				if err != nil {
-					updateError(fmt.Errorf("failed to unmarshal YAML: %v", err))
-					continue
-				}
-
-				config.Files = append(config.Files, op.req.AdditionalFiles...)
-
-				if err := gallery.Apply(g.modelPath, op.req.Name, &config, op.req.Overrides); err != nil {
-					updateError(err)
-					continue
-				}
-
-				// Reload models
-				if err := cm.LoadConfigs(g.modelPath); err != nil {
+				if err := applyGallery(g.modelPath, op.req, cm); err != nil {
 					updateError(err)
 					continue
 				}
```
```diff
@@ -114,6 +116,41 @@ func (g *galleryApplier) start(c context.Context, cm *ConfigMerger) {
 	}()
 }

+func ApplyGalleryFromFile(modelPath, s string, cm *ConfigMerger) error {
+	dat, err := os.ReadFile(s)
+	if err != nil {
+		return err
+	}
+	var requests []ApplyGalleryModelRequest
+	err = json.Unmarshal(dat, &requests)
+	if err != nil {
+		return err
+	}
+
+	for _, r := range requests {
+		if err := applyGallery(modelPath, r, cm); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func ApplyGalleryFromString(modelPath, s string, cm *ConfigMerger) error {
+	var requests []ApplyGalleryModelRequest
+	err := json.Unmarshal([]byte(s), &requests)
+	if err != nil {
+		return err
+	}
+
+	for _, r := range requests {
+		if err := applyGallery(modelPath, r, cm); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 // endpoints

 type ApplyGalleryModelRequest struct {
```
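The string consumed by `ApplyGalleryFromString` is a JSON array of gallery requests; a sketch of decoding such a payload (the trimmed-down struct is hypothetical, keeping only the `url` and `name` keys that appear in the autoGPT example further down):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical subset of ApplyGalleryModelRequest; the real struct has more fields.
type galleryModelRequest struct {
	URL  string `json:"url"`
	Name string `json:"name"`
}

func main() {
	payload := `[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]`
	var requests []galleryModelRequest
	if err := json.Unmarshal([]byte(payload), &requests); err != nil {
		panic(err)
	}
	for _, r := range requests {
		fmt.Printf("would install %s as %q\n", r.URL, r.Name)
	}
}
```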
```diff
@@ -15,6 +15,8 @@ type Option struct {
 	debug, disableMessage bool
 	imageDir              string
 	cors                  bool
+	preloadJSONModels     string
+	preloadModelsFromPath string
 	corsAllowOrigins      string
 }
@@ -53,6 +55,17 @@ func WithContext(ctx context.Context) AppOption {
 	}
 }

+func WithYAMLConfigPreload(configFile string) AppOption {
+	return func(o *Option) {
+		o.preloadModelsFromPath = configFile
+	}
+}
+
+func WithJSONStringPreload(configFile string) AppOption {
+	return func(o *Option) {
+		o.preloadJSONModels = configFile
+	}
+}
+
 func WithConfigFile(configFile string) AppOption {
 	return func(o *Option) {
 		o.configFile = configFile
```
```diff
@@ -2,6 +2,8 @@ package api

 import (
 	"fmt"
+	"os"
+	"path/filepath"
 	"regexp"
 	"strings"
 	"sync"
@@ -102,7 +104,7 @@ func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, c Config)
 	switch model := inferenceModel.(type) {
 	case *llama.LLama:
 		fn = func() ([]float32, error) {
-			predictOptions := buildLLamaPredictOptions(c)
+			predictOptions := buildLLamaPredictOptions(c, loader.ModelPath)
 			if len(tokens) > 0 {
 				return model.TokenEmbeddings(tokens, predictOptions...)
 			}
@@ -151,7 +153,7 @@ func ModelEmbedding(s string, tokens []int, loader *model.ModelLoader, c Config)
 	}, nil
 }

-func buildLLamaPredictOptions(c Config) []llama.PredictOption {
+func buildLLamaPredictOptions(c Config, modelPath string) []llama.PredictOption {
 	// Generate the prediction using the language model
 	predictOptions := []llama.PredictOption{
 		llama.SetTemperature(c.Temperature),
@@ -161,6 +163,17 @@ func buildLLamaPredictOptions(c Config) []llama.PredictOption {
 		llama.SetThreads(c.Threads),
 	}

+	if c.PromptCacheAll {
+		predictOptions = append(predictOptions, llama.EnablePromptCacheAll)
+	}
+
+	if c.PromptCachePath != "" {
+		// Create parent directory
+		p := filepath.Join(modelPath, c.PromptCachePath)
+		os.MkdirAll(filepath.Dir(p), 0755)
+		predictOptions = append(predictOptions, llama.SetPathPromptCache(p))
+	}
+
 	if c.Mirostat != 0 {
 		predictOptions = append(predictOptions, llama.SetMirostat(c.Mirostat))
 	}
@@ -469,7 +482,7 @@ func ModelInference(s string, loader *model.ModelLoader, c Config, tokenCallback
 		model.SetTokenCallback(tokenCallback)
 	}

-	predictOptions := buildLLamaPredictOptions(c)
+	predictOptions := buildLLamaPredictOptions(c, loader.ModelPath)

 	str, er := model.Predict(
 		s,
```
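To make the cache-path handling concrete, a standalone sketch (hypothetical values) of the same join-and-create logic used in `buildLLamaPredictOptions` above:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	modelPath := "models"                 // hypothetical models directory
	promptCachePath := "cache/gpt4all-j"  // relative, as in the config field
	// The cache file lives under the models directory; only its parent dir is created.
	p := filepath.Join(modelPath, promptCachePath)
	if err := os.MkdirAll(filepath.Dir(p), 0755); err != nil {
		panic(err)
	}
	fmt.Println("prompt cache file will be written to:", p)
}
```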
```diff
@@ -4,6 +4,13 @@ Here is a list of projects that can easily be integrated with the LocalAI backen

 ### Projects

+### AutoGPT
+
+_by [@mudler](https://github.com/mudler)_
+
+This example shows how to use AutoGPT with LocalAI.
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/autoGPT/)
+
 ### Chatbot-UI
```
examples/autoGPT/.env (new file, 5 lines)
```diff
@@ -0,0 +1,5 @@
+OPENAI_API_KEY=sk---anystringhere
+OPENAI_API_BASE=http://api:8080/v1
+# Models to preload at start
+# Here we configure gpt4all as gpt-3.5-turbo and bert as embeddings
+PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]
```
examples/autoGPT/README.md (new file, 32 lines)
````diff
@@ -0,0 +1,32 @@
+# AutoGPT
+
+Example of integration with [AutoGPT](https://github.com/Significant-Gravitas/Auto-GPT).
+
+## Run
+
+```bash
+# Clone LocalAI
+git clone https://github.com/go-skynet/LocalAI
+
+cd LocalAI/examples/autoGPT
+
+docker-compose run --rm auto-gpt
+```
+
+Note: The example automatically downloads the `gpt4all` model as it is under a permissive license. The GPT4All model does not seem to be enough to run AutoGPT; WizardLM-7b-uncensored seems to perform better (with `f16: true`).
+
+See the `.env` configuration file to set a different model with the [model-gallery](https://github.com/go-skynet/model-gallery) by editing `PRELOAD_MODELS`.
+
+## Without docker
+
+Run AutoGPT with `OPENAI_API_BASE` pointing to the LocalAI endpoint. For instance, if you run it locally:
+
+```
+OPENAI_API_BASE=http://localhost:8080 python ...
+```
+
+Note: you need models named `gpt-3.5-turbo` and `text-embedding-ada-002`. You can preload those in LocalAI at start by setting, in the environment:
+
+```
+PRELOAD_MODELS=[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}, { "url": "github:go-skynet/model-gallery/bert-embeddings.yaml", "name": "text-embedding-ada-002"}]
+```
````
examples/autoGPT/docker-compose.yaml (new file, 42 lines)
```diff
@@ -0,0 +1,42 @@
+version: "3.9"
+services:
+  api:
+    image: quay.io/go-skynet/local-ai:latest
+    ports:
+      - 8080:8080
+    env_file:
+      - .env
+    environment:
+      - DEBUG=true
+      - MODELS_PATH=/models
+    volumes:
+      - ./models:/models:cached
+    command: ["/usr/bin/local-ai" ]
+  auto-gpt:
+    image: significantgravitas/auto-gpt
+    depends_on:
+      api:
+        condition: service_healthy
+      redis:
+        condition: service_started
+    env_file:
+      - .env
+    environment:
+      MEMORY_BACKEND: ${MEMORY_BACKEND:-redis}
+      REDIS_HOST: ${REDIS_HOST:-redis}
+    profiles: ["exclude-from-up"]
+    volumes:
+      - ./auto_gpt_workspace:/app/autogpt/auto_gpt_workspace
+      - ./data:/app/data
+      ## allow auto-gpt to write logs to disk
+      - ./logs:/app/logs
+      ## uncomment following lines if you want to make use of these files
+      ## you must have them existing in the same folder as this docker-compose.yml
+      #- type: bind
+      #  source: ./azure.yaml
+      #  target: /app/azure.yaml
+      #- type: bind
+      #  source: ./ai_settings.yaml
+      #  target: /app/ai_settings.yaml
+  redis:
+    image: "redis/redis-stack-server:latest"
```
go.mod (10 changed lines)
```diff
@@ -4,18 +4,18 @@ go 1.19

 require (
 	github.com/donomii/go-rwkv.cpp v0.0.0-20230515123100-6fdd0c338e56
-	github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230520182345-041be06d5881
+	github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230527074028-9b926844e3ae
 	github.com/go-audio/wav v1.1.0
 	github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf
 	github.com/go-skynet/go-bert.cpp v0.0.0-20230516063724-cea1ed76a7f4
-	github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230523150735-8bfcb3ea6127
-	github.com/go-skynet/go-llama.cpp v0.0.0-20230520155239-ccf23adfb278
+	github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230525204055-4f18e5eb7508
+	github.com/go-skynet/go-llama.cpp v0.0.0-20230524233806-6e7e69a1607e
 	github.com/gofiber/fiber/v2 v2.46.0
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/go-multierror v1.1.1
-	github.com/imdario/mergo v0.3.15
+	github.com/imdario/mergo v0.3.16
 	github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642
-	github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd
+	github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230526132403-a6f3e94458e2
 	github.com/onsi/ginkgo/v2 v2.9.5
 	github.com/onsi/gomega v1.27.7
 	github.com/otiai10/openaigo v1.1.0
```
go.sum (40 changed lines)
```diff
@@ -16,8 +16,16 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/donomii/go-rwkv.cpp v0.0.0-20230515123100-6fdd0c338e56 h1:s8/MZdicstKi5fn9D9mKGIQ/q6IWCYCk/BM68i8v51w=
 github.com/donomii/go-rwkv.cpp v0.0.0-20230515123100-6fdd0c338e56/go.mod h1:gWy7FIWioqYmYxkaoFyBnaKApeZVrUkHhv9EV9pz4dM=
 github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230520182345-041be06d5881 h1:dafqVivljYk51VLFnnpTXJnfWDe637EobWZ1l8PyEf8=
 github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230520182345-041be06d5881/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
+github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230523110439-77eab3fbfe5e h1:4PMorQuoUGAXmIzCtnNOHaasyLokXdgd8jUWwsraFTo=
+github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230523110439-77eab3fbfe5e/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
+github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230524181101-5e2b3407ef46 h1:+STJWsBFikYC90LnR8I9gcBdysQn7Jv9Jb44+5WBi68=
+github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230524181101-5e2b3407ef46/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
+github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230527074028-9b926844e3ae h1:uzi5myq/qNX9xiKMRF/fW3HfxuEo2WcnTalwg9fe2hM=
+github.com/ggerganov/whisper.cpp/bindings/go v0.0.0-20230527074028-9b926844e3ae/go.mod h1:QIjZ9OktHFG7p+/m3sMvrAJKKdWrr1fZIK0rM6HZlyo=
 github.com/go-audio/audio v1.0.0 h1:zS9vebldgbQqktK4H0lUqWrG8P0NxCJVqcj7ZpNnwd4=
 github.com/go-audio/audio v1.0.0/go.mod h1:6uAu0+H2lHkwdGsAY+j2wHPNPpPoeg5AaEFh9FlA+Zs=
 github.com/go-audio/riff v1.0.0 h1:d8iCGbDvox9BfLagY94fBynxSPHO80LmZCaOsmKxokA=
@@ -36,8 +44,24 @@ github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf h1:VJfSn8hIDE+K5+h38M3iAyFXrxpRExMKRdTk33UDxsw=
 github.com/go-skynet/bloomz.cpp v0.0.0-20230510223001-e9366e82abdf/go.mod h1:wc0fJ9V04yiYTfgKvE5RUUSRQ5Kzi0Bo4I+U3nNOUuA=
 github.com/go-skynet/go-bert.cpp v0.0.0-20230516063724-cea1ed76a7f4 h1:+3KPDf4Wv1VHOkzAfZnlj9qakLSYggTpm80AswhD/FU=
 github.com/go-skynet/go-bert.cpp v0.0.0-20230516063724-cea1ed76a7f4/go.mod h1:VY0s5KoAI2jRCvQXKuDeEEe8KG7VaWifSNJSk+E1KtY=
+github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230523173010-f89d7c22df6b h1:uKICsAbdRJxMPZ4RXltwOwXPRDO1/d/pdGR3gEEUV9M=
+github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230523173010-f89d7c22df6b/go.mod h1:hjmO5UfipWl6xkPT54acOs9DDto8GPV81IvsBcvRjsA=
+github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230524084634-c4c581f1853c h1:jXUOCh2K4OzRItTtHzdxvkylE9r1szRSleRpXCNvraY=
+github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230524084634-c4c581f1853c/go.mod h1:hjmO5UfipWl6xkPT54acOs9DDto8GPV81IvsBcvRjsA=
+github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230525204055-4f18e5eb7508 h1:pb7wUQlgqbakB4vILBq44iLe5w9bcjAsP7js2iFOWX8=
+github.com/go-skynet/go-ggml-transformers.cpp v0.0.0-20230525204055-4f18e5eb7508/go.mod h1:hjmO5UfipWl6xkPT54acOs9DDto8GPV81IvsBcvRjsA=
+github.com/go-skynet/go-gpt2.cpp v0.0.0-20230523153133-3eb3a32c0874 h1:/6QWh2oarU7iPSpXj/3bLlkKptyxjKTRrNtGUrh8vhI=
+github.com/go-skynet/go-gpt2.cpp v0.0.0-20230523153133-3eb3a32c0874/go.mod h1:1Wj/xbkMfwQSOrhNYK178IzqQHstZbRfhx4s8p1M5VM=
 github.com/go-skynet/go-llama.cpp v0.0.0-20230520155239-ccf23adfb278 h1:st4ow9JKy3UuhkwutrbWof2vMFU/YxwBCLYZ1IxJ2Po=
 github.com/go-skynet/go-llama.cpp v0.0.0-20230520155239-ccf23adfb278/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
+github.com/go-skynet/go-llama.cpp v0.0.0-20230523103108-dcf8da632bce h1:Mcq9LvYG4msXJvFUeiYI6PGftqmYbOoBxNfjyAAyFB4=
+github.com/go-skynet/go-llama.cpp v0.0.0-20230523103108-dcf8da632bce/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
+github.com/go-skynet/go-llama.cpp v0.0.0-20230524233806-6e7e69a1607e h1:zfxPbHj7/hN2F7V12vfxCi4CFsaVO1WohW96OVFtfNw=
+github.com/go-skynet/go-llama.cpp v0.0.0-20230524233806-6e7e69a1607e/go.mod h1:oA0r4BW8ndyjTMGi1tulsNd7sdg3Ql8MaVFuT1zF6ws=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@@ -57,6 +81,8 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
 github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
@@ -79,8 +105,22 @@ github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp9
 github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
 github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642 h1:KTkh3lOUsGqQyP4v+oa38sPFdrZtNnM4HaxTb3epdYs=
 github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642/go.mod h1:8ufRkpz/S/9ahkaxzZ5i4WMgO9w4InEhuRoT7vK5Rnw=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd h1:is/rE0YD8oEWcX3fQ+VxoS3fD0LqFEmTxh8XZegYYsA=
 github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230523222017-b36a52020702 h1:uya1G35AbUfVtG8fu/HuUGTFXpN7n9XuRAAvC1lTr+M=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230523222017-b36a52020702/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525153421-63f57635d83c h1:mDy1OKHlG9xv1KDMcOVNYQwoYKZSlb5Mu69W3+DNLYI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525153421-63f57635d83c/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525202709-afe3870b7a29 h1:hgml/PMZX3M+WigXD4BGy+mbD1oPxYbXJXo16I555Aw=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525202709-afe3870b7a29/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525210850-d1ff7132c553 h1:+zQQHEoOaVUT72uLr6OJF+Lj35LR620aeeyrF7K6x5s=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230525210850-d1ff7132c553/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230526132403-a6f3e94458e2 h1:DE++nIPuUGk8pz71PF0BITX+CTF0lv4ZNWv12qCBUVk=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230526132403-a6f3e94458e2/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
 github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
 github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
 github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
```
main.go (12 changed lines)
```diff
@@ -53,6 +53,16 @@ func main() {
 			EnvVars: []string{"MODELS_PATH"},
 			Value:   filepath.Join(path, "models"),
 		},
+		&cli.StringFlag{
+			Name:        "preload-models",
+			DefaultText: "A List of models to apply in JSON at start",
+			EnvVars:     []string{"PRELOAD_MODELS"},
+		},
+		&cli.StringFlag{
+			Name:        "preload-models-config",
+			DefaultText: "A List of models to apply at startup. Path to a YAML config file",
+			EnvVars:     []string{"PRELOAD_MODELS_CONFIG"},
+		},
 		&cli.StringFlag{
 			Name:        "config-file",
 			DefaultText: "Config file",
@@ -103,6 +113,8 @@ It uses llama.cpp, ggml and gpt4all as backend with golang c bindings.
 		fmt.Printf("Starting LocalAI using %d threads, with models path: %s\n", ctx.Int("threads"), ctx.String("models-path"))
 		return api.App(
 			api.WithConfigFile(ctx.String("config-file")),
+			api.WithJSONStringPreload(ctx.String("preload-models")),
+			api.WithYAMLConfigPreload(ctx.String("preload-models-config")),
 			api.WithModelLoader(model.NewModelLoader(ctx.String("models-path"))),
 			api.WithContextSize(ctx.Int("context-size")),
 			api.WithDebug(ctx.Bool("debug")),
```
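Putting the new flags and options together, a hedged usage sketch (the `api` and `model` import paths are assumptions based on the repository layout, not confirmed by this diff):

```go
package main

import (
	"log"

	api "github.com/go-skynet/LocalAI/api"        // assumed import path
	model "github.com/go-skynet/LocalAI/pkg/model" // assumed import path
)

func main() {
	// Preload gpt4all-j under the alias gpt-3.5-turbo, then serve.
	preload := `[{"url": "github:go-skynet/model-gallery/gpt4all-j.yaml", "name": "gpt-3.5-turbo"}]`
	app := api.App(
		api.WithModelLoader(model.NewModelLoader("./models")),
		api.WithJSONStringPreload(preload),
	)
	if app == nil {
		log.Fatal("preload failed") // App returns nil when preloading errors out (see the api.go diff above)
	}
	log.Fatal(app.Listen(":8080"))
}
```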