Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 19:22:39 -05:00)

Compare commits (15 commits):

- 43d3fb3eba
- f5f8c687be
- 9e5cd0f10b
- 231a3e7c02
- 57172e2e30
- 043399dd07
- 6b19356740
- 1cbe6a7067
- 2912f9870f
- 9630be56e1
- 4aa78843c0
- b36d9f3776
- 6f54cab3f0
- ed5df1e68e
- 3c07e11e73
.github/release.yml (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
```yaml
# .github/release.yml

changelog:
  exclude:
    labels:
      - ignore-for-release
  categories:
    - title: Breaking Changes 🛠
      labels:
        - Semver-Major
        - breaking-change
    - title: "Bug fixes :bug:"
      labels:
        - bug
    - title: Exciting New Features 🎉
      labels:
        - Semver-Minor
        - enhancement
    - title: 👒 Dependencies
      labels:
        - dependencies
    - title: Other Changes
      labels:
        - "*"
```
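The categories above are keyed off pull-request labels, so the grouping only works if PRs carry the right label before release notes are generated. A hedged sketch with the GitHub CLI; the PR number is a placeholder:

```bash
# Label a merged PR so it lands under "Exciting New Features 🎉" in the generated notes.
gh pr edit <pr-number> --add-label enhancement

# PRs labeled ignore-for-release are excluded from the changelog entirely.
gh pr edit <pr-number> --add-label ignore-for-release
```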
.github/workflows/release.yaml (vendored, new file, 81 lines)
@@ -0,0 +1,81 @@
```yaml
name: Build and Release

on: push

jobs:
  build-linux:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: ubuntu-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true
      - name: Dependencies
        run: |
          sudo apt-get update
          sudo apt-get install build-essential ffmpeg
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.define }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*

  build-macOS:
    strategy:
      matrix:
        include:
          - build: 'avx2'
            defines: ''
          - build: 'avx'
            defines: '-DLLAMA_AVX2=OFF'
          - build: 'avx512'
            defines: '-DLLAMA_AVX512=ON'
    runs-on: macOS-latest
    steps:
      - name: Clone
        uses: actions/checkout@v3
        with:
          submodules: true

      - name: Dependencies
        run: |
          brew update
          brew install sdl2 ffmpeg
      - name: Build
        id: build
        env:
          CMAKE_ARGS: "${{ matrix.define }}"
          BUILD_ID: "${{ matrix.build }}"
        run: |
          make dist
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.build }}
          path: release/
      - name: Release
        uses: softprops/action-gh-release@v1
        if: startsWith(github.ref, 'refs/tags/')
        with:
          files: |
            release/*
```
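Both jobs build and upload artifacts on every push, but the `softprops/action-gh-release` step is gated on `refs/tags/`, so binaries are only attached to a GitHub release when a tag is pushed. A minimal sketch of triggering that path; the version string is a placeholder:

```bash
# Pushing a tag is what turns the uploaded artifacts into release assets.
git tag v1.x.y          # placeholder version
git push origin v1.x.y
```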
.github/workflows/release.yml.disabled (vendored, removed, 26 lines)
@@ -1,26 +0,0 @@
```yaml
name: goreleaser

on:
  push:
    tags:
      - 'v*'

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v4
        with:
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.gitignore (vendored, 2 changes)
```diff
@@ -16,5 +16,7 @@ local-ai
 models/*
 test-models/
 
+release/
+
 # just in case
 .DS_Store
```
@@ -1,15 +0,0 @@
```yaml
# Make sure to check the documentation at http://goreleaser.com
project_name: local-ai
builds:
  - ldflags:
      - -w -s
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
      - windows
    goarch:
      - amd64
      - arm64
    binary: '{{ .ProjectName }}'
```
```diff
@@ -3,7 +3,7 @@ ARG BUILD_TYPE=
 FROM golang:$GO_VERSION
 ENV REBUILD=true
 WORKDIR /build
-RUN apt-get update && apt-get install -y cmake libgomp1 libopenblas-dev libopenblas-base libopencv-dev libopencv-core-dev libopencv-core4.5
+RUN apt-get update && apt-get install -y cmake libgomp1 libopenblas-dev libopenblas-base libopencv-dev libopencv-core-dev libopencv-core4.5 ca-certificates
 COPY . .
 RUN ln -s /usr/include/opencv4/opencv2/ /usr/include/opencv2
 RUN make build
@@ -11,5 +11,6 @@ RUN make build
 
 FROM debian:$DEBIAN_VERSION
 COPY --from=builder /build/local-ai /usr/bin/local-ai
+RUN apt-get update && apt-get install -y ca-certificates
 EXPOSE 8080
 ENTRYPOINT [ "/usr/bin/local-ai" ]
```
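Both image stages now install `ca-certificates`, so HTTPS downloads work inside the container. A hedged local build sketch, assuming the `GO_VERSION` and `DEBIAN_VERSION` build args are declared near the `ARG BUILD_TYPE=` line referenced in the hunk header; the tag and Go version are placeholders:

```bash
# Build and run the image locally; mount a models directory and pass it via --models-path.
docker build --build-arg GO_VERSION=1.20 -t local-ai:dev .
docker run -p 8080:8080 -v "$PWD/models:/models" local-ai:dev --models-path /models
```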
Makefile (10 changes)
```diff
@@ -5,7 +5,7 @@ BINARY_NAME=local-ai
 
 GOLLAMA_VERSION?=ccf23adfb278c0165d388389a5d60f3fe38e4854
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
-GPT4ALL_VERSION?=914519e772fd78c15691dcd0b8bac60d6af514ec
+GPT4ALL_VERSION?=8119ff4df0a99bde44255db2b8c7290b5582ac2b
 GOGPT2_VERSION?=7bff56f0224502c1c9ed6258d2a17e8084628827
 RWKV_REPO?=https://github.com/donomii/go-rwkv.cpp
 RWKV_VERSION?=07166da10cb2a9e8854395a4f210464dcea76e47
@@ -17,9 +17,12 @@ CGO_LDFLAGS?=
 CUDA_LIBPATH?=/usr/local/cuda/lib64/
 STABLEDIFFUSION_VERSION?=c0748eca3642d58bcf9521108bcee46959c647dc
 GO_TAGS?=
+BUILD_ID?=git
+
+OPTIONAL_TARGETS?=
 
 OS := $(shell uname -s)
 ARCH := $(shell uname -m)
 GREEN := $(shell tput -Txterm setaf 2)
 YELLOW := $(shell tput -Txterm setaf 3)
 WHITE := $(shell tput -Txterm setaf 7)
@@ -186,6 +189,7 @@ clean: ## Remove build related file
 	rm -rf ./bloomz
 	rm -rf ./whisper.cpp
 	rm -rf $(BINARY_NAME)
+	rm -rf release/
 
 ## Build:
 
@@ -195,6 +199,10 @@ build: prepare ## Build the project
 	$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
 	CGO_LDFLAGS="$(CGO_LDFLAGS)" C_INCLUDE_PATH=${C_INCLUDE_PATH} LIBRARY_PATH=${LIBRARY_PATH} $(GOCMD) build -tags "$(GO_TAGS)" -x -o $(BINARY_NAME) ./
 
+dist: build
+	mkdir -p release
+	cp $(BINARY_NAME) release/$(BINARY_NAME)-$(BUILD_ID)-$(OS)-$(ARCH)
+
 generic-build: ## Build the project using generic
 	BUILD_TYPE="generic" $(MAKE) build
 
```
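The new `dist` target is what the release workflow calls: it runs a normal `build` and copies the binary into `release/` with the build ID, OS, and architecture baked into the file name. A local sketch mirroring one entry of the CI matrix:

```bash
# Reproduce one matrix entry of the CI build locally.
CMAKE_ARGS="-DLLAMA_AVX2=OFF" BUILD_ID=avx make dist

# On an x86_64 Linux host this yields something like release/local-ai-avx-Linux-x86_64
ls release/
```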
```diff
@@ -23,7 +23,7 @@ In a nutshell:
 
 LocalAI is a community-driven project, focused on making the AI accessible to anyone. Any contribution, feedback and PR is welcome! It was initially created by [mudler](https://github.com/mudler/) at the [SpectroCloud OSS Office](https://github.com/spectrocloud).
 
-See the [usage](https://github.com/go-skynet/LocalAI#usage) and [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) sections to learn how to use LocalAI.
+See the [usage](https://github.com/go-skynet/LocalAI#usage) and [examples](https://github.com/go-skynet/LocalAI/tree/master/examples/) sections to learn how to use LocalAI. For a list of curated models check out the [model gallery](https://github.com/go-skynet/model-gallery).
 
 ### How does it work?
 
@@ -39,6 +39,7 @@ LocalAI uses C++ bindings for optimizing speed. It is based on [llama.cpp](https
 
 ## News
 
+- 21-05-2023: __v1.14.0__ released. Minor updates to the `/models/apply` endpoint, `llama.cpp` backend updated including https://github.com/ggerganov/llama.cpp/pull/1508 which breaks compatibility with older models. `gpt4all` is still compatible with the old format.
 - 19-05-2023: __v1.13.0__ released! 🔥🔥 updates to the `gpt4all` and `llama` backend, consolidated CUDA support ( https://github.com/go-skynet/LocalAI/pull/310 thanks to @bubthegreat and @Thireus ), preliminar support for [installing models via API](https://github.com/go-skynet/LocalAI#advanced-prepare-models-using-the-api).
 - 17-05-2023: __v1.12.0__ released! 🔥🔥 Minor fixes, plus CUDA (https://github.com/go-skynet/LocalAI/pull/258) support for `llama.cpp`-compatible models and image generation (https://github.com/go-skynet/LocalAI/pull/272).
 - 16-05-2023: 🔥🔥🔥 Experimental support for CUDA (https://github.com/go-skynet/LocalAI/pull/258) in the `llama.cpp` backend and Stable diffusion CPU image generation (https://github.com/go-skynet/LocalAI/pull/272) in `master`.
```
api/api.go (69 changes)
```diff
@@ -1,10 +1,8 @@
 package api
 
 import (
-	"context"
 	"errors"
-
 	model "github.com/go-skynet/LocalAI/pkg/model"
 	"github.com/gofiber/fiber/v2"
 	"github.com/gofiber/fiber/v2/middleware/cors"
 	"github.com/gofiber/fiber/v2/middleware/logger"
```
```diff
@@ -13,16 +11,18 @@ import (
 	"github.com/rs/zerolog/log"
 )
 
-func App(c context.Context, configFile string, loader *model.ModelLoader, uploadLimitMB, threads, ctxSize int, f16 bool, debug, disableMessage bool, imageDir string) *fiber.App {
+func App(opts ...AppOption) *fiber.App {
+	options := newOptions(opts...)
+
 	zerolog.SetGlobalLevel(zerolog.InfoLevel)
-	if debug {
+	if options.debug {
 		zerolog.SetGlobalLevel(zerolog.DebugLevel)
 	}
 
 	// Return errors as JSON responses
 	app := fiber.New(fiber.Config{
-		BodyLimit:             uploadLimitMB * 1024 * 1024, // this is the default limit of 4MB
-		DisableStartupMessage: disableMessage,
+		BodyLimit:             options.uploadLimitMB * 1024 * 1024, // this is the default limit of 4MB
+		DisableStartupMessage: options.disableMessage,
 		// Override default error handler
 		ErrorHandler: func(ctx *fiber.Ctx, err error) error {
 			// Status code defaults to 500
```
```diff
@@ -43,24 +43,24 @@ func App(c context.Context, configFile string, loader *model.ModelLoader, upload
 		},
 	})
 
-	if debug {
+	if options.debug {
 		app.Use(logger.New(logger.Config{
 			Format: "[${ip}]:${port} ${status} - ${method} ${path}\n",
 		}))
 	}
 
 	cm := NewConfigMerger()
-	if err := cm.LoadConfigs(loader.ModelPath); err != nil {
+	if err := cm.LoadConfigs(options.loader.ModelPath); err != nil {
 		log.Error().Msgf("error loading config files: %s", err.Error())
 	}
 
-	if configFile != "" {
-		if err := cm.LoadConfigFile(configFile); err != nil {
+	if options.configFile != "" {
+		if err := cm.LoadConfigFile(options.configFile); err != nil {
 			log.Error().Msgf("error loading config file: %s", err.Error())
 		}
 	}
 
-	if debug {
+	if options.debug {
 		for _, v := range cm.ListConfigs() {
 			cfg, _ := cm.GetConfig(v)
 			log.Debug().Msgf("Model: %s (config: %+v)", v, cfg)
```
```diff
@@ -68,46 +68,55 @@ func App(c context.Context, configFile string, loader *model.ModelLoader, upload
 	}
 	// Default middleware config
 	app.Use(recover.New())
-	app.Use(cors.New())
+
+	if options.cors {
+		if options.corsAllowOrigins == "" {
+			app.Use(cors.New())
+		} else {
+			app.Use(cors.New(cors.Config{
+				AllowOrigins: options.corsAllowOrigins,
+			}))
+		}
+	}
 
 	// LocalAI API endpoints
-	applier := newGalleryApplier(loader.ModelPath)
-	applier.start(c, cm)
-	app.Post("/models/apply", applyModelGallery(loader.ModelPath, cm, applier.C))
+	applier := newGalleryApplier(options.loader.ModelPath)
+	applier.start(options.context, cm)
+	app.Post("/models/apply", applyModelGallery(options.loader.ModelPath, cm, applier.C))
 	app.Get("/models/jobs/:uuid", getOpStatus(applier))
 
 	// openAI compatible API endpoint
 
 	// chat
-	app.Post("/v1/chat/completions", chatEndpoint(cm, debug, loader, threads, ctxSize, f16))
-	app.Post("/chat/completions", chatEndpoint(cm, debug, loader, threads, ctxSize, f16))
+	app.Post("/v1/chat/completions", chatEndpoint(cm, options))
+	app.Post("/chat/completions", chatEndpoint(cm, options))
 
 	// edit
-	app.Post("/v1/edits", editEndpoint(cm, debug, loader, threads, ctxSize, f16))
-	app.Post("/edits", editEndpoint(cm, debug, loader, threads, ctxSize, f16))
+	app.Post("/v1/edits", editEndpoint(cm, options))
+	app.Post("/edits", editEndpoint(cm, options))
 
 	// completion
-	app.Post("/v1/completions", completionEndpoint(cm, debug, loader, threads, ctxSize, f16))
-	app.Post("/completions", completionEndpoint(cm, debug, loader, threads, ctxSize, f16))
+	app.Post("/v1/completions", completionEndpoint(cm, options))
+	app.Post("/completions", completionEndpoint(cm, options))
 
 	// embeddings
-	app.Post("/v1/embeddings", embeddingsEndpoint(cm, debug, loader, threads, ctxSize, f16))
-	app.Post("/embeddings", embeddingsEndpoint(cm, debug, loader, threads, ctxSize, f16))
-	app.Post("/v1/engines/:model/embeddings", embeddingsEndpoint(cm, debug, loader, threads, ctxSize, f16))
+	app.Post("/v1/embeddings", embeddingsEndpoint(cm, options))
+	app.Post("/embeddings", embeddingsEndpoint(cm, options))
+	app.Post("/v1/engines/:model/embeddings", embeddingsEndpoint(cm, options))
 
 	// audio
-	app.Post("/v1/audio/transcriptions", transcriptEndpoint(cm, debug, loader, threads, ctxSize, f16))
+	app.Post("/v1/audio/transcriptions", transcriptEndpoint(cm, options))
 
 	// images
-	app.Post("/v1/images/generations", imageEndpoint(cm, debug, loader, imageDir))
+	app.Post("/v1/images/generations", imageEndpoint(cm, options))
 
-	if imageDir != "" {
-		app.Static("/generated-images", imageDir)
+	if options.imageDir != "" {
+		app.Static("/generated-images", options.imageDir)
 	}
 
 	// models
-	app.Get("/v1/models", listModels(loader, cm))
-	app.Get("/models", listModels(loader, cm))
+	app.Get("/v1/models", listModels(options.loader, cm))
+	app.Get("/models", listModels(options.loader, cm))
 
 	return app
 }
```
```diff
@@ -114,7 +114,7 @@ var _ = Describe("API test", func() {
 		modelLoader = model.NewModelLoader(tmpdir)
 		c, cancel = context.WithCancel(context.Background())
 
-		app = App(c, "", modelLoader, 15, 1, 512, false, true, true, "")
+		app = App(WithContext(c), WithModelLoader(modelLoader))
 		go app.Listen("127.0.0.1:9090")
 
 		defaultConfig := openai.DefaultConfig("")
@@ -198,7 +198,7 @@ var _ = Describe("API test", func() {
 		modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH"))
 		c, cancel = context.WithCancel(context.Background())
 
-		app = App(c, "", modelLoader, 15, 1, 512, false, true, true, "")
+		app = App(WithContext(c), WithModelLoader(modelLoader))
 		go app.Listen("127.0.0.1:9090")
 
 		defaultConfig := openai.DefaultConfig("")
@@ -316,7 +316,7 @@ var _ = Describe("API test", func() {
 		modelLoader = model.NewModelLoader(os.Getenv("MODELS_PATH"))
 		c, cancel = context.WithCancel(context.Background())
 
-		app = App(c, os.Getenv("CONFIG_FILE"), modelLoader, 5, 1, 512, false, true, true, "")
+		app = App(WithContext(c), WithModelLoader(modelLoader), WithConfigFile(os.Getenv("CONFIG_FILE")))
 		go app.Listen("127.0.0.1:9090")
 
 		defaultConfig := openai.DefaultConfig("")
```
```diff
@@ -142,15 +142,15 @@ func defaultRequest(modelFile string) OpenAIRequest {
 }
 
 // https://platform.openai.com/docs/api-reference/completions
-func completionEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
+func completionEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
 	return func(c *fiber.Ctx) error {
 
-		model, input, err := readInput(c, loader, true)
+		model, input, err := readInput(c, o.loader, true)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
 
-		config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
@@ -166,7 +166,7 @@ func completionEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
 		var result []Choice
 		for _, i := range config.PromptStrings {
 			// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
-			templatedInput, err := loader.TemplatePrefix(templateFile, struct {
+			templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
 				Input string
 			}{Input: i})
 			if err == nil {
@@ -174,7 +174,7 @@ func completionEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
 				log.Debug().Msgf("Template found, input modified to: %s", i)
 			}
 
-			r, err := ComputeChoices(i, input, config, loader, func(s string, c *[]Choice) {
+			r, err := ComputeChoices(i, input, config, o.loader, func(s string, c *[]Choice) {
 				*c = append(*c, Choice{Text: s})
 			}, nil)
 			if err != nil {
@@ -199,14 +199,14 @@ func completionEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
 }
 
 // https://platform.openai.com/docs/api-reference/embeddings
-func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
+func embeddingsEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
 	return func(c *fiber.Ctx) error {
-		model, input, err := readInput(c, loader, true)
+		model, input, err := readInput(c, o.loader, true)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
 
-		config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
@@ -216,7 +216,7 @@ func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
 
 		for i, s := range config.InputToken {
 			// get the model function to call for the result
-			embedFn, err := ModelEmbedding("", s, loader, *config)
+			embedFn, err := ModelEmbedding("", s, o.loader, *config)
 			if err != nil {
 				return err
 			}
@@ -230,7 +230,7 @@ func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
 
 		for i, s := range config.InputStrings {
 			// get the model function to call for the result
-			embedFn, err := ModelEmbedding(s, []int{}, loader, *config)
+			embedFn, err := ModelEmbedding(s, []int{}, o.loader, *config)
 			if err != nil {
 				return err
 			}
```
```diff
@@ -256,13 +256,20 @@ func embeddingsEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
 	}
 }
 
-func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
+func chatEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
 
 	process := func(s string, req *OpenAIRequest, config *Config, loader *model.ModelLoader, responses chan OpenAIResponse) {
+		initialMessage := OpenAIResponse{
+			Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
+			Choices: []Choice{{Delta: &Message{Role: "assistant"}}},
+			Object:  "chat.completion.chunk",
+		}
+		responses <- initialMessage
+
 		ComputeChoices(s, req, config, loader, func(s string, c *[]Choice) {}, func(s string) bool {
 			resp := OpenAIResponse{
 				Model:   req.Model, // we have to return what the user sent here, due to OpenAI spec.
-				Choices: []Choice{{Delta: &Message{Role: "assistant", Content: s}}},
+				Choices: []Choice{{Delta: &Message{Content: s}}},
 				Object:  "chat.completion.chunk",
 			}
 			log.Debug().Msgf("Sending goroutine: %s", s)
@@ -273,12 +280,12 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 		close(responses)
 	}
 	return func(c *fiber.Ctx) error {
-		model, input, err := readInput(c, loader, true)
+		model, input, err := readInput(c, o.loader, true)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
 
-		config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
@@ -319,7 +326,7 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 		}
 
 		// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
-		templatedInput, err := loader.TemplatePrefix(templateFile, struct {
+		templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
 			Input string
 		}{Input: predInput})
 		if err == nil {
@@ -330,7 +337,7 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 		if input.Stream {
 			responses := make(chan OpenAIResponse)
 
-			go process(predInput, input, config, loader, responses)
+			go process(predInput, input, config, o.loader, responses)
 
 			c.Context().SetBodyStreamWriter(fasthttp.StreamWriter(func(w *bufio.Writer) {
 
@@ -339,13 +346,11 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 				enc := json.NewEncoder(&buf)
 				enc.Encode(ev)
 
-				fmt.Fprintf(w, "event: data\n\n")
-				fmt.Fprintf(w, "data: %v\n\n", buf.String())
 				log.Debug().Msgf("Sending chunk: %s", buf.String())
+				fmt.Fprintf(w, "data: %v\n", buf.String())
 				w.Flush()
 			}
 
-			w.WriteString("event: data\n\n")
 			resp := &OpenAIResponse{
 				Model:   input.Model, // we have to return what the user sent here, due to OpenAI spec.
 				Choices: []Choice{{FinishReason: "stop"}},
@@ -353,12 +358,13 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 			respData, _ := json.Marshal(resp)
 
 			w.WriteString(fmt.Sprintf("data: %s\n\n", respData))
+			w.WriteString("data: [DONE]\n\n")
 			w.Flush()
 		}))
 		return nil
 	}
 
-	result, err := ComputeChoices(predInput, input, config, loader, func(s string, c *[]Choice) {
+	result, err := ComputeChoices(predInput, input, config, o.loader, func(s string, c *[]Choice) {
 		*c = append(*c, Choice{Message: &Message{Role: "assistant", Content: s}})
 	}, nil)
 	if err != nil {
```
```diff
@@ -378,14 +384,14 @@ func chatEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 	}
 }
 
-func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
+func editEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
 	return func(c *fiber.Ctx) error {
-		model, input, err := readInput(c, loader, true)
+		model, input, err := readInput(c, o.loader, true)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
 
-		config, input, err := readConfig(model, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(model, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
@@ -401,7 +407,7 @@ func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 		var result []Choice
 		for _, i := range config.InputStrings {
 			// A model can have a "file.bin.tmpl" file associated with a prompt template prefix
-			templatedInput, err := loader.TemplatePrefix(templateFile, struct {
+			templatedInput, err := o.loader.TemplatePrefix(templateFile, struct {
 				Input       string
 				Instruction string
 			}{Input: i})
@@ -410,7 +416,7 @@ func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 				log.Debug().Msgf("Template found, input modified to: %s", i)
 			}
 
-			r, err := ComputeChoices(i, input, config, loader, func(s string, c *[]Choice) {
+			r, err := ComputeChoices(i, input, config, o.loader, func(s string, c *[]Choice) {
 				*c = append(*c, Choice{Text: s})
 			}, nil)
 			if err != nil {
@@ -449,9 +455,9 @@ func editEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threa
 
 *
 */
-func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imageDir string) func(c *fiber.Ctx) error {
+func imageEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
 	return func(c *fiber.Ctx) error {
-		m, input, err := readInput(c, loader, false)
+		m, input, err := readInput(c, o.loader, false)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
@@ -461,7 +467,7 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
 		}
 		log.Debug().Msgf("Loading model: %+v", m)
 
-		config, input, err := readConfig(m, input, cm, loader, debug, 0, 0, false)
+		config, input, err := readConfig(m, input, cm, o.loader, o.debug, 0, 0, false)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
@@ -518,7 +524,7 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
 
 			tempDir := ""
 			if !b64JSON {
-				tempDir = imageDir
+				tempDir = o.imageDir
 			}
 			// Create a temporary file
 			outputFile, err := ioutil.TempFile(tempDir, "b64")
@@ -535,7 +541,7 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
 
 			baseURL := c.BaseURL()
 
-			fn, err := ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, loader, *config)
+			fn, err := ImageGeneration(height, width, mode, step, input.Seed, positive_prompt, negative_prompt, output, o.loader, *config)
 			if err != nil {
 				return err
 			}
@@ -574,14 +580,14 @@ func imageEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, imag
 }
 
 // https://platform.openai.com/docs/api-reference/audio/create
-func transcriptEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader, threads, ctx int, f16 bool) func(c *fiber.Ctx) error {
+func transcriptEndpoint(cm *ConfigMerger, o *Option) func(c *fiber.Ctx) error {
 	return func(c *fiber.Ctx) error {
-		m, input, err := readInput(c, loader, false)
+		m, input, err := readInput(c, o.loader, false)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
 
-		config, input, err := readConfig(m, input, cm, loader, debug, threads, ctx, f16)
+		config, input, err := readConfig(m, input, cm, o.loader, o.debug, o.threads, o.ctxSize, o.f16)
 		if err != nil {
 			return fmt.Errorf("failed reading parameters from request:%w", err)
 		}
@@ -616,7 +622,7 @@ func transcriptEndpoint(cm *ConfigMerger, debug bool, loader *model.ModelLoader,
 
 		log.Debug().Msgf("Audio file copied to: %+v", dst)
 
-		whisperModel, err := loader.BackendLoader(model.WhisperBackend, config.Model, []llama.ModelOption{}, uint32(config.Threads))
+		whisperModel, err := o.loader.BackendLoader(model.WhisperBackend, config.Model, []llama.ModelOption{}, uint32(config.Threads))
 		if err != nil {
 			return err
 		}
```
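With the streaming changes above, `/v1/chat/completions` now sends an initial role-only delta, plain `data:` chunks (the extra `event: data` lines are gone), and a final `data: [DONE]` terminator. A hedged sketch of watching the stream with curl; the model name is a placeholder for whatever lives under your models path:

```bash
# -N disables curl's buffering so SSE chunks print as they arrive.
curl -N http://localhost:8080/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "ggml-gpt4all-j",
        "messages": [{"role": "user", "content": "How are you?"}],
        "stream": true
      }'
# Expected shape: a series of "data: {...delta...}" lines, then "data: [DONE]".
```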
api/options.go (new file, 108 lines)
@@ -0,0 +1,108 @@
```go
package api

import (
	"context"

	model "github.com/go-skynet/LocalAI/pkg/model"
)

type Option struct {
	context                         context.Context
	configFile                      string
	loader                          *model.ModelLoader
	uploadLimitMB, threads, ctxSize int
	f16                             bool
	debug, disableMessage           bool
	imageDir                        string
	cors                            bool
	corsAllowOrigins                string
}

type AppOption func(*Option)

func newOptions(o ...AppOption) *Option {
	opt := &Option{
		context:        context.Background(),
		uploadLimitMB:  15,
		threads:        1,
		ctxSize:        512,
		debug:          true,
		disableMessage: true,
	}
	for _, oo := range o {
		oo(opt)
	}
	return opt
}

func WithCors(b bool) AppOption {
	return func(o *Option) {
		o.cors = b
	}
}

func WithCorsAllowOrigins(b string) AppOption {
	return func(o *Option) {
		o.corsAllowOrigins = b
	}
}

func WithContext(ctx context.Context) AppOption {
	return func(o *Option) {
		o.context = ctx
	}
}

func WithConfigFile(configFile string) AppOption {
	return func(o *Option) {
		o.configFile = configFile
	}
}

func WithModelLoader(loader *model.ModelLoader) AppOption {
	return func(o *Option) {
		o.loader = loader
	}
}

func WithUploadLimitMB(limit int) AppOption {
	return func(o *Option) {
		o.uploadLimitMB = limit
	}
}

func WithThreads(threads int) AppOption {
	return func(o *Option) {
		o.threads = threads
	}
}

func WithContextSize(ctxSize int) AppOption {
	return func(o *Option) {
		o.ctxSize = ctxSize
	}
}

func WithF16(f16 bool) AppOption {
	return func(o *Option) {
		o.f16 = f16
	}
}

func WithDebug(debug bool) AppOption {
	return func(o *Option) {
		o.debug = debug
	}
}

func WithDisableMessage(disableMessage bool) AppOption {
	return func(o *Option) {
		o.disableMessage = disableMessage
	}
}

func WithImageDir(imageDir string) AppOption {
	return func(o *Option) {
		o.imageDir = imageDir
	}
}
```
```diff
@@ -57,6 +57,14 @@ A full example on how to run RWKV models with LocalAI
 
 [Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/rwkv/)
 
+### PrivateGPT
+
+_by [@mudler](https://github.com/mudler)_
+
+A full example on how to run PrivateGPT with LocalAI
+
+[Check it out here](https://github.com/go-skynet/LocalAI/tree/master/examples/privateGPT/)
+
 ### Slack bot
 
 _by [@mudler](https://github.com/mudler)_
 
```
```diff
@@ -5,7 +5,6 @@ parameters:
   temperature: 0.2
   top_p: 0.7
 context_size: 1024
-threads: 14
 stopwords:
 - "HUMAN:"
 - "GPT:"
@@ -5,7 +5,6 @@ parameters:
   temperature: 0.2
   top_p: 0.7
 context_size: 1024
-threads: 4
 stopwords:
 - "HUMAN:"
 - "GPT:"
```
examples/privateGPT/README.md (new file, 25 lines)
@@ -0,0 +1,25 @@

# privateGPT

This example is a re-adaptation of https://github.com/imartinez/privateGPT to work with LocalAI and OpenAI endpoints. We have a fork with the changes required to work with privateGPT here https://github.com/go-skynet/privateGPT ( PR: https://github.com/imartinez/privateGPT/pull/408 ).

Follow the instructions in https://github.com/go-skynet/privateGPT:

```bash
git clone git@github.com:go-skynet/privateGPT.git
cd privateGPT
pip install -r requirements.txt
```

Rename `example.env` to `.env` and edit the variables appropriately.

This is an example `.env` file for LocalAI:

```
PERSIST_DIRECTORY=db
# Set to OpenAI here
MODEL_TYPE=OpenAI
EMBEDDINGS_MODEL_NAME=all-MiniLM-L6-v2
MODEL_N_CTX=1000
# LocalAI URL
OPENAI_API_BASE=http://localhost:8080/v1
```
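Before pointing privateGPT at `OPENAI_API_BASE`, it is worth confirming that LocalAI is up and serving the model you intend to use. A quick check, assuming LocalAI listens on the port used in the `.env` example above:

```bash
# List the models LocalAI currently exposes on its OpenAI-compatible API.
curl http://localhost:8080/v1/models
```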
```diff
@@ -5,7 +5,6 @@ parameters:
   temperature: 0.2
   top_p: 0.7
 context_size: 1024
-threads: 14
 stopwords:
 - "HUMAN:"
 - "GPT:"
@@ -6,7 +6,6 @@ parameters:
   max_tokens: 100
   top_p: 0.8
 context_size: 1024
-threads: 14
 backend: "rwkv"
 cutwords:
 - "Bob:.*"
```
go.mod (2 changes)
```diff
@@ -15,7 +15,7 @@ require (
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/imdario/mergo v0.3.15
 	github.com/mudler/go-stable-diffusion v0.0.0-20230516152536-c0748eca3642
-	github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230519014017-914519e772fd
+	github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd
 	github.com/onsi/ginkgo/v2 v2.9.5
 	github.com/onsi/gomega v1.27.7
 	github.com/otiai10/openaigo v1.1.0
```
go.sum (8 changes)
```diff
@@ -101,6 +101,14 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230519014017-914519e772fd h1:kMnZASxCNc8GsPuAV94tltEsfT6T+esuB+rgzdjwFVM=
 github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230519014017-914519e772fd/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230521011615-aba1147a2253 h1:7udNpoHYOBktcpCEe8aDaPJ0LyzyRhVjpzAGFjPxPkY=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230521011615-aba1147a2253/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522155256-c8c95ab46f92 h1:brOLJSsTLnFK2vUVi7MaVdxAEhHkOsoboR0vR5WW1HU=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522155256-c8c95ab46f92/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522200803-5ca8767c81a2 h1:3368tGU1ooRSPw0zMvXqv9wLMxS82LzEkVSuo8DWZBI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522200803-5ca8767c81a2/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd h1:is/rE0YD8oEWcX3fQ+VxoS3fD0LqFEmTxh8XZegYYsA=
+github.com/nomic-ai/gpt4all/gpt4all-bindings/golang v0.0.0-20230522220313-2ce22208a3dd/go.mod h1:4T3CHXyrt+7FQHXaxULZfPjHbD8/99WuDDJa0YVZARI=
 github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
 github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
 github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
```
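The gpt4all binding is pinned by pseudo-version, so bumps like the one above are normally produced with `go get` against the target commit rather than by editing `go.mod` by hand. A hedged sketch using the commit hash that appears in this diff:

```bash
# Re-pin the gpt4all Go bindings to a specific upstream commit, then clean up go.sum.
go get github.com/nomic-ai/gpt4all/gpt4all-bindings/golang@2ce22208a3dd
go mod tidy
```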
main.go (22 changes)
```diff
@@ -1,7 +1,6 @@
 package main
 
 import (
-	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -34,6 +33,14 @@ func main() {
 			Name:    "debug",
 			EnvVars: []string{"DEBUG"},
 		},
+		&cli.BoolFlag{
+			Name:    "cors",
+			EnvVars: []string{"CORS"},
+		},
+		&cli.StringFlag{
+			Name:    "cors-allow-origins",
+			EnvVars: []string{"CORS_ALLOW_ORIGINS"},
+		},
 		&cli.IntFlag{
 			Name:        "threads",
 			DefaultText: "Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested.",
@@ -94,7 +101,18 @@ It uses llama.cpp, ggml and gpt4all as backend with golang c bindings.
 		Copyright: "go-skynet authors",
 		Action: func(ctx *cli.Context) error {
 			fmt.Printf("Starting LocalAI using %d threads, with models path: %s\n", ctx.Int("threads"), ctx.String("models-path"))
-			return api.App(context.Background(), ctx.String("config-file"), model.NewModelLoader(ctx.String("models-path")), ctx.Int("upload-limit"), ctx.Int("threads"), ctx.Int("context-size"), ctx.Bool("f16"), ctx.Bool("debug"), false, ctx.String("image-path")).Listen(ctx.String("address"))
+			return api.App(
+				api.WithConfigFile(ctx.String("config-file")),
+				api.WithModelLoader(model.NewModelLoader(ctx.String("models-path"))),
+				api.WithContextSize(ctx.Int("context-size")),
+				api.WithDebug(ctx.Bool("debug")),
+				api.WithImageDir(ctx.String("image-path")),
+				api.WithF16(ctx.Bool("f16")),
+				api.WithDisableMessage(false),
+				api.WithCors(ctx.Bool("cors")),
+				api.WithCorsAllowOrigins(ctx.String("cors-allow-origins")),
+				api.WithThreads(ctx.Int("threads")),
+				api.WithUploadLimitMB(ctx.Int("upload-limit"))).Listen(ctx.String("address"))
 		},
 	}
 
```
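With the new flags wired through to `api.WithCors` and `api.WithCorsAllowOrigins`, CORS can be enabled from the command line or via the `CORS` and `CORS_ALLOW_ORIGINS` environment variables. A usage sketch; the models path and origin are placeholders:

```bash
# Flag form
local-ai --models-path ./models --threads 4 --cors --cors-allow-origins "https://example.com"

# Equivalent environment-variable form
CORS=true CORS_ALLOW_ORIGINS="https://example.com" local-ai --models-path ./models --threads 4
```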