Mirror of https://github.com/mudler/LocalAI.git (synced 2026-02-03 03:02:38 -05:00)

Compare commits: feat/nvidi ... copilot/fi (391 commits)
Commit list: 391 commits, eae90cafac ... 542f07ab2d.
.air.toml (new file, +8)
@@ -0,0 +1,8 @@
# .air.toml
[build]
cmd = "make build"
bin = "./local-ai"
args_bin = [ "--debug" ]
include_ext = ["go", "html", "yaml", "toml", "json", "txt", "md"]
exclude_dir = ["pkg/grpc/proto"]
delay = 1000
.github/gallery-agent/agent.go (vendored, new file, +288)
@@ -0,0 +1,288 @@
package main

import (
    "context"
    "fmt"
    "os"
    "slices"
    "strings"

    hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
    cogito "github.com/mudler/cogito"

    "github.com/mudler/cogito/structures"
    "github.com/sashabaranov/go-openai/jsonschema"
)

var (
    openAIModel      = os.Getenv("OPENAI_MODEL")
    openAIKey        = os.Getenv("OPENAI_KEY")
    openAIBaseURL    = os.Getenv("OPENAI_BASE_URL")
    galleryIndexPath = os.Getenv("GALLERY_INDEX_PATH")
    //defaultclient
    llm = cogito.NewOpenAILLM(openAIModel, openAIKey, openAIBaseURL)
)

// cleanTextContent removes trailing spaces, tabs, and normalizes line endings
// to prevent YAML linting issues like trailing spaces and multiple empty lines
func cleanTextContent(text string) string {
    lines := strings.Split(text, "\n")
    var cleanedLines []string
    var prevEmpty bool
    for _, line := range lines {
        // Remove all trailing whitespace (spaces, tabs, etc.)
        trimmed := strings.TrimRight(line, " \t\r")
        // Avoid multiple consecutive empty lines
        if trimmed == "" {
            if !prevEmpty {
                cleanedLines = append(cleanedLines, "")
            }
            prevEmpty = true
        } else {
            cleanedLines = append(cleanedLines, trimmed)
            prevEmpty = false
        }
    }
    // Remove trailing empty lines from the result
    result := strings.Join(cleanedLines, "\n")
    return strings.TrimRight(result, "\n")
}

// isModelExisting checks if a specific model ID exists in the gallery using text search
func isModelExisting(modelID string) (bool, error) {
    indexPath := getGalleryIndexPath()
    content, err := os.ReadFile(indexPath)
    if err != nil {
        return false, fmt.Errorf("failed to read %s: %w", indexPath, err)
    }

    contentStr := string(content)
    // Simple text search - if the model ID appears anywhere in the file, it exists
    return strings.Contains(contentStr, modelID), nil
}

// filterExistingModels removes models that already exist in the gallery
func filterExistingModels(models []ProcessedModel) ([]ProcessedModel, error) {
    var filteredModels []ProcessedModel
    for _, model := range models {
        exists, err := isModelExisting(model.ModelID)
        if err != nil {
            fmt.Printf("Error checking if model %s exists: %v, skipping\n", model.ModelID, err)
            continue
        }

        if !exists {
            filteredModels = append(filteredModels, model)
        } else {
            fmt.Printf("Skipping existing model: %s\n", model.ModelID)
        }
    }

    fmt.Printf("Filtered out %d existing models, %d new models remaining\n",
        len(models)-len(filteredModels), len(filteredModels))

    return filteredModels, nil
}

// getGalleryIndexPath returns the gallery index file path, with a default fallback
func getGalleryIndexPath() string {
    if galleryIndexPath != "" {
        return galleryIndexPath
    }
    return "gallery/index.yaml"
}

func getRealReadme(ctx context.Context, repository string) (string, error) {
    // Create a conversation fragment
    fragment := cogito.NewEmptyFragment().
        AddMessage("user",
            `Your task is to get a clear description of a large language model from huggingface by using the provided tool. I will share with you a repository that might be quantized, and as such probably not by the original model author. We need to get the real description of the model, and not the one that might be quantized. You will have to call the tool to get the readme more than once by figuring out from the quantized readme which is the base model readme. This is the repository: `+repository)

    // Execute with tools
    result, err := cogito.ExecuteTools(llm, fragment,
        cogito.WithIterations(3),
        cogito.WithMaxAttempts(3),
        cogito.WithTools(&HFReadmeTool{client: hfapi.NewClient()}))
    if err != nil {
        return "", err
    }

    result = result.AddMessage("user", "Describe the model in a clear and concise way that can be shared in a model gallery.")

    // Get a response
    newFragment, err := llm.Ask(ctx, result)
    if err != nil {
        return "", err
    }

    content := newFragment.LastMessage().Content
    return cleanTextContent(content), nil
}

func selectMostInterestingModels(ctx context.Context, searchResult *SearchResult) ([]ProcessedModel, error) {
    // Create a conversation fragment
    fragment := cogito.NewEmptyFragment().
        AddMessage("user",
            `Your task is to analyze a list of AI models and select the most interesting ones for a model gallery. You will be given detailed information about multiple models including their metadata, file information, and README content.

Consider the following criteria when selecting models:
1. Model popularity (download count)
2. Model recency (last modified date)
3. Model completeness (has preferred model file, README, etc.)
4. Model uniqueness (not duplicates or very similar models)
5. Model quality (based on README content and description)
6. Model utility (practical applications)

You should select models that would be most valuable for users browsing a model gallery. Prioritize models that are:
- Well-documented with clear READMEs
- Recently updated
- Popular (high download count)
- Have the preferred quantization format available
- Offer unique capabilities or are from reputable authors

Return your analysis and selection reasoning.`)

    // Add the search results as context
    modelsInfo := fmt.Sprintf("Found %d models matching '%s' with quantization preference '%s':\n\n",
        searchResult.TotalModelsFound, searchResult.SearchTerm, searchResult.Quantization)

    for i, model := range searchResult.Models {
        modelsInfo += fmt.Sprintf("Model %d:\n", i+1)
        modelsInfo += fmt.Sprintf(" ID: %s\n", model.ModelID)
        modelsInfo += fmt.Sprintf(" Author: %s\n", model.Author)
        modelsInfo += fmt.Sprintf(" Downloads: %d\n", model.Downloads)
        modelsInfo += fmt.Sprintf(" Last Modified: %s\n", model.LastModified)
        modelsInfo += fmt.Sprintf(" Files: %d files\n", len(model.Files))

        if model.PreferredModelFile != nil {
            modelsInfo += fmt.Sprintf(" Preferred Model File: %s (%d bytes)\n",
                model.PreferredModelFile.Path, model.PreferredModelFile.Size)
        } else {
            modelsInfo += " No preferred model file found\n"
        }

        if model.ReadmeContent != "" {
            modelsInfo += fmt.Sprintf(" README: %s\n", model.ReadmeContent)
        }

        if model.ProcessingError != "" {
            modelsInfo += fmt.Sprintf(" Processing Error: %s\n", model.ProcessingError)
        }

        modelsInfo += "\n"
    }

    fragment = fragment.AddMessage("user", modelsInfo)

    fragment = fragment.AddMessage("user", "Based on your analysis, select the top 5 most interesting models and provide a brief explanation for each selection. Also, create a filtered SearchResult with only the selected models. Return just a list of repositories IDs, you will later be asked to output it as a JSON array with the json tool.")

    // Get a response
    newFragment, err := llm.Ask(ctx, fragment)
    if err != nil {
        return nil, err
    }

    fmt.Println(newFragment.LastMessage().Content)
    repositories := struct {
        Repositories []string `json:"repositories"`
    }{}

    s := structures.Structure{
        Schema: jsonschema.Definition{
            Type:                 jsonschema.Object,
            AdditionalProperties: false,
            Properties: map[string]jsonschema.Definition{
                "repositories": {
                    Type:        jsonschema.Array,
                    Items:       &jsonschema.Definition{Type: jsonschema.String},
                    Description: "The trending repositories IDs",
                },
            },
            Required: []string{"repositories"},
        },
        Object: &repositories,
    }

    err = newFragment.ExtractStructure(ctx, llm, s)
    if err != nil {
        return nil, err
    }

    filteredModels := []ProcessedModel{}
    for _, m := range searchResult.Models {
        if slices.Contains(repositories.Repositories, m.ModelID) {
            filteredModels = append(filteredModels, m)
        }
    }

    return filteredModels, nil
}

// ModelFamily represents a YAML anchor/family
type ModelFamily struct {
    Anchor string `json:"anchor"`
    Name   string `json:"name"`
}

// selectModelFamily selects the appropriate model family/anchor for a given model
func selectModelFamily(ctx context.Context, model ProcessedModel, availableFamilies []ModelFamily) (string, error) {
    // Create a conversation fragment
    fragment := cogito.NewEmptyFragment().
        AddMessage("user",
            `Your task is to select the most appropriate model family/anchor for a given AI model. You will be provided with:
1. Information about the model (name, description, etc.)
2. A list of available model families/anchors

You need to select the family that best matches the model's architecture, capabilities, or characteristics. Consider:
- Model architecture (e.g., Llama, Qwen, Mistral, etc.)
- Model capabilities (e.g., vision, coding, chat, etc.)
- Model size/type (e.g., small, medium, large)
- Model purpose (e.g., general purpose, specialized, etc.)

Return the anchor name that best fits the model.`)

    // Add model information
    modelInfo := "Model Information:\n"
    modelInfo += fmt.Sprintf(" ID: %s\n", model.ModelID)
    modelInfo += fmt.Sprintf(" Author: %s\n", model.Author)
    modelInfo += fmt.Sprintf(" Downloads: %d\n", model.Downloads)
    modelInfo += fmt.Sprintf(" Description: %s\n", model.ReadmeContentPreview)

    fragment = fragment.AddMessage("user", modelInfo)

    // Add available families
    familiesInfo := "Available Model Families:\n"
    for _, family := range availableFamilies {
        familiesInfo += fmt.Sprintf(" - %s (%s)\n", family.Anchor, family.Name)
    }

    fragment = fragment.AddMessage("user", familiesInfo)
    fragment = fragment.AddMessage("user", "Select the most appropriate family anchor for this model. Return just the anchor name.")

    // Get a response
    newFragment, err := llm.Ask(ctx, fragment)
    if err != nil {
        return "", err
    }

    // Extract the selected family
    selectedFamily := strings.TrimSpace(newFragment.LastMessage().Content)

    // Validate that the selected family exists in our list
    for _, family := range availableFamilies {
        if family.Anchor == selectedFamily {
            return selectedFamily, nil
        }
    }

    // If no exact match, try to find a close match
    for _, family := range availableFamilies {
        if strings.Contains(strings.ToLower(family.Anchor), strings.ToLower(selectedFamily)) ||
            strings.Contains(strings.ToLower(selectedFamily), strings.ToLower(family.Anchor)) {
            return family.Anchor, nil
        }
    }

    // Default fallback
    return "llama3", nil
}
.github/gallery-agent/gallery.go (vendored, new file, +203)
@@ -0,0 +1,203 @@
package main

import (
    "context"
    "fmt"
    "os"
    "strings"
)

// generateYAMLEntry generates a YAML entry for a model using the specified anchor
func generateYAMLEntry(model ProcessedModel, familyAnchor string) string {
    // Extract model name from ModelID
    parts := strings.Split(model.ModelID, "/")
    modelName := model.ModelID
    if len(parts) > 0 {
        modelName = strings.ToLower(parts[len(parts)-1])
    }
    // Remove common suffixes
    modelName = strings.ReplaceAll(modelName, "-gguf", "")
    modelName = strings.ReplaceAll(modelName, "-q4_k_m", "")
    modelName = strings.ReplaceAll(modelName, "-q4_k_s", "")
    modelName = strings.ReplaceAll(modelName, "-q3_k_m", "")
    modelName = strings.ReplaceAll(modelName, "-q2_k", "")

    fileName := ""
    checksum := ""
    if model.PreferredModelFile != nil {
        fileParts := strings.Split(model.PreferredModelFile.Path, "/")
        if len(fileParts) > 0 {
            fileName = fileParts[len(fileParts)-1]
        }
        checksum = model.PreferredModelFile.SHA256
    } else {
        fileName = model.ModelID
    }

    description := model.ReadmeContent
    if description == "" {
        description = fmt.Sprintf("AI model: %s", modelName)
    }

    // Clean up description to prevent YAML linting issues
    description = cleanTextContent(description)

    // Format description for YAML (indent each line and ensure no trailing spaces)
    lines := strings.Split(description, "\n")
    var formattedLines []string
    for _, line := range lines {
        if strings.TrimSpace(line) == "" {
            // Keep empty lines as empty (no indentation)
            formattedLines = append(formattedLines, "")
        } else {
            // Add indentation to non-empty lines
            formattedLines = append(formattedLines, " "+line)
        }
    }
    formattedDescription := strings.Join(formattedLines, "\n")
    // Remove any trailing spaces from the formatted description
    formattedDescription = strings.TrimRight(formattedDescription, " \t")
    yamlTemplate := ""
    if checksum != "" {
        yamlTemplate = `- !!merge <<: *%s
name: "%s"
urls:
- https://huggingface.co/%s
description: |
%s
overrides:
parameters:
model: %s
files:
- filename: %s
sha256: %s
uri: huggingface://%s/%s`
        return fmt.Sprintf(yamlTemplate,
            familyAnchor,
            modelName,
            model.ModelID,
            formattedDescription,
            fileName,
            fileName,
            checksum,
            model.ModelID,
            fileName,
        )
    } else {
        yamlTemplate = `- !!merge <<: *%s
name: "%s"
urls:
- https://huggingface.co/%s
description: |
%s
overrides:
parameters:
model: %s`
        return fmt.Sprintf(yamlTemplate,
            familyAnchor,
            modelName,
            model.ModelID,
            formattedDescription,
            fileName,
        )
    }
}

// extractModelFamilies extracts all YAML anchors from the gallery index.yaml file
func extractModelFamilies() ([]ModelFamily, error) {
    // Read the index.yaml file
    indexPath := getGalleryIndexPath()
    content, err := os.ReadFile(indexPath)
    if err != nil {
        return nil, fmt.Errorf("failed to read %s: %w", indexPath, err)
    }

    lines := strings.Split(string(content), "\n")
    var families []ModelFamily

    for _, line := range lines {
        line = strings.TrimSpace(line)
        // Look for YAML anchors (lines starting with "- &")
        if strings.HasPrefix(line, "- &") {
            // Extract the anchor name (everything after "- &")
            anchor := strings.TrimPrefix(line, "- &")
            // Remove any trailing colon or other characters
            anchor = strings.Split(anchor, ":")[0]
            anchor = strings.Split(anchor, " ")[0]

            if anchor != "" {
                families = append(families, ModelFamily{
                    Anchor: anchor,
                    Name:   anchor, // Use anchor as name for now
                })
            }
        }
    }

    return families, nil
}

// generateYAMLForModels generates YAML entries for selected models and appends to index.yaml
func generateYAMLForModels(ctx context.Context, models []ProcessedModel) error {
    // Extract available model families
    families, err := extractModelFamilies()
    if err != nil {
        return fmt.Errorf("failed to extract model families: %w", err)
    }

    fmt.Printf("Found %d model families: %v\n", len(families),
        func() []string {
            var names []string
            for _, f := range families {
                names = append(names, f.Anchor)
            }
            return names
        }())

    // Generate YAML entries for each model
    var yamlEntries []string
    for _, model := range models {
        fmt.Printf("Selecting family for model: %s\n", model.ModelID)

        // Select appropriate family for this model
        familyAnchor, err := selectModelFamily(ctx, model, families)
        if err != nil {
            fmt.Printf("Error selecting family for %s: %v, using default\n", model.ModelID, err)
            familyAnchor = "llama3" // Default fallback
        }

        fmt.Printf("Selected family '%s' for model %s\n", familyAnchor, model.ModelID)

        // Generate YAML entry
        yamlEntry := generateYAMLEntry(model, familyAnchor)
        yamlEntries = append(yamlEntries, yamlEntry)
    }

    // Append to index.yaml
    if len(yamlEntries) > 0 {
        indexPath := getGalleryIndexPath()
        fmt.Printf("Appending YAML entries to %s...\n", indexPath)

        // Read current content
        content, err := os.ReadFile(indexPath)
        if err != nil {
            return fmt.Errorf("failed to read %s: %w", indexPath, err)
        }

        // Append new entries
        // Remove trailing whitespace from existing content and join entries without extra newlines
        existingContent := strings.TrimRight(string(content), " \t\n\r")
        yamlBlock := strings.Join(yamlEntries, "\n")
        newContent := existingContent + "\n" + yamlBlock + "\n"

        // Write back to file
        err = os.WriteFile(indexPath, []byte(newContent), 0644)
        if err != nil {
            return fmt.Errorf("failed to write %s: %w", indexPath, err)
        }

        fmt.Printf("Successfully added %d models to %s\n", len(yamlEntries), indexPath)
    }

    return nil
}
.github/gallery-agent/main.go (vendored, new file, +351)
@@ -0,0 +1,351 @@
package main

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "strconv"
    "strings"
    "time"

    hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
)

// ProcessedModelFile represents a processed model file with additional metadata
type ProcessedModelFile struct {
    Path     string `json:"path"`
    Size     int64  `json:"size"`
    SHA256   string `json:"sha256"`
    IsReadme bool   `json:"is_readme"`
    FileType string `json:"file_type"` // "model", "readme", "other"
}

// ProcessedModel represents a processed model with all gathered metadata
type ProcessedModel struct {
    ModelID                 string               `json:"model_id"`
    Author                  string               `json:"author"`
    Downloads               int                  `json:"downloads"`
    LastModified            string               `json:"last_modified"`
    Files                   []ProcessedModelFile `json:"files"`
    PreferredModelFile      *ProcessedModelFile  `json:"preferred_model_file,omitempty"`
    ReadmeFile              *ProcessedModelFile  `json:"readme_file,omitempty"`
    ReadmeContent           string               `json:"readme_content,omitempty"`
    ReadmeContentPreview    string               `json:"readme_content_preview,omitempty"`
    QuantizationPreferences []string             `json:"quantization_preferences"`
    ProcessingError         string               `json:"processing_error,omitempty"`
}

// SearchResult represents the complete result of searching and processing models
type SearchResult struct {
    SearchTerm       string           `json:"search_term"`
    Limit            int              `json:"limit"`
    Quantization     string           `json:"quantization"`
    TotalModelsFound int              `json:"total_models_found"`
    Models           []ProcessedModel `json:"models"`
    FormattedOutput  string           `json:"formatted_output"`
}

// AddedModelSummary represents a summary of models added to the gallery
type AddedModelSummary struct {
    SearchTerm     string   `json:"search_term"`
    TotalFound     int      `json:"total_found"`
    ModelsAdded    int      `json:"models_added"`
    AddedModelIDs  []string `json:"added_model_ids"`
    AddedModelURLs []string `json:"added_model_urls"`
    Quantization   string   `json:"quantization"`
    ProcessingTime string   `json:"processing_time"`
}

func main() {
    startTime := time.Now()

    // Check for synthetic mode
    syntheticMode := os.Getenv("SYNTHETIC_MODE")
    if syntheticMode == "true" || syntheticMode == "1" {
        fmt.Println("Running in SYNTHETIC MODE - generating random test data")
        err := runSyntheticMode()
        if err != nil {
            fmt.Fprintf(os.Stderr, "Error in synthetic mode: %v\n", err)
            os.Exit(1)
        }
        return
    }

    // Get configuration from environment variables
    searchTerm := os.Getenv("SEARCH_TERM")
    if searchTerm == "" {
        searchTerm = "GGUF"
    }

    limitStr := os.Getenv("LIMIT")
    if limitStr == "" {
        limitStr = "5"
    }
    limit, err := strconv.Atoi(limitStr)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error parsing LIMIT: %v\n", err)
        os.Exit(1)
    }

    quantization := os.Getenv("QUANTIZATION")

    maxModels := os.Getenv("MAX_MODELS")
    if maxModels == "" {
        maxModels = "1"
    }
    maxModelsInt, err := strconv.Atoi(maxModels)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error parsing MAX_MODELS: %v\n", err)
        os.Exit(1)
    }

    // Print configuration
    fmt.Printf("Gallery Agent Configuration:\n")
    fmt.Printf(" Search Term: %s\n", searchTerm)
    fmt.Printf(" Limit: %d\n", limit)
    fmt.Printf(" Quantization: %s\n", quantization)
    fmt.Printf(" Max Models to Add: %d\n", maxModelsInt)
    fmt.Printf(" Gallery Index Path: %s\n", os.Getenv("GALLERY_INDEX_PATH"))
    fmt.Println()

    result, err := searchAndProcessModels(searchTerm, limit, quantization)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error: %v\n", err)
        os.Exit(1)
    }

    fmt.Println(result.FormattedOutput)

    // Use AI agent to select the most interesting models
    fmt.Println("Using AI agent to select the most interesting models...")
    models, err := selectMostInterestingModels(context.Background(), result)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
        // Continue with original result if selection fails
        models = result.Models
    }

    fmt.Print(models)

    // Filter out models that already exist in the gallery
    fmt.Println("Filtering out existing models...")
    models, err = filterExistingModels(models)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error filtering existing models: %v\n", err)
        os.Exit(1)
    }

    // Limit to maxModelsInt after filtering
    if len(models) > maxModelsInt {
        models = models[:maxModelsInt]
    }

    // Track added models for summary
    var addedModelIDs []string
    var addedModelURLs []string

    // Generate YAML entries and append to gallery/index.yaml
    if len(models) > 0 {
        for _, model := range models {
            addedModelIDs = append(addedModelIDs, model.ModelID)
            // Generate Hugging Face URL for the model
            modelURL := fmt.Sprintf("https://huggingface.co/%s", model.ModelID)
            addedModelURLs = append(addedModelURLs, modelURL)
        }
        fmt.Println("Generating YAML entries for selected models...")
        err = generateYAMLForModels(context.Background(), models)
        if err != nil {
            fmt.Fprintf(os.Stderr, "Error generating YAML entries: %v\n", err)
            os.Exit(1)
        }
    } else {
        fmt.Println("No new models to add to the gallery.")
    }

    // Create and write summary
    processingTime := time.Since(startTime).String()
    summary := AddedModelSummary{
        SearchTerm:     searchTerm,
        TotalFound:     result.TotalModelsFound,
        ModelsAdded:    len(addedModelIDs),
        AddedModelIDs:  addedModelIDs,
        AddedModelURLs: addedModelURLs,
        Quantization:   quantization,
        ProcessingTime: processingTime,
    }

    // Write summary to file
    summaryData, err := json.MarshalIndent(summary, "", " ")
    if err != nil {
        fmt.Fprintf(os.Stderr, "Error marshaling summary: %v\n", err)
    } else {
        err = os.WriteFile("gallery-agent-summary.json", summaryData, 0644)
        if err != nil {
            fmt.Fprintf(os.Stderr, "Error writing summary file: %v\n", err)
        } else {
            fmt.Printf("Summary written to gallery-agent-summary.json\n")
        }
    }
}

func searchAndProcessModels(searchTerm string, limit int, quantization string) (*SearchResult, error) {
    client := hfapi.NewClient()
    var outputBuilder strings.Builder

    fmt.Println("Searching for models...")
    // Initialize the result struct
    result := &SearchResult{
        SearchTerm:   searchTerm,
        Limit:        limit,
        Quantization: quantization,
        Models:       []ProcessedModel{},
    }

    models, err := client.GetLatest(searchTerm, limit)
    if err != nil {
        return nil, fmt.Errorf("failed to fetch models: %w", err)
    }

    fmt.Println("Models found:", len(models))
    result.TotalModelsFound = len(models)

    if len(models) == 0 {
        outputBuilder.WriteString("No models found.\n")
        result.FormattedOutput = outputBuilder.String()
        return result, nil
    }

    outputBuilder.WriteString(fmt.Sprintf("Found %d models matching '%s':\n\n", len(models), searchTerm))

    // Process each model
    for i, model := range models {
        outputBuilder.WriteString(fmt.Sprintf("%d. Processing Model: %s\n", i+1, model.ModelID))
        outputBuilder.WriteString(fmt.Sprintf(" Author: %s\n", model.Author))
        outputBuilder.WriteString(fmt.Sprintf(" Downloads: %d\n", model.Downloads))
        outputBuilder.WriteString(fmt.Sprintf(" Last Modified: %s\n", model.LastModified))

        // Initialize processed model struct
        processedModel := ProcessedModel{
            ModelID:                 model.ModelID,
            Author:                  model.Author,
            Downloads:               model.Downloads,
            LastModified:            model.LastModified,
            QuantizationPreferences: []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
        }

        // Get detailed model information
        details, err := client.GetModelDetails(model.ModelID)
        if err != nil {
            errorMsg := fmt.Sprintf(" Error getting model details: %v\n", err)
            outputBuilder.WriteString(errorMsg)
            processedModel.ProcessingError = err.Error()
            result.Models = append(result.Models, processedModel)
            continue
        }

        // Define quantization preferences (in order of preference)
        quantizationPreferences := []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"}

        // Find preferred model file
        preferredModelFile := hfapi.FindPreferredModelFile(details.Files, quantizationPreferences)

        // Process files
        processedFiles := make([]ProcessedModelFile, len(details.Files))
        for j, file := range details.Files {
            fileType := "other"
            if file.IsReadme {
                fileType = "readme"
            } else if preferredModelFile != nil && file.Path == preferredModelFile.Path {
                fileType = "model"
            }

            processedFiles[j] = ProcessedModelFile{
                Path:     file.Path,
                Size:     file.Size,
                SHA256:   file.SHA256,
                IsReadme: file.IsReadme,
                FileType: fileType,
            }
        }

        processedModel.Files = processedFiles

        // Set preferred model file
        if preferredModelFile != nil {
            for _, file := range processedFiles {
                if file.Path == preferredModelFile.Path {
                    processedModel.PreferredModelFile = &file
                    break
                }
            }
        }

        // Print file information
        outputBuilder.WriteString(fmt.Sprintf(" Files found: %d\n", len(details.Files)))

        if preferredModelFile != nil {
            outputBuilder.WriteString(fmt.Sprintf(" Preferred Model File: %s (SHA256: %s)\n",
                preferredModelFile.Path,
                preferredModelFile.SHA256))
        } else {
            outputBuilder.WriteString(fmt.Sprintf(" No model file found with quantization preferences: %v\n", quantizationPreferences))
        }

        if details.ReadmeFile != nil {
            outputBuilder.WriteString(fmt.Sprintf(" README File: %s\n", details.ReadmeFile.Path))

            // Find and set readme file
            for _, file := range processedFiles {
                if file.IsReadme {
                    processedModel.ReadmeFile = &file
                    break
                }
            }

            fmt.Println("Getting real readme for", model.ModelID, "waiting...")
            // Use agent to get the real readme and prepare the model description
            readmeContent, err := getRealReadme(context.Background(), model.ModelID)
            if err == nil {
                processedModel.ReadmeContent = readmeContent
                processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
                outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
                    processedModel.ReadmeContentPreview))
            } else {
                continue
            }
            fmt.Println("Real readme got", readmeContent)
            // Get README content
            // readmeContent, err := client.GetReadmeContent(model.ModelID, details.ReadmeFile.Path)
            // if err == nil {
            //     processedModel.ReadmeContent = readmeContent
            //     processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
            //     outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
            //         processedModel.ReadmeContentPreview))
            // }
        }

        // Print all files with their checksums
        outputBuilder.WriteString(" All Files:\n")
        for _, file := range processedFiles {
            outputBuilder.WriteString(fmt.Sprintf(" - %s (%s, %d bytes", file.Path, file.FileType, file.Size))
            if file.SHA256 != "" {
                outputBuilder.WriteString(fmt.Sprintf(", SHA256: %s", file.SHA256))
            }
            outputBuilder.WriteString(")\n")
        }

        outputBuilder.WriteString("\n")
        result.Models = append(result.Models, processedModel)
    }

    result.FormattedOutput = outputBuilder.String()
    return result, nil
}

func truncateString(s string, maxLen int) string {
    if len(s) <= maxLen {
        return s
    }
    return s[:maxLen] + "..."
}
.github/gallery-agent/testing.go (vendored, new file, +190)
@@ -0,0 +1,190 @@
package main

import (
    "context"
    "fmt"
    "math/rand"
    "strings"
    "time"
)

// runSyntheticMode generates synthetic test data and appends it to the gallery
func runSyntheticMode() error {
    generator := NewSyntheticDataGenerator()

    // Generate a random number of synthetic models (1-3)
    numModels := generator.rand.Intn(3) + 1
    fmt.Printf("Generating %d synthetic models for testing...\n", numModels)

    var models []ProcessedModel
    for i := 0; i < numModels; i++ {
        model := generator.GenerateProcessedModel()
        models = append(models, model)
        fmt.Printf("Generated synthetic model: %s\n", model.ModelID)
    }

    // Generate YAML entries and append to gallery/index.yaml
    fmt.Println("Generating YAML entries for synthetic models...")
    err := generateYAMLForModels(context.Background(), models)
    if err != nil {
        return fmt.Errorf("error generating YAML entries: %w", err)
    }

    fmt.Printf("Successfully added %d synthetic models to the gallery for testing!\n", len(models))
    return nil
}

// SyntheticDataGenerator provides methods to generate synthetic test data
type SyntheticDataGenerator struct {
    rand *rand.Rand
}

// NewSyntheticDataGenerator creates a new synthetic data generator
func NewSyntheticDataGenerator() *SyntheticDataGenerator {
    return &SyntheticDataGenerator{
        rand: rand.New(rand.NewSource(time.Now().UnixNano())),
    }
}

// GenerateProcessedModelFile creates a synthetic ProcessedModelFile
func (g *SyntheticDataGenerator) GenerateProcessedModelFile() ProcessedModelFile {
    fileTypes := []string{"model", "readme", "other"}
    fileType := fileTypes[g.rand.Intn(len(fileTypes))]

    var path string
    var isReadme bool

    switch fileType {
    case "model":
        path = fmt.Sprintf("model-%s.gguf", g.randomString(8))
        isReadme = false
    case "readme":
        path = "README.md"
        isReadme = true
    default:
        path = fmt.Sprintf("file-%s.txt", g.randomString(6))
        isReadme = false
    }

    return ProcessedModelFile{
        Path:     path,
        Size:     int64(g.rand.Intn(1000000000) + 1000000), // 1MB to 1GB
        SHA256:   g.randomSHA256(),
        IsReadme: isReadme,
        FileType: fileType,
    }
}

// GenerateProcessedModel creates a synthetic ProcessedModel
func (g *SyntheticDataGenerator) GenerateProcessedModel() ProcessedModel {
    authors := []string{"microsoft", "meta", "google", "openai", "anthropic", "mistralai", "huggingface"}
    modelNames := []string{"llama", "gpt", "claude", "mistral", "gemma", "phi", "qwen", "codellama"}

    author := authors[g.rand.Intn(len(authors))]
    modelName := modelNames[g.rand.Intn(len(modelNames))]
    modelID := fmt.Sprintf("%s/%s-%s", author, modelName, g.randomString(6))

    // Generate files
    numFiles := g.rand.Intn(5) + 2 // 2-6 files
    files := make([]ProcessedModelFile, numFiles)

    // Ensure at least one model file and one readme
    hasModelFile := false
    hasReadme := false

    for i := 0; i < numFiles; i++ {
        files[i] = g.GenerateProcessedModelFile()
        if files[i].FileType == "model" {
            hasModelFile = true
        }
        if files[i].FileType == "readme" {
            hasReadme = true
        }
    }

    // Add required files if missing
    if !hasModelFile {
        modelFile := g.GenerateProcessedModelFile()
        modelFile.FileType = "model"
        modelFile.Path = fmt.Sprintf("%s-Q4_K_M.gguf", modelName)
        files = append(files, modelFile)
    }

    if !hasReadme {
        readmeFile := g.GenerateProcessedModelFile()
        readmeFile.FileType = "readme"
        readmeFile.Path = "README.md"
        readmeFile.IsReadme = true
        files = append(files, readmeFile)
    }

    // Find preferred model file
    var preferredModelFile *ProcessedModelFile
    for i := range files {
        if files[i].FileType == "model" {
            preferredModelFile = &files[i]
            break
        }
    }

    // Find readme file
    var readmeFile *ProcessedModelFile
    for i := range files {
        if files[i].FileType == "readme" {
            readmeFile = &files[i]
            break
        }
    }

    readmeContent := g.generateReadmeContent(modelName, author)

    return ProcessedModel{
        ModelID:                 modelID,
        Author:                  author,
        Downloads:               g.rand.Intn(1000000) + 1000,
        LastModified:            g.randomDate(),
        Files:                   files,
        PreferredModelFile:      preferredModelFile,
        ReadmeFile:              readmeFile,
        ReadmeContent:           readmeContent,
        ReadmeContentPreview:    truncateString(readmeContent, 200),
        QuantizationPreferences: []string{"Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
        ProcessingError:         "",
    }
}

// Helper methods for synthetic data generation
func (g *SyntheticDataGenerator) randomString(length int) string {
    const charset = "abcdefghijklmnopqrstuvwxyz0123456789"
    b := make([]byte, length)
    for i := range b {
        b[i] = charset[g.rand.Intn(len(charset))]
    }
    return string(b)
}

func (g *SyntheticDataGenerator) randomSHA256() string {
    const charset = "0123456789abcdef"
    b := make([]byte, 64)
    for i := range b {
        b[i] = charset[g.rand.Intn(len(charset))]
    }
    return string(b)
}

func (g *SyntheticDataGenerator) randomDate() string {
    now := time.Now()
    daysAgo := g.rand.Intn(365) // Random date within last year
    pastDate := now.AddDate(0, 0, -daysAgo)
    return pastDate.Format("2006-01-02T15:04:05.000Z")
}

func (g *SyntheticDataGenerator) generateReadmeContent(modelName, author string) string {
    templates := []string{
        fmt.Sprintf("# %s Model\n\nThis is a %s model developed by %s. It's designed for various natural language processing tasks including text generation, question answering, and conversation.\n\n## Features\n\n- High-quality text generation\n- Efficient inference\n- Multiple quantization options\n- Easy to use with LocalAI\n\n## Usage\n\nUse this model with LocalAI for various AI tasks.", strings.Title(modelName), modelName, author),
        fmt.Sprintf("# %s\n\nA powerful language model from %s. This model excels at understanding and generating human-like text across multiple domains.\n\n## Capabilities\n\n- Text completion\n- Code generation\n- Creative writing\n- Technical documentation\n\n## Model Details\n\n- Architecture: Transformer-based\n- Training: Large-scale supervised learning\n- Quantization: Available in multiple formats", strings.Title(modelName), author),
        fmt.Sprintf("# %s Language Model\n\nDeveloped by %s, this model represents state-of-the-art performance in natural language understanding and generation.\n\n## Key Features\n\n- Multilingual support\n- Context-aware responses\n- Efficient memory usage\n- Fast inference speed\n\n## Applications\n\n- Chatbots and virtual assistants\n- Content generation\n- Code completion\n- Educational tools", strings.Title(modelName), author),
    }

    return templates[g.rand.Intn(len(templates))]
}
.github/gallery-agent/tools.go (vendored, new file, +46)
@@ -0,0 +1,46 @@
package main

import (
    "fmt"

    hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
    openai "github.com/sashabaranov/go-openai"
    jsonschema "github.com/sashabaranov/go-openai/jsonschema"
)

// Get repository README from HF
type HFReadmeTool struct {
    client *hfapi.Client
}

func (s *HFReadmeTool) Execute(args map[string]any) (string, error) {
    q, ok := args["repository"].(string)
    if !ok {
        return "", fmt.Errorf("no query")
    }
    readme, err := s.client.GetReadmeContent(q, "README.md")
    if err != nil {
        return "", err
    }
    return readme, nil
}

func (s *HFReadmeTool) Tool() openai.Tool {
    return openai.Tool{
        Type: openai.ToolTypeFunction,
        Function: &openai.FunctionDefinition{
            Name:        "hf_readme",
            Description: "A tool to get the README content of a huggingface repository",
            Parameters: jsonschema.Definition{
                Type: jsonschema.Object,
                Properties: map[string]jsonschema.Definition{
                    "repository": {
                        Type:        jsonschema.String,
                        Description: "The huggingface repository to get the README content of",
                    },
                },
                Required: []string{"repository"},
            },
        },
    }
}
.github/workflows/backend.yml (vendored, 253 lines changed)
@@ -197,18 +197,6 @@ jobs:
  backend: "rerankers"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-rerankers'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "rerankers"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -221,18 +209,6 @@ jobs:
  backend: "llama-cpp"
  dockerfile: "./backend/Dockerfile.llama-cpp"
  context: "./"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-llama-cpp'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "llama-cpp"
  dockerfile: "./backend/Dockerfile.llama-cpp"
  context: "./"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -245,18 +221,6 @@ jobs:
  backend: "vllm"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-vllm'
  runs-on: 'arc-runner-set'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "vllm"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -269,18 +233,6 @@ jobs:
  backend: "transformers"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-transformers'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "transformers"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -293,19 +245,7 @@ jobs:
  backend: "diffusers"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-diffusers'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "diffusers"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
# CUDA additional backends
# CUDA 12 additional backends
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -318,18 +258,6 @@ jobs:
  backend: "kokoro"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-kokoro'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "kokoro"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -342,18 +270,6 @@ jobs:
  backend: "faster-whisper"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-faster-whisper'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "faster-whisper"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -366,18 +282,6 @@ jobs:
  backend: "coqui"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-coqui'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "coqui"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -390,18 +294,6 @@ jobs:
  backend: "bark"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-bark'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "bark"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
@@ -414,18 +306,6 @@ jobs:
  backend: "chatterbox"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-chatterbox'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "chatterbox"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
# hipblas builds
- build-type: 'hipblas'
  cuda-major-version: ""
@@ -609,6 +489,18 @@ jobs:
  backend: "diffusers"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'l4t'
  cuda-major-version: "12"
  cuda-minor-version: "0"
  platforms: 'linux/arm64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-l4t-kokoro'
  runs-on: 'ubuntu-24.04-arm'
  base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
  skip-drivers: 'true'
  backend: "kokoro"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
# SYCL additional backends
- build-type: 'intel'
  cuda-major-version: ""
@@ -745,18 +637,6 @@ jobs:
  backend: "stablediffusion-ggml"
  dockerfile: "./backend/Dockerfile.golang"
  context: "./"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-13-stablediffusion-ggml'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "stablediffusion-ggml"
  dockerfile: "./backend/Dockerfile.golang"
  context: "./"
- build-type: 'cublas'
  cuda-major-version: "11"
  cuda-minor-version: "7"
@@ -842,18 +722,6 @@ jobs:
  backend: "whisper"
  dockerfile: "./backend/Dockerfile.golang"
  context: "./"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-12-whisper'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "whisper"
  dockerfile: "./backend/Dockerfile.golang"
  context: "./"
- build-type: 'cublas'
  cuda-major-version: "11"
  cuda-minor-version: "7"
@@ -990,18 +858,6 @@ jobs:
  backend: "rfdetr"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "rfdetr"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "11"
  cuda-minor-version: "7"
@@ -1026,7 +882,7 @@ jobs:
  backend: "rfdetr"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
- build-type: 'l4t'
  cuda-major-version: "12"
  cuda-minor-version: "0"
  platforms: 'linux/arm64'
@@ -1063,18 +919,6 @@ jobs:
  backend: "exllama2"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "13"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "exllama2"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "11"
  cuda-minor-version: "7"
@@ -1111,6 +955,18 @@ jobs:
  backend: "exllama2"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'l4t'
  cuda-major-version: "12"
  cuda-minor-version: "0"
  platforms: 'linux/arm64'
  skip-drivers: 'true'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-l4t-arm64-chatterbox'
  base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
  runs-on: 'ubuntu-24.04-arm'
  backend: "chatterbox"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
# runs out of space on the runner
# - build-type: 'hipblas'
#   cuda-major-version: ""
@@ -1137,6 +993,55 @@ jobs:
  backend: "kitten-tts"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
# neutts
- build-type: ''
  cuda-major-version: ""
  cuda-minor-version: ""
  platforms: 'linux/amd64,linux/arm64'
  tag-latest: 'auto'
  tag-suffix: '-cpu-neutts'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
  skip-drivers: 'false'
  backend: "neutts"
  dockerfile: "./backend/Dockerfile.python"
  context: "./backend"
- build-type: 'cublas'
  cuda-major-version: "12"
  cuda-minor-version: "0"
  platforms: 'linux/amd64'
  tag-latest: 'auto'
  tag-suffix: '-gpu-nvidia-cuda-12-neutts'
  runs-on: 'ubuntu-latest'
  base-image: "ubuntu:22.04"
|
||||
skip-drivers: 'false'
|
||||
backend: "neutts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./backend"
|
||||
- build-type: 'hipblas'
|
||||
cuda-major-version: ""
|
||||
cuda-minor-version: ""
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-rocm-hipblas-neutts'
|
||||
runs-on: 'arc-runner-set'
|
||||
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
|
||||
skip-drivers: 'false'
|
||||
backend: "neutts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./backend"
|
||||
- build-type: 'l4t'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/arm64'
|
||||
skip-drivers: 'true'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-nvidia-l4t-arm64-neutts'
|
||||
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
|
||||
runs-on: 'ubuntu-24.04-arm'
|
||||
backend: "neutts"
|
||||
dockerfile: "./backend/Dockerfile.python"
|
||||
context: "./backend"
|
||||
backend-jobs-darwin:
|
||||
uses: ./.github/workflows/backend_build_darwin.yml
|
||||
strategy:
|
||||
@@ -1204,7 +1109,7 @@ jobs:
|
||||
make protogen-go
|
||||
make backends/llama-cpp-darwin
|
||||
- name: Upload llama-cpp.tar
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: llama-cpp-tar
|
||||
path: backend-images/llama-cpp.tar
|
||||
@@ -1214,7 +1119,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download llama-cpp.tar
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v6
|
||||
with:
|
||||
name: llama-cpp-tar
|
||||
path: .
|
||||
@@ -1292,7 +1197,7 @@ jobs:
|
||||
export PLATFORMARCH=darwin/amd64
|
||||
make backends/llama-cpp-darwin
|
||||
- name: Upload llama-cpp.tar
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: llama-cpp-tar-x86
|
||||
path: backend-images/llama-cpp.tar
|
||||
@@ -1302,7 +1207,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download llama-cpp.tar
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v6
|
||||
with:
|
||||
name: llama-cpp-tar-x86
|
||||
path: .
|
||||
|
||||
4 .github/workflows/backend_build_darwin.yml vendored
@@ -74,7 +74,7 @@ jobs:
|
||||
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend
|
||||
|
||||
- name: Upload ${{ inputs.backend }}.tar
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: ${{ inputs.backend }}-tar
|
||||
path: backend-images/${{ inputs.backend }}.tar
|
||||
@@ -85,7 +85,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Download ${{ inputs.backend }}.tar
|
||||
uses: actions/download-artifact@v5
|
||||
uses: actions/download-artifact@v6
|
||||
with:
|
||||
name: ${{ inputs.backend }}-tar
|
||||
path: .
|
||||
|
||||
10 .github/workflows/build-test.yaml vendored
@@ -17,7 +17,7 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.23
|
||||
go-version: 1.25
|
||||
- name: Run GoReleaser
|
||||
run: |
|
||||
make dev-dist
|
||||
@@ -31,13 +31,13 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.23
|
||||
go-version: 1.25
|
||||
- name: Build launcher for macOS ARM64
|
||||
run: |
|
||||
make build-launcher-darwin
|
||||
ls -liah dist
|
||||
- name: Upload macOS launcher artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: launcher-macos
|
||||
path: dist/
|
||||
@@ -53,14 +53,14 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: 1.23
|
||||
go-version: 1.25
|
||||
- name: Build launcher for Linux
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
|
||||
make build-launcher-linux
|
||||
- name: Upload Linux launcher artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v5
|
||||
with:
|
||||
name: launcher-linux
|
||||
path: local-ai-launcher-linux.tar.xz
|
||||
|
||||
4 .github/workflows/bump_deps.yaml vendored
@@ -1,10 +1,10 @@
|
||||
name: Bump dependencies
|
||||
name: Bump Backend dependencies
|
||||
on:
|
||||
schedule:
|
||||
- cron: 0 20 * * *
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
bump:
|
||||
bump-backends:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
||||
4 .github/workflows/bump_docs.yaml vendored
@@ -1,10 +1,10 @@
|
||||
name: Bump dependencies
|
||||
name: Bump Documentation
|
||||
on:
|
||||
schedule:
|
||||
- cron: 0 20 * * *
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
bump:
|
||||
bump-docs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
||||
4 .github/workflows/deploy-explorer.yaml vendored
@@ -33,7 +33,7 @@ jobs:
|
||||
run: |
|
||||
CGO_ENABLED=0 make build
|
||||
- name: rm
|
||||
uses: appleboy/ssh-action@v1.2.2
|
||||
uses: appleboy/ssh-action@v1.2.3
|
||||
with:
|
||||
host: ${{ secrets.EXPLORER_SSH_HOST }}
|
||||
username: ${{ secrets.EXPLORER_SSH_USERNAME }}
|
||||
@@ -53,7 +53,7 @@ jobs:
|
||||
rm: true
|
||||
target: ./local-ai
|
||||
- name: restarting
|
||||
uses: appleboy/ssh-action@v1.2.2
|
||||
uses: appleboy/ssh-action@v1.2.3
|
||||
with:
|
||||
host: ${{ secrets.EXPLORER_SSH_HOST }}
|
||||
username: ${{ secrets.EXPLORER_SSH_USERNAME }}
|
||||
|
||||
119 .github/workflows/gallery-agent.yaml vendored Normal file
@@ -0,0 +1,119 @@
|
||||
name: Gallery Agent
|
||||
on:
|
||||
|
||||
schedule:
|
||||
- cron: '0 */3 * * *' # Run every 3 hours
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
search_term:
|
||||
description: 'Search term for models'
|
||||
required: false
|
||||
default: 'GGUF'
|
||||
type: string
|
||||
limit:
|
||||
description: 'Maximum number of models to process'
|
||||
required: false
|
||||
default: '15'
|
||||
type: string
|
||||
quantization:
|
||||
description: 'Preferred quantization format'
|
||||
required: false
|
||||
default: 'Q4_K_M'
|
||||
type: string
|
||||
max_models:
|
||||
description: 'Maximum number of models to add to the gallery'
|
||||
required: false
|
||||
default: '1'
|
||||
type: string
|
||||
jobs:
|
||||
gallery-agent:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.21'
|
||||
|
||||
|
||||
- name: Run gallery agent
|
||||
env:
|
||||
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
|
||||
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
|
||||
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
|
||||
SEARCH_TERM: ${{ github.event.inputs.search_term || 'GGUF' }}
|
||||
LIMIT: ${{ github.event.inputs.limit || '15' }}
|
||||
QUANTIZATION: ${{ github.event.inputs.quantization || 'Q4_K_M' }}
|
||||
MAX_MODELS: ${{ github.event.inputs.max_models || '1' }}
|
||||
run: |
|
||||
export GALLERY_INDEX_PATH=$PWD/gallery/index.yaml
|
||||
go run .github/gallery-agent
|
||||
|
||||
- name: Check for changes
|
||||
id: check_changes
|
||||
run: |
|
||||
if git diff --quiet gallery/index.yaml; then
|
||||
echo "changes=false" >> $GITHUB_OUTPUT
|
||||
echo "No changes detected in gallery/index.yaml"
|
||||
else
|
||||
echo "changes=true" >> $GITHUB_OUTPUT
|
||||
echo "Changes detected in gallery/index.yaml"
|
||||
git diff gallery/index.yaml
|
||||
fi
|
||||
|
||||
- name: Read gallery agent summary
|
||||
id: read_summary
|
||||
if: steps.check_changes.outputs.changes == 'true'
|
||||
run: |
|
||||
if [ -f ".github/gallery-agent/gallery-agent-summary.json" ]; then
|
||||
echo "summary_exists=true" >> $GITHUB_OUTPUT
|
||||
# Extract summary data using jq
|
||||
echo "search_term=$(jq -r '.search_term' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
|
||||
echo "total_found=$(jq -r '.total_found' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
|
||||
echo "models_added=$(jq -r '.models_added' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
|
||||
echo "quantization=$(jq -r '.quantization' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
|
||||
echo "processing_time=$(jq -r '.processing_time' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
|
||||
|
||||
# Create a formatted list of added models with URLs
|
||||
added_models=$(jq -r 'range(0; .added_model_ids | length) as $i | "- [\(.added_model_ids[$i])](\(.added_model_urls[$i]))"' .github/gallery-agent/gallery-agent-summary.json | tr '\n' '\n')
|
||||
echo "added_models<<EOF" >> $GITHUB_OUTPUT
|
||||
echo "$added_models" >> $GITHUB_OUTPUT
|
||||
echo "EOF" >> $GITHUB_OUTPUT
|
||||
rm -f .github/gallery-agent/gallery-agent-summary.json
|
||||
else
|
||||
echo "summary_exists=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Create Pull Request
|
||||
if: steps.check_changes.outputs.changes == 'true'
|
||||
uses: peter-evans/create-pull-request@v7
|
||||
with:
|
||||
token: ${{ secrets.UPDATE_BOT_TOKEN }}
|
||||
push-to-fork: ci-forks/LocalAI
|
||||
commit-message: 'chore(model gallery): :robot: add new models via gallery agent'
|
||||
title: 'chore(model gallery): :robot: add ${{ steps.read_summary.outputs.models_added || 0 }} new models via gallery agent'
|
||||
# Branch has to be unique so PRs do not overwrite each other
|
||||
branch-suffix: timestamp
|
||||
body: |
|
||||
This PR was automatically created by the gallery agent workflow.
|
||||
|
||||
**Summary:**
|
||||
- **Search Term:** ${{ steps.read_summary.outputs.search_term || github.event.inputs.search_term || 'GGUF' }}
|
||||
- **Models Found:** ${{ steps.read_summary.outputs.total_found || 'N/A' }}
|
||||
- **Models Added:** ${{ steps.read_summary.outputs.models_added || '0' }}
|
||||
- **Quantization:** ${{ steps.read_summary.outputs.quantization || github.event.inputs.quantization || 'Q4_K_M' }}
|
||||
- **Processing Time:** ${{ steps.read_summary.outputs.processing_time || 'N/A' }}
|
||||
|
||||
**Added Models:**
|
||||
${{ steps.read_summary.outputs.added_models || '- No models added' }}
|
||||
|
||||
**Workflow Details:**
|
||||
- Triggered by: `${{ github.event_name }}`
|
||||
- Run ID: `${{ github.run_id }}`
|
||||
- Commit: `${{ github.sha }}`
|
||||
signoff: true
|
||||
delete-branch: true
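
The "Read gallery agent summary" step above pulls individual fields out of `gallery-agent-summary.json` with `jq` and turns `added_model_ids`/`added_model_urls` into the Markdown list used in the PR body. A hedged sketch of that post-processing in Python, assuming the summary file carries the fields the workflow queries (the exact schema is produced by the gallery agent and is an assumption here):

```python
import json

# Hypothetical contents of .github/gallery-agent/gallery-agent-summary.json;
# field names mirror the jq queries in the workflow above, the schema itself
# is an assumption.
summary = json.loads("""
{
  "search_term": "GGUF",
  "total_found": 15,
  "models_added": 2,
  "quantization": "Q4_K_M",
  "processing_time": "42s",
  "added_model_ids": ["foo-7b-gguf", "bar-3b-gguf"],
  "added_model_urls": ["https://huggingface.co/x/foo", "https://huggingface.co/y/bar"]
}
""")

# Same formatting the workflow builds with jq: one "- [id](url)" bullet per model.
added_models = "\n".join(
    f"- [{model_id}]({url})"
    for model_id, url in zip(summary["added_model_ids"], summary["added_model_urls"])
)
print(added_models)
```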
|
||||
9 .github/workflows/image-pr.yml vendored
@@ -34,15 +34,6 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'false'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
makeflags: "--jobs=3 --output-sync=target"
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "12"
|
||||
cuda-minor-version: "0"
|
||||
|
||||
11 .github/workflows/image.yml vendored
@@ -100,17 +100,6 @@ jobs:
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-nvidia-cuda-12"
|
||||
- build-type: 'cublas'
|
||||
cuda-major-version: "13"
|
||||
cuda-minor-version: "0"
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
tag-suffix: '-gpu-nvidia-cuda-13'
|
||||
runs-on: 'ubuntu-latest'
|
||||
base-image: "ubuntu:22.04"
|
||||
skip-drivers: 'false'
|
||||
makeflags: "--jobs=4 --output-sync=target"
|
||||
aio: "-aio-gpu-nvidia-cuda-13"
|
||||
- build-type: 'vulkan'
|
||||
platforms: 'linux/amd64'
|
||||
tag-latest: 'auto'
|
||||
|
||||
5 .github/workflows/localaibot_automerge.yml vendored
@@ -6,11 +6,12 @@ permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
packages: read
|
||||
|
||||
issues: write # for Homebrew/actions/post-comment
|
||||
actions: write # to dispatch publish workflow
|
||||
jobs:
|
||||
dependabot:
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.actor == 'localai-bot' }}
|
||||
if: ${{ github.actor == 'localai-bot' && !contains(github.event.pull_request.title, 'chore(model gallery):') }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
|
||||
18 .github/workflows/notify-models.yaml vendored
@@ -1,22 +1,27 @@
|
||||
name: Notifications for new models
|
||||
on:
|
||||
pull_request:
|
||||
pull_request_target:
|
||||
types:
|
||||
- closed
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: read
|
||||
|
||||
jobs:
|
||||
notify-discord:
|
||||
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
|
||||
env:
|
||||
MODEL_NAME: gemma-3-12b-it
|
||||
MODEL_NAME: gemma-3-12b-it-qat
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0 # needed to checkout all branches for this Action to work
|
||||
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes
|
||||
- uses: mudler/localai-github-action@v1
|
||||
with:
|
||||
model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
|
||||
model: 'gemma-3-12b-it-qat' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
|
||||
# Check the PR diff using the current branch and the base branch of the PR
|
||||
- uses: GrantBirki/git-diff-action@v2.8.1
|
||||
id: git-diff-action
|
||||
@@ -79,7 +84,7 @@ jobs:
|
||||
args: ${{ steps.summarize.outputs.message }}
|
||||
- name: Setup tmate session if fails
|
||||
if: ${{ failure() }}
|
||||
uses: mxschmitt/action-tmate@v3.22
|
||||
uses: mxschmitt/action-tmate@v3.23
|
||||
with:
|
||||
detached: true
|
||||
connect-timeout-seconds: 180
|
||||
@@ -87,12 +92,13 @@ jobs:
|
||||
notify-twitter:
|
||||
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
|
||||
env:
|
||||
MODEL_NAME: gemma-3-12b-it
|
||||
MODEL_NAME: gemma-3-12b-it-qat
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0 # needed to checkout all branches for this Action to work
|
||||
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes
|
||||
- name: Start LocalAI
|
||||
run: |
|
||||
echo "Starting LocalAI..."
|
||||
@@ -161,7 +167,7 @@ jobs:
|
||||
TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
|
||||
- name: Setup tmate session if fails
|
||||
if: ${{ failure() }}
|
||||
uses: mxschmitt/action-tmate@v3.22
|
||||
uses: mxschmitt/action-tmate@v3.23
|
||||
with:
|
||||
detached: true
|
||||
connect-timeout-seconds: 180
|
||||
|
||||
3 .github/workflows/notify-releases.yaml vendored
@@ -11,10 +11,11 @@ jobs:
|
||||
RELEASE_BODY: ${{ github.event.release.body }}
|
||||
RELEASE_TITLE: ${{ github.event.release.name }}
|
||||
RELEASE_TAG_NAME: ${{ github.event.release.tag_name }}
|
||||
MODEL_NAME: gemma-3-12b-it-qat
|
||||
steps:
|
||||
- uses: mudler/localai-github-action@v1
|
||||
with:
|
||||
model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
|
||||
model: 'gemma-3-12b-it-qat' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
|
||||
- name: Summarize
|
||||
id: summarize
|
||||
run: |
|
||||
|
||||
4 .github/workflows/secscan.yaml vendored
@@ -18,13 +18,13 @@ jobs:
|
||||
if: ${{ github.actor != 'dependabot[bot]' }}
|
||||
- name: Run Gosec Security Scanner
|
||||
if: ${{ github.actor != 'dependabot[bot]' }}
|
||||
uses: securego/gosec@v2.22.8
|
||||
uses: securego/gosec@v2.22.9
|
||||
with:
|
||||
# we let the report trigger content trigger a failure using the GitHub Security features.
|
||||
args: '-no-fail -fmt sarif -out results.sarif ./...'
|
||||
- name: Upload SARIF file
|
||||
if: ${{ github.actor != 'dependabot[bot]' }}
|
||||
uses: github/codeql-action/upload-sarif@v3
|
||||
uses: github/codeql-action/upload-sarif@v4
|
||||
with:
|
||||
# Path to SARIF file relative to the root of the repository
|
||||
sarif_file: results.sarif
|
||||
|
||||
2 .github/workflows/stalebot.yml vendored
@@ -10,7 +10,7 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v9
|
||||
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v9
|
||||
with:
|
||||
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
|
||||
stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
|
||||
|
||||
10 .github/workflows/test.yml vendored
@@ -21,7 +21,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: ['1.21.x']
|
||||
go-version: ['1.25.x']
|
||||
steps:
|
||||
- name: Free Disk Space (Ubuntu)
|
||||
uses: jlumbroso/free-disk-space@main
|
||||
@@ -124,7 +124,7 @@ jobs:
|
||||
PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test
|
||||
- name: Setup tmate session if tests fail
|
||||
if: ${{ failure() }}
|
||||
uses: mxschmitt/action-tmate@v3.22
|
||||
uses: mxschmitt/action-tmate@v3.23
|
||||
with:
|
||||
detached: true
|
||||
connect-timeout-seconds: 180
|
||||
@@ -183,7 +183,7 @@ jobs:
|
||||
PATH="$PATH:$HOME/go/bin" make backends/local-store backends/silero-vad backends/llama-cpp backends/whisper backends/piper backends/stablediffusion-ggml docker-build-aio e2e-aio
|
||||
- name: Setup tmate session if tests fail
|
||||
if: ${{ failure() }}
|
||||
uses: mxschmitt/action-tmate@v3.22
|
||||
uses: mxschmitt/action-tmate@v3.23
|
||||
with:
|
||||
detached: true
|
||||
connect-timeout-seconds: 180
|
||||
@@ -193,7 +193,7 @@ jobs:
|
||||
runs-on: macOS-14
|
||||
strategy:
|
||||
matrix:
|
||||
go-version: ['1.21.x']
|
||||
go-version: ['1.25.x']
|
||||
steps:
|
||||
- name: Clone
|
||||
uses: actions/checkout@v5
|
||||
@@ -226,7 +226,7 @@ jobs:
|
||||
PATH="$PATH:$HOME/go/bin" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
|
||||
- name: Setup tmate session if tests fail
|
||||
if: ${{ failure() }}
|
||||
uses: mxschmitt/action-tmate@v3.22
|
||||
uses: mxschmitt/action-tmate@v3.23
|
||||
with:
|
||||
detached: true
|
||||
connect-timeout-seconds: 180
|
||||
|
||||
3 .gitmodules vendored
@@ -1,6 +1,3 @@
|
||||
[submodule "docs/themes/hugo-theme-relearn"]
|
||||
path = docs/themes/hugo-theme-relearn
|
||||
url = https://github.com/McShelby/hugo-theme-relearn.git
|
||||
[submodule "docs/themes/lotusdocs"]
|
||||
path = docs/themes/lotusdocs
|
||||
url = https://github.com/colinwilson/lotusdocs
|
||||
|
||||
@@ -30,6 +30,7 @@ Thank you for your interest in contributing to LocalAI! We appreciate your time
|
||||
3. Install the required dependencies ( see https://localai.io/basics/build/#build-localai-locally )
|
||||
4. Build LocalAI: `make build`
|
||||
5. Run LocalAI: `./local-ai`
|
||||
6. To Build and live reload: `make build-dev`
|
||||
|
||||
## Contributing
|
||||
|
||||
@@ -76,7 +77,7 @@ LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio make run-e2e-aio
|
||||
## Documentation
|
||||
|
||||
We welcome contributions to the documentation: please open a new PR or create a new issue. The documentation is available under `docs/`: https://github.com/mudler/LocalAI/tree/master/docs
|
||||
|
||||
|
||||
## Community and Communication
|
||||
|
||||
- You can reach out via the Github issue tracker.
|
||||
|
||||
10 Dockerfile
@@ -78,6 +78,16 @@ RUN <<EOT bash
|
||||
fi
|
||||
EOT
|
||||
|
||||
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
|
||||
RUN <<EOT bash
|
||||
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
|
||||
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
|
||||
dpkg -i cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
|
||||
cp /var/cudss-local-tegra-repo-ubuntu2204-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
|
||||
apt-get update && apt-get -y install cudss
|
||||
fi
|
||||
EOT
|
||||
|
||||
# If we are building with clblas support, we need the libraries for the builds
|
||||
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
|
||||
apt-get update && \
|
||||
|
||||
16 Makefile
@@ -103,6 +103,10 @@ build-launcher: ## Build the launcher application
|
||||
|
||||
build-all: build build-launcher ## Build both server and launcher
|
||||
|
||||
build-dev: ## Run LocalAI in dev mode with live reload
|
||||
@command -v air >/dev/null 2>&1 || go install github.com/air-verse/air@latest
|
||||
air -c .air.toml
|
||||
|
||||
dev-dist:
|
||||
$(GORELEASER) build --snapshot --clean
|
||||
|
||||
@@ -376,6 +380,9 @@ backends/llama-cpp-darwin: build
|
||||
bash ./scripts/build/llama-cpp-darwin.sh
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
|
||||
|
||||
backends/neutts: docker-build-neutts docker-save-neutts build
|
||||
./local-ai backends install "ocifile://$(abspath ./backend-images/neutts.tar)"
|
||||
|
||||
build-darwin-python-backend: build
|
||||
bash ./scripts/build/python-darwin.sh
|
||||
|
||||
@@ -429,6 +436,15 @@ docker-build-kitten-tts:
|
||||
docker-save-kitten-tts: backend-images
|
||||
docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar
|
||||
|
||||
docker-save-chatterbox: backend-images
|
||||
docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar
|
||||
|
||||
docker-build-neutts:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:neutts -f backend/Dockerfile.python --build-arg BACKEND=neutts ./backend
|
||||
|
||||
docker-save-neutts: backend-images
|
||||
docker save local-ai-backend:neutts -o backend-images/neutts.tar
|
||||
|
||||
docker-build-kokoro:
|
||||
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
|
||||
|
||||
|
||||
26 README.md
@@ -43,7 +43,7 @@
|
||||
|
||||
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
|
||||
>
|
||||
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
|
||||
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
|
||||
[](https://t.me/localaiofficial_bot)
|
||||
|
||||
[](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[](https://artifacthub.io/packages/search?repo=localai)
|
||||
@@ -116,8 +116,17 @@ For more installation options, see [Installer Options](https://localai.io/docs/a
|
||||
<img src="https://img.shields.io/badge/Download-macOS-blue?style=for-the-badge&logo=apple&logoColor=white" alt="Download LocalAI for macOS"/>
|
||||
</a>
|
||||
|
||||
> Note: the DMGs are not signed by Apple, so macOS quarantines them. See https://github.com/mudler/LocalAI/issues/6268 for a workaround; the fix is tracked in https://github.com/mudler/LocalAI/issues/6244
|
||||
|
||||
Or run with docker:
|
||||
|
||||
> **💡 Docker Run vs Docker Start**
|
||||
>
|
||||
> - `docker run` creates and starts a new container. If a container with the same name already exists, this command will fail.
|
||||
> - `docker start` starts an existing container that was previously created with `docker run`.
|
||||
>
|
||||
> If you've already run LocalAI before and want to start it again, use: `docker start -i local-ai`
|
||||
|
||||
### CPU only image:
|
||||
|
||||
```bash
|
||||
@@ -193,10 +202,12 @@ local-ai run oci://localai/phi-2:latest
|
||||
|
||||
> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
|
||||
|
||||
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
|
||||
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html). If you are interested in our roadmap items and future enhancements, see the [issues labeled as roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
|
||||
|
||||
## 📰 Latest project news
|
||||
|
||||
- October 2025: 🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) support added for agentic capabilities with external tools
|
||||
- September 2025: New Launcher application for macOS and Linux, extended support to many backends for Mac and Nvidia L4T devices. Models: Added MLX-Audio, WAN 2.2. WebUI improvements, and Python-based backends now ship portable Python environments.
|
||||
- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
|
||||
- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
|
||||
- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
|
||||
@@ -235,7 +246,7 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
|
||||
- 🔍 [Object Detection](https://localai.io/features/object-detection/)
|
||||
- 📈 [Reranker API](https://localai.io/features/reranker/)
|
||||
- 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
|
||||
- [Agentic capabilities](https://github.com/mudler/LocalAGI)
|
||||
- 🆕🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) - Agentic capabilities with external tools and [LocalAGI's Agentic capabilities](https://github.com/mudler/LocalAGI)
|
||||
- 🔊 Voice activity detection (Silero-VAD support)
|
||||
- 🌍 Integrated WebUI!
|
||||
|
||||
@@ -266,6 +277,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
| **piper** | Fast neural TTS system | CPU |
|
||||
| **kitten-tts** | Kitten TTS models | CPU |
|
||||
| **silero-vad** | Voice Activity Detection | CPU |
|
||||
| **neutts** | Text-to-speech with voice cloning | CUDA 12, ROCm, CPU |
|
||||
|
||||
### Image & Video Generation
|
||||
| Backend | Description | Acceleration Support |
|
||||
@@ -287,7 +299,7 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
|
||||
|-------------------|-------------------|------------------|
|
||||
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
|
||||
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark | AMD Graphics |
|
||||
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts | AMD Graphics |
|
||||
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
|
||||
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
|
||||
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
|
||||
@@ -304,6 +316,12 @@ WebUIs:
|
||||
- https://github.com/go-skynet/LocalAI-frontend
|
||||
- QA-Pilot(An interactive chat project that leverages LocalAI LLMs for rapid understanding and navigation of GitHub code repository) https://github.com/reid41/QA-Pilot
|
||||
|
||||
Agentic Libraries:
|
||||
- https://github.com/mudler/cogito
|
||||
|
||||
MCPs:
|
||||
- https://github.com/mudler/MCPs
|
||||
|
||||
Model galleries
|
||||
- https://github.com/go-skynet/model-gallery
|
||||
|
||||
|
||||
@@ -197,7 +197,7 @@ EOT
|
||||
|
||||
|
||||
# Copy libraries using a script to handle architecture differences
|
||||
RUN make -C /LocalAI/backend/cpp/llama-cpp package
|
||||
RUN make -BC /LocalAI/backend/cpp/llama-cpp package
|
||||
|
||||
|
||||
FROM scratch
|
||||
|
||||
@@ -28,7 +28,7 @@ RUN apt-get update && \
|
||||
curl python3-pip \
|
||||
python-is-python3 \
|
||||
python3-dev llvm \
|
||||
python3-venv make && \
|
||||
python3-venv make cmake && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
pip install --upgrade pip
|
||||
|
||||
@@ -154,6 +154,10 @@ message PredictOptions {
|
||||
repeated string Videos = 45;
|
||||
repeated string Audios = 46;
|
||||
string CorrelationId = 47;
|
||||
string Tools = 48; // JSON array of available tools/functions for tool calling
|
||||
string ToolChoice = 49; // JSON string or object specifying tool choice behavior
|
||||
int32 Logprobs = 50; // Number of top logprobs to return (maps to OpenAI logprobs parameter)
|
||||
int32 TopLogprobs = 51; // Number of top logprobs to return per token (maps to OpenAI top_logprobs parameter)
|
||||
}
|
||||
|
||||
// The response message containing the result
|
||||
@@ -164,6 +168,7 @@ message Reply {
|
||||
double timing_prompt_processing = 4;
|
||||
double timing_token_generation = 5;
|
||||
bytes audio = 6;
|
||||
bytes logprobs = 7; // JSON-encoded logprobs data matching OpenAI format
|
||||
}
|
||||
|
||||
message GrammarTrigger {
|
||||
@@ -382,6 +387,11 @@ message StatusResponse {
|
||||
message Message {
|
||||
string role = 1;
|
||||
string content = 2;
|
||||
// Optional fields for OpenAI-compatible message format
|
||||
string name = 3; // Tool name (for tool messages)
|
||||
string tool_call_id = 4; // Tool call ID (for tool messages)
|
||||
string reasoning_content = 5; // Reasoning content (for thinking models)
|
||||
string tool_calls = 6; // Tool calls as JSON string (for assistant messages with tool calls)
|
||||
}
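
The new PredictOptions fields (Tools, ToolChoice, Logprobs, TopLogprobs) and the logprobs bytes added to Reply are plain protobuf fields: a client passes JSON strings and integers in, and decodes Reply.logprobs as JSON on the way out. A hedged sketch using the generated Python stubs (backend_pb2, as imported by the Python backends shown later); the tool definition and the existing Prompt field are assumptions beyond what the proto comments state:

```python
import json
import backend_pb2  # generated stub used by the Python backends

# Hypothetical tool definition; Tools/ToolChoice are JSON strings per the proto comments.
tools = [{"type": "function", "function": {"name": "get_weather",
                                           "parameters": {"type": "object",
                                                          "properties": {"city": {"type": "string"}}}}}]

opts = backend_pb2.PredictOptions(
    Prompt="What is the weather in Rome?",  # assumes the pre-existing Prompt field
    Tools=json.dumps(tools),
    ToolChoice="auto",
    Logprobs=1,
    TopLogprobs=5,
)

# A Reply carries logprobs as JSON-encoded bytes matching the OpenAI format.
def decode_logprobs(reply: "backend_pb2.Reply"):
    return json.loads(reply.logprobs) if reply.logprobs else None
```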
|
||||
|
||||
message DetectOptions {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
|
||||
LLAMA_VERSION?=8ff206097c2bf3ca1c7aa95f9d6db779fc7bdd68
|
||||
LLAMA_VERSION?=10e9780154365b191fb43ca4830659ef12def80f
|
||||
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
|
||||
|
||||
CMAKE_ARGS?=
|
||||
@@ -14,7 +14,7 @@ CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
|
||||
|
||||
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||
ifeq ($(NATIVE),false)
|
||||
CMAKE_ARGS+=-DGGML_NATIVE=OFF
|
||||
CMAKE_ARGS+=-DGGML_NATIVE=OFF -DLLAMA_OPENSSL=OFF
|
||||
endif
|
||||
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
|
||||
ifeq ($(BUILD_TYPE),cublas)
|
||||
|
||||
File diff suppressed because it is too large.
@@ -14,6 +14,8 @@ cp -r grpc-server.cpp llama.cpp/tools/grpc-server/
|
||||
cp -rfv llama.cpp/vendor/nlohmann/json.hpp llama.cpp/tools/grpc-server/
|
||||
cp -rfv llama.cpp/tools/server/utils.hpp llama.cpp/tools/grpc-server/
|
||||
cp -rfv llama.cpp/vendor/cpp-httplib/httplib.h llama.cpp/tools/grpc-server/
|
||||
cp -rfv llama.cpp/tools/server/server-http.cpp llama.cpp/tools/grpc-server/
|
||||
cp -rfv llama.cpp/tools/server/server-http.h llama.cpp/tools/grpc-server/
|
||||
|
||||
set +e
|
||||
if grep -q "grpc-server" llama.cpp/tools/CMakeLists.txt; then
|
||||
|
||||
@@ -8,7 +8,8 @@ JOBS?=$(shell nproc --ignore=1)
|
||||
|
||||
# whisper.cpp version
|
||||
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
|
||||
WHISPER_CPP_VERSION?=edea8a9c3cf0eb7676dcdb604991eb2f95c3d984
|
||||
WHISPER_CPP_VERSION?=b12abefa9be2abae39a73fa903322af135024a36
|
||||
SO_TARGET?=libgowhisper.so
|
||||
|
||||
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
|
||||
|
||||
@@ -57,15 +58,18 @@ sources/whisper.cpp:
|
||||
git checkout $(WHISPER_CPP_VERSION) && \
|
||||
git submodule update --init --recursive --depth 1 --single-branch
|
||||
|
||||
libgowhisper.so: sources/whisper.cpp CMakeLists.txt gowhisper.cpp gowhisper.h
|
||||
mkdir -p build && \
|
||||
cd build && \
|
||||
cmake .. $(CMAKE_ARGS) && \
|
||||
cmake --build . --config Release -j$(JOBS) && \
|
||||
cd .. && \
|
||||
mv build/libgowhisper.so ./
|
||||
# Detect OS
|
||||
UNAME_S := $(shell uname -s)
|
||||
|
||||
whisper: main.go gowhisper.go libgowhisper.so
|
||||
# Only build CPU variants on Linux
|
||||
ifeq ($(UNAME_S),Linux)
|
||||
VARIANT_TARGETS = libgowhisper-avx.so libgowhisper-avx2.so libgowhisper-avx512.so libgowhisper-fallback.so
|
||||
else
|
||||
# On non-Linux (e.g., Darwin), build only fallback variant
|
||||
VARIANT_TARGETS = libgowhisper-fallback.so
|
||||
endif
|
||||
|
||||
whisper: main.go gowhisper.go $(VARIANT_TARGETS)
|
||||
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o whisper ./
|
||||
|
||||
package: whisper
|
||||
@@ -73,5 +77,46 @@ package: whisper
|
||||
|
||||
build: package
|
||||
|
||||
clean:
|
||||
rm -rf libgowhisper.o build whisper
|
||||
clean: purge
|
||||
rm -rf libgowhisper*.so sources/whisper.cpp whisper
|
||||
|
||||
purge:
|
||||
rm -rf build*
|
||||
|
||||
# Build all variants (Linux only)
|
||||
ifeq ($(UNAME_S),Linux)
|
||||
libgowhisper-avx.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:avx${RESET})
|
||||
SO_TARGET=libgowhisper-avx.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgowhisper-avx2.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:avx2${RESET})
|
||||
SO_TARGET=libgowhisper-avx2.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgowhisper-avx512.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:avx512${RESET})
|
||||
SO_TARGET=libgowhisper-avx512.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
endif
|
||||
|
||||
# Build fallback variant (all platforms)
|
||||
libgowhisper-fallback.so: sources/whisper.cpp
|
||||
$(MAKE) purge
|
||||
$(info ${GREEN}I whisper build info:fallback${RESET})
|
||||
SO_TARGET=libgowhisper-fallback.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
|
||||
rm -rfv build*
|
||||
|
||||
libgowhisper-custom: CMakeLists.txt gowhisper.cpp gowhisper.h
|
||||
mkdir -p build-$(SO_TARGET) && \
|
||||
cd build-$(SO_TARGET) && \
|
||||
cmake .. $(CMAKE_ARGS) && \
|
||||
cmake --build . --config Release -j$(JOBS) && \
|
||||
cd .. && \
|
||||
mv build-$(SO_TARGET)/libgowhisper.so ./$(SO_TARGET)
|
||||
|
||||
all: whisper package
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
|
||||
// Note: this is started internally by LocalAI and a server is allocated for each model
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
"github.com/ebitengine/purego"
|
||||
grpc "github.com/mudler/LocalAI/pkg/grpc"
|
||||
@@ -18,7 +19,13 @@ type LibFuncs struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
gosd, err := purego.Dlopen("./libgowhisper.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
|
||||
// Get library name from environment variable, default to fallback
|
||||
libName := os.Getenv("WHISPER_LIBRARY")
|
||||
if libName == "" {
|
||||
libName = "./libgowhisper-fallback.so"
|
||||
}
|
||||
|
||||
gosd, err := purego.Dlopen(libName, purego.RTLD_NOW|purego.RTLD_GLOBAL)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
@@ -10,7 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
|
||||
# Create lib directory
|
||||
mkdir -p $CURDIR/package/lib
|
||||
|
||||
cp -avf $CURDIR/whisper $CURDIR/libgowhisper.so $CURDIR/package/
|
||||
cp -avf $CURDIR/whisper $CURDIR/package/
|
||||
cp -fv $CURDIR/libgowhisper-*.so $CURDIR/package/
|
||||
cp -fv $CURDIR/run.sh $CURDIR/package/
|
||||
|
||||
# Detect architecture and copy appropriate libraries
|
||||
|
||||
@@ -1,14 +1,52 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
# Get the absolute current dir where the script is located
|
||||
CURDIR=$(dirname "$(realpath $0)")
|
||||
|
||||
cd /
|
||||
|
||||
echo "CPU info:"
|
||||
if [ "$(uname)" != "Darwin" ]; then
|
||||
grep -e "model\sname" /proc/cpuinfo | head -1
|
||||
grep -e "flags" /proc/cpuinfo | head -1
|
||||
fi
|
||||
|
||||
LIBRARY="$CURDIR/libgowhisper-fallback.so"
|
||||
|
||||
if [ "$(uname)" != "Darwin" ]; then
|
||||
if grep -q -e "\savx\s" /proc/cpuinfo ; then
|
||||
echo "CPU: AVX found OK"
|
||||
if [ -e $CURDIR/libgowhisper-avx.so ]; then
|
||||
LIBRARY="$CURDIR/libgowhisper-avx.so"
|
||||
fi
|
||||
fi
|
||||
|
||||
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
|
||||
echo "CPU: AVX2 found OK"
|
||||
if [ -e $CURDIR/libgowhisper-avx2.so ]; then
|
||||
LIBRARY="$CURDIR/libgowhisper-avx2.so"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check avx 512
|
||||
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
|
||||
echo "CPU: AVX512F found OK"
|
||||
if [ -e $CURDIR/libgowhisper-avx512.so ]; then
|
||||
LIBRARY="$CURDIR/libgowhisper-avx512.so"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
|
||||
export WHISPER_LIBRARY=$LIBRARY
|
||||
|
||||
# If there is a lib/ld.so, use it
|
||||
if [ -f $CURDIR/lib/ld.so ]; then
|
||||
echo "Using lib/ld.so"
|
||||
echo "Using library: $LIBRARY"
|
||||
exec $CURDIR/lib/ld.so $CURDIR/whisper "$@"
|
||||
fi
|
||||
|
||||
echo "Using library: $LIBRARY"
|
||||
exec $CURDIR/whisper "$@"
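
run.sh above picks the most capable libgowhisper variant by grepping CPU flags, exports it as WHISPER_LIBRARY, and the Go entrypoint dlopen()s it (defaulting to libgowhisper-fallback.so). A hedged Python sketch of the same selection logic, handy for checking which variant a given host would end up with; paths and flag names follow the script, everything else is illustrative:

```python
import os
import platform

def pick_whisper_library(curdir="."):
    """Mirror of run.sh: choose the best libgowhisper variant for this CPU."""
    library = f"{curdir}/libgowhisper-fallback.so"
    if platform.system() == "Darwin":
        return library
    with open("/proc/cpuinfo") as f:
        flags = f.read()
    # Ordered from least to most capable, matching the script's precedence.
    for flag, name in (("avx", "avx"), ("avx2", "avx2"), ("avx512f", "avx512")):
        candidate = f"{curdir}/libgowhisper-{name}.so"
        if f" {flag} " in flags and os.path.exists(candidate):
            library = candidate
    return library

# The Go entrypoint reads this variable and dlopen()s the chosen variant.
os.environ["WHISPER_LIBRARY"] = pick_whisper_library()
```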
|
||||
@@ -270,6 +270,7 @@
|
||||
nvidia: "cuda12-kokoro"
|
||||
intel: "intel-kokoro"
|
||||
amd: "rocm-kokoro"
|
||||
nvidia-l4t: "nvidia-l4t-kokoro"
|
||||
- &coqui
|
||||
urls:
|
||||
- https://github.com/idiap/coqui-ai-TTS
|
||||
@@ -352,6 +353,7 @@
|
||||
nvidia: "cuda12-chatterbox"
|
||||
metal: "metal-chatterbox"
|
||||
default: "cpu-chatterbox"
|
||||
nvidia-l4t: "nvidia-l4t-arm64-chatterbox"
|
||||
- &piper
|
||||
name: "piper"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-piper"
|
||||
@@ -425,6 +427,68 @@
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: apache-2.0
|
||||
- &neutts
|
||||
name: "neutts"
|
||||
urls:
|
||||
- https://github.com/neuphonic/neutts-air
|
||||
description: |
|
||||
NeuTTS Air is the world’s first super-realistic, on-device, TTS speech language model with instant voice cloning. Built off a 0.5B LLM backbone, NeuTTS Air brings natural-sounding speech, real-time performance, built-in security and speaker cloning to your local device - unlocking a new category of embedded voice agents, assistants, toys, and compliance-safe apps.
|
||||
tags:
|
||||
- text-to-speech
|
||||
- TTS
|
||||
license: apache-2.0
|
||||
capabilities:
|
||||
default: "cpu-neutts"
|
||||
nvidia: "cuda12-neutts"
|
||||
amd: "rocm-neutts"
|
||||
nvidia-l4t: "nvidia-l4t-neutts"
|
||||
- !!merge <<: *neutts
|
||||
name: "neutts-development"
|
||||
capabilities:
|
||||
default: "cpu-neutts-development"
|
||||
nvidia: "cuda12-neutts-development"
|
||||
amd: "rocm-neutts-development"
|
||||
nvidia-l4t: "nvidia-l4t-neutts-development"
|
||||
- !!merge <<: *neutts
|
||||
name: "cpu-neutts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-cpu-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "cuda12-neutts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-cuda-12-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "rocm-neutts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-rocm-hipblas-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "nvidia-l4t-neutts"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-nvidia-l4t-arm64-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "cpu-neutts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "cuda12-neutts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-cuda-12-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "rocm-neutts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-rocm-hipblas-neutts
|
||||
- !!merge <<: *neutts
|
||||
name: "nvidia-l4t-neutts-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-neutts"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-nvidia-l4t-arm64-neutts
|
||||
- !!merge <<: *mlx
|
||||
name: "mlx-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-mlx"
|
||||
@@ -1049,6 +1113,7 @@
|
||||
nvidia: "cuda12-kokoro-development"
|
||||
intel: "intel-kokoro-development"
|
||||
amd: "rocm-kokoro-development"
|
||||
nvidia-l4t: "nvidia-l4t-kokoro-development"
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda11-kokoro-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-kokoro"
|
||||
@@ -1074,6 +1139,16 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-intel-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "nvidia-l4t-kokoro"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-l4t-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "nvidia-l4t-kokoro-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-l4t-kokoro"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-l4t-kokoro
|
||||
- !!merge <<: *kokoro
|
||||
name: "cuda11-kokoro"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-kokoro"
|
||||
@@ -1227,6 +1302,7 @@
|
||||
nvidia: "cuda12-chatterbox-development"
|
||||
metal: "metal-chatterbox-development"
|
||||
default: "cpu-chatterbox-development"
|
||||
nvidia-l4t: "nvidia-l4t-arm64-chatterbox"
|
||||
- !!merge <<: *chatterbox
|
||||
name: "cpu-chatterbox"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-chatterbox"
|
||||
@@ -1237,6 +1313,16 @@
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-cpu-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "nvidia-l4t-arm64-chatterbox"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-arm64-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:latest-gpu-nvidia-l4t-arm64-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "nvidia-l4t-arm64-chatterbox-development"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-l4t-arm64-chatterbox"
|
||||
mirrors:
|
||||
- localai/localai-backends:master-gpu-nvidia-l4t-arm64-chatterbox
|
||||
- !!merge <<: *chatterbox
|
||||
name: "metal-chatterbox"
|
||||
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-chatterbox"
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
bark==0.1.5
|
||||
grpcio==1.74.0
|
||||
grpcio==1.76.0
|
||||
protobuf
|
||||
certifi
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for Bark TTS
|
||||
This is an extra gRPC server of LocalAI for Chatterbox TTS
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
@@ -14,15 +14,98 @@ import backend_pb2_grpc
|
||||
import torch
|
||||
import torchaudio as ta
|
||||
from chatterbox.tts import ChatterboxTTS
|
||||
|
||||
from chatterbox.mtl_tts import ChatterboxMultilingualTTS
|
||||
import grpc
|
||||
import tempfile
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def split_text_at_word_boundary(text, max_length=250):
|
||||
"""
|
||||
Split text at word boundaries without truncating words.
|
||||
Returns a list of text chunks.
|
||||
"""
|
||||
if not text or len(text) <= max_length:
|
||||
return [text]
|
||||
|
||||
chunks = []
|
||||
words = text.split()
|
||||
current_chunk = ""
|
||||
|
||||
for word in words:
|
||||
# Check if adding this word would exceed the limit
|
||||
if len(current_chunk) + len(word) + 1 <= max_length:
|
||||
if current_chunk:
|
||||
current_chunk += " " + word
|
||||
else:
|
||||
current_chunk = word
|
||||
else:
|
||||
# If current chunk is not empty, add it to chunks
|
||||
if current_chunk:
|
||||
chunks.append(current_chunk)
|
||||
current_chunk = word
|
||||
else:
|
||||
# If a single word is longer than max_length, we have to include it anyway
|
||||
chunks.append(word)
|
||||
current_chunk = ""
|
||||
|
||||
# Add the last chunk if it's not empty
|
||||
if current_chunk:
|
||||
chunks.append(current_chunk)
|
||||
|
||||
return chunks
|
||||
|
||||
def merge_audio_files(audio_files, output_path, sample_rate):
|
||||
"""
|
||||
Merge multiple audio files into a single audio file.
|
||||
"""
|
||||
if not audio_files:
|
||||
return
|
||||
|
||||
if len(audio_files) == 1:
|
||||
# If only one file, just copy it
|
||||
import shutil
|
||||
shutil.copy2(audio_files[0], output_path)
|
||||
return
|
||||
|
||||
# Load all audio files
|
||||
waveforms = []
|
||||
for audio_file in audio_files:
|
||||
waveform, sr = ta.load(audio_file)
|
||||
if sr != sample_rate:
|
||||
# Resample if necessary
|
||||
resampler = ta.transforms.Resample(sr, sample_rate)
|
||||
waveform = resampler(waveform)
|
||||
waveforms.append(waveform)
|
||||
|
||||
# Concatenate all waveforms
|
||||
merged_waveform = torch.cat(waveforms, dim=1)
|
||||
|
||||
# Save the merged audio
|
||||
ta.save(output_path, merged_waveform, sample_rate)
|
||||
|
||||
# Clean up temporary files
|
||||
for audio_file in audio_files:
|
||||
if os.path.exists(audio_file):
|
||||
os.remove(audio_file)
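
Together, split_text_at_word_boundary and merge_audio_files implement the long-text path used by the TTS method further down: text over 250 characters is split at word boundaries, each chunk is synthesized to a temporary WAV, and the pieces are concatenated at a single sample rate. A hedged usage sketch of that flow (the `model` object and destination path are placeholders; the helpers are the ones defined above):

```python
import tempfile
import torchaudio as ta

def synthesize_long_text(model, text, dst, max_length=250, **kwargs):
    """Sketch of the long-text path: split, synthesize per chunk, merge into dst."""
    chunks = split_text_at_word_boundary(text, max_length=max_length)
    if len(chunks) == 1:
        ta.save(dst, model.generate(chunks[0], **kwargs), model.sr)
        return
    temp_files = []
    for chunk in chunks:
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
        tmp.close()
        ta.save(tmp.name, model.generate(chunk, **kwargs), model.sr)
        temp_files.append(tmp.name)
    # merge_audio_files also removes the temporary chunk files.
    merge_audio_files(temp_files, dst, model.sr)
```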
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
COQUI_LANGUAGE = os.environ.get('COQUI_LANGUAGE', None)
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
@@ -47,6 +130,28 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
|
||||
options = request.Options
|
||||
|
||||
# empty dict
|
||||
self.options = {}
|
||||
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We are storing all the options in a dict so we can use it later when
|
||||
# generating the images
|
||||
for opt in options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":")
|
||||
# if value is a number, convert it to the appropriate type
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
self.AudioPath = None
|
||||
|
||||
if os.path.isabs(request.AudioPath):
|
||||
@@ -56,10 +161,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
modelFileBase = os.path.dirname(request.ModelFile)
|
||||
# modify LoraAdapter to be relative to modelFileBase
|
||||
self.AudioPath = os.path.join(modelFileBase, request.AudioPath)
|
||||
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
self.model = ChatterboxTTS.from_pretrained(device=device)
|
||||
if "multilingual" in self.options:
|
||||
# remove key from options
|
||||
del self.options["multilingual"]
|
||||
self.model = ChatterboxMultilingualTTS.from_pretrained(device=device)
|
||||
else:
|
||||
self.model = ChatterboxTTS.from_pretrained(device=device)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
# Implement your logic here for the LoadModel service
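
LoadModel above parses `request.Options` entries of the form `optname:optvalue` into a typed dict (floats, ints and booleans are converted), and a `multilingual` option switches the servicer to ChatterboxMultilingualTTS. A small self-contained sketch of just that conversion, showing what options such as `multilingual:true language:it exaggeration:0.4` would produce; the option names are examples, only the parsing rule comes from the code above:

```python
def is_float(s):
    try:
        float(s); return True
    except ValueError:
        return False

def is_int(s):
    try:
        int(s); return True
    except ValueError:
        return False

def parse_backend_options(options):
    """Convert "key:value" strings into a dict with float/int/bool coercion."""
    parsed = {}
    for opt in options:
        if ":" not in opt:
            continue
        key, value = opt.split(":")
        if is_float(value):
            value = float(value)
        elif is_int(value):
            value = int(value)
        elif value.lower() in ["true", "false"]:
            value = value.lower() == "true"
        parsed[key] = value
    return parsed

# Hypothetical options as they might arrive from a model YAML definition.
print(parse_backend_options(["multilingual:true", "language:it", "exaggeration:0.4"]))
# {'multilingual': True, 'language': 'it', 'exaggeration': 0.4}
```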
|
||||
@@ -68,14 +177,43 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
|
||||
def TTS(self, request, context):
|
||||
try:
|
||||
# Generate audio using ChatterboxTTS
|
||||
kwargs = {}
|
||||
|
||||
if "language" in self.options:
|
||||
kwargs["language_id"] = self.options["language"]
|
||||
if self.AudioPath is not None:
|
||||
wav = self.model.generate(request.text, audio_prompt_path=self.AudioPath)
|
||||
kwargs["audio_prompt_path"] = self.AudioPath
|
||||
|
||||
# add options to kwargs
|
||||
kwargs.update(self.options)
|
||||
|
||||
# Check if text exceeds 250 characters
|
||||
# (chatterbox does not support long text)
|
||||
# https://github.com/resemble-ai/chatterbox/issues/60
|
||||
# https://github.com/resemble-ai/chatterbox/issues/110
|
||||
if len(request.text) > 250:
|
||||
# Split text at word boundaries
|
||||
text_chunks = split_text_at_word_boundary(request.text, max_length=250)
|
||||
print(f"Splitting text into chunks of 250 characters: {len(text_chunks)}", file=sys.stderr)
|
||||
# Generate audio for each chunk
|
||||
temp_audio_files = []
|
||||
for i, chunk in enumerate(text_chunks):
|
||||
# Generate audio for this chunk
|
||||
wav = self.model.generate(chunk, **kwargs)
|
||||
|
||||
# Create temporary file for this chunk
|
||||
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')
|
||||
temp_file.close()
|
||||
ta.save(temp_file.name, wav, self.model.sr)
|
||||
temp_audio_files.append(temp_file.name)
|
||||
|
||||
# Merge all audio files
|
||||
merge_audio_files(temp_audio_files, request.dst, self.model.sr)
|
||||
else:
|
||||
wav = self.model.generate(request.text)
|
||||
|
||||
# Save the generated audio
|
||||
ta.save(request.dst, wav, self.model.sr)
|
||||
# Generate audio using ChatterboxTTS for short text
|
||||
wav = self.model.generate(request.text, **kwargs)
|
||||
# Save the generated audio
|
||||
ta.save(request.dst, wav, self.model.sr)
|
||||
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
|
||||
@@ -15,5 +15,6 @@ fi
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"

installRequirements

@@ -1,6 +1,9 @@
--extra-index-url https://download.pytorch.org/whl/cpu
accelerate
torch==2.6.0
torchaudio==2.6.0
transformers==4.46.3
chatterbox-tts==0.1.2
torch
torchaudio
numpy>=1.24.0,<1.26.0
transformers
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
#chatterbox-tts==0.1.4
@@ -2,5 +2,7 @@
torch==2.6.0+cu118
torchaudio==2.6.0+cu118
transformers==4.46.3
chatterbox-tts==0.1.2
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate
@@ -1,5 +1,7 @@
torch==2.6.0
torchaudio==2.6.0
transformers==4.46.3
chatterbox-tts==0.1.2
torch
torchaudio
transformers
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate

@@ -1,6 +1,8 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.0
torch==2.6.0+rocm6.1
torchaudio==2.6.0+rocm6.1
transformers==4.46.3
chatterbox-tts==0.1.2
transformers
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate

@@ -2,8 +2,10 @@
intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi
transformers==4.46.3
chatterbox-tts==0.1.2
transformers
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate
oneccl_bind_pt==2.3.100+xpu
optimum[openvino]

backend/python/chatterbox/requirements-l4t.txt (new file, +7)
@@ -0,0 +1,7 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
torch
torchaudio
transformers
numpy>=1.24.0,<1.26.0
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate
@@ -2,4 +2,5 @@ grpcio==1.71.0
protobuf
certifi
packaging
setuptools
setuptools
poetry
@@ -1,3 +1,3 @@
grpcio==1.74.0
grpcio==1.76.0
protobuf
grpcio-tools
@@ -1,4 +1,4 @@
grpcio==1.74.0
grpcio==1.76.0
protobuf
certifi
packaging==24.1
@@ -66,11 +66,20 @@ from diffusers.schedulers import (
)

def is_float(s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False


# The scheduler list mapping was taken from here: https://github.com/neggles/animatediff-cli/blob/6f336f5f4b5e38e85d7f06f1744ef42d0a45f2a7/src/animatediff/schedulers.py#L39
# Credits to https://github.com/neggles
@@ -177,10 +186,11 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
key, value = opt.split(":")
# if value is a number, convert it to the appropriate type
if is_float(value):
if value.is_integer():
value = int(value)
else:
value = float(value)
value = float(value)
elif is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value

# From options, extract if present "torch_dtype" and set it to the appropriate type

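The same option-coercion pattern (module-level `is_float`/`is_int` helpers instead of a per-class `_is_float`) is applied to several of the Python backends that follow. A self-contained sketch of the behaviour, using a hypothetical helper name, is:

```python
def is_float(s):
    """Check if a string can be converted to float."""
    try:
        float(s)
        return True
    except ValueError:
        return False

def is_int(s):
    """Check if a string can be converted to int."""
    try:
        int(s)
        return True
    except ValueError:
        return False

def parse_backend_options(options):
    """Coerce "key:value" option strings into typed values (numbers, booleans, or raw strings)."""
    parsed = {}
    for opt in options:
        if ":" not in opt:
            continue
        key, value = opt.split(":", 1)  # split on the first colon only
        if is_float(value):
            value = float(value)
        elif is_int(value):
            value = int(value)
        elif value.lower() in ["true", "false"]:
            value = value.lower() == "true"
        parsed[key] = value
    return parsed

# parse_backend_options(["cfg_scale:7.5", "steps:20", "multilingual:true"])
# -> {"cfg_scale": 7.5, "steps": 20.0, "multilingual": True}
```

Note that because the float check runs first, integer-looking values such as `steps:20` are now stored as floats (`20.0`); the earlier `.is_integer()` special case shown in the removed lines above was dropped.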
@@ -1,5 +1,5 @@
setuptools
grpcio==1.74.0
grpcio==1.76.0
pillow
protobuf
certifi

@@ -31,7 +31,7 @@ class TestBackendServicer(unittest.TestCase):
"""
This method tests if the server starts up successfully
"""
time.sleep(10)
time.sleep(20)
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
@@ -48,7 +48,7 @@ class TestBackendServicer(unittest.TestCase):
"""
This method tests if the model is loaded successfully
"""
time.sleep(10)
time.sleep(20)
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
@@ -66,7 +66,7 @@ class TestBackendServicer(unittest.TestCase):
"""
This method tests if the backend can generate images
"""
time.sleep(10)
time.sleep(20)
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:

@@ -1,4 +1,4 @@
grpcio==1.74.0
grpcio==1.76.0
protobuf
certifi
wheel

@@ -64,15 +64,15 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
# Generate audio using Kokoro pipeline
generator = self.pipeline(request.text, voice=voice)

# Get the first (and typically only) audio segment
speechs = []
# Get all the audio segment
for i, (gs, ps, audio) in enumerate(generator):
# Save audio to the destination file
sf.write(request.dst, audio, 24000)
speechs.append(audio)
print(f"Generated audio segment {i}: gs={gs}, ps={ps}", file=sys.stderr)
# For now, we only process the first segment
# If you need to handle multiple segments, you might want to modify this
break

# Merges the audio segments and writes them to the destination
speech = torch.cat(speechs, dim=0)
sf.write(request.dst, speech, 24000)

except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")


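For context, the segment-collection loop above can be exercised outside the gRPC servicer roughly as follows (a sketch assuming the upstream `kokoro` package's `KPipeline` API and the 24 kHz output rate used above):

```python
import torch
import soundfile as sf
from kokoro import KPipeline  # assumed import path for the Kokoro pipeline

def synthesize(text, voice="af_heart", dst="out.wav"):
    """Collect every segment yielded by the Kokoro pipeline and write a single WAV."""
    pipeline = KPipeline(lang_code="a")  # "a" = American English
    segments = []
    for i, (gs, ps, audio) in enumerate(pipeline(text, voice=voice)):
        print(f"segment {i}: gs={gs}, ps={ps}")
        segments.append(torch.as_tensor(audio))
    speech = torch.cat(segments, dim=0)   # concatenate all segments
    sf.write(dst, speech.numpy(), 24000)  # Kokoro emits 24 kHz audio
```

Concatenating every segment before writing is what fixes the previous behaviour, where only the first segment reached the destination file.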
backend/python/kokoro/requirements-l4t.txt (new file, +7)
@@ -0,0 +1,7 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
torch
torchaudio
transformers
accelerate
kokoro
soundfile
@@ -20,6 +20,21 @@ import soundfile as sf
|
||||
import numpy as np
|
||||
import uuid
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
@@ -32,14 +47,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
This backend provides TTS (Text-to-Speech) functionality using MLX-Audio.
|
||||
"""
|
||||
|
||||
def _is_float(self, s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def Health(self, request, context):
|
||||
"""
|
||||
Returns a health check message.
|
||||
@@ -80,11 +87,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
key, value = opt.split(":", 1) # Split only on first colon to handle values with colons
|
||||
|
||||
# Convert numeric values to appropriate types
|
||||
if self._is_float(value):
|
||||
if float(value).is_integer():
|
||||
value = int(value)
|
||||
else:
|
||||
value = float(value)
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
|
||||
|
||||
@@ -21,6 +21,21 @@ import io
|
||||
from PIL import Image
|
||||
import tempfile
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
@@ -32,14 +47,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
A gRPC servicer that implements the Backend service defined in backend.proto.
|
||||
"""
|
||||
|
||||
def _is_float(self, s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def Health(self, request, context):
|
||||
"""
|
||||
Returns a health check message.
|
||||
@@ -79,12 +86,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
continue
|
||||
key, value = opt.split(":", 1) # Split only on first colon to handle values with colons
|
||||
|
||||
# Convert numeric values to appropriate types
|
||||
if self._is_float(value):
|
||||
if float(value).is_integer():
|
||||
value = int(value)
|
||||
else:
|
||||
value = float(value)
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
|
||||
|
||||
@@ -24,20 +24,27 @@ _ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
A gRPC servicer that implements the Backend service defined in backend.proto.
|
||||
"""
|
||||
|
||||
def _is_float(self, s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
def Health(self, request, context):
|
||||
"""
|
||||
Returns a health check message.
|
||||
@@ -78,11 +85,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
key, value = opt.split(":", 1) # Split only on first colon to handle values with colons
|
||||
|
||||
# Convert numeric values to appropriate types
|
||||
if self._is_float(value):
|
||||
if float(value).is_integer():
|
||||
value = int(value)
|
||||
else:
|
||||
value = float(value)
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
|
||||
|
||||
backend/python/neutts/Makefile (new file, +23)
@@ -0,0 +1,23 @@
|
||||
.PHONY: neutts
|
||||
neutts:
|
||||
bash install.sh
|
||||
|
||||
.PHONY: run
|
||||
run: neutts
|
||||
@echo "Running neutts..."
|
||||
bash run.sh
|
||||
@echo "neutts run."
|
||||
|
||||
.PHONY: test
|
||||
test: neutts
|
||||
@echo "Testing neutts..."
|
||||
bash test.sh
|
||||
@echo "neutts tested."
|
||||
|
||||
.PHONY: protogen-clean
|
||||
protogen-clean:
|
||||
$(RM) backend_pb2_grpc.py backend_pb2.py
|
||||
|
||||
.PHONY: clean
|
||||
clean: protogen-clean
|
||||
rm -rf venv __pycache__
|
||||
backend/python/neutts/backend.py (new file, +162)
@@ -0,0 +1,162 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
This is an extra gRPC server of LocalAI for NeuTTSAir
|
||||
"""
|
||||
from concurrent import futures
|
||||
import time
|
||||
import argparse
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
import torch
|
||||
from neuttsair.neutts import NeuTTSAir
|
||||
import soundfile as sf
|
||||
|
||||
import grpc
|
||||
|
||||
def is_float(s):
|
||||
"""Check if a string can be converted to float."""
|
||||
try:
|
||||
float(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
def is_int(s):
|
||||
"""Check if a string can be converted to int."""
|
||||
try:
|
||||
int(s)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
|
||||
|
||||
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1
|
||||
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
|
||||
|
||||
# Implement the BackendServicer class with the service methods
|
||||
class BackendServicer(backend_pb2_grpc.BackendServicer):
|
||||
"""
|
||||
BackendServicer is the class that implements the gRPC service
|
||||
"""
|
||||
def Health(self, request, context):
|
||||
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
|
||||
def LoadModel(self, request, context):
|
||||
|
||||
# Get device
|
||||
# device = "cuda" if request.CUDA else "cpu"
|
||||
if torch.cuda.is_available():
|
||||
print("CUDA is available", file=sys.stderr)
|
||||
device = "cuda"
|
||||
else:
|
||||
print("CUDA is not available", file=sys.stderr)
|
||||
device = "cpu"
|
||||
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
||||
if mps_available:
|
||||
device = "mps"
|
||||
if not torch.cuda.is_available() and request.CUDA:
|
||||
return backend_pb2.Result(success=False, message="CUDA is not available")
|
||||
|
||||
|
||||
options = request.Options
|
||||
|
||||
# empty dict
|
||||
self.options = {}
|
||||
self.ref_text = None
|
||||
|
||||
# The options are a list of strings in this form optname:optvalue
|
||||
# We are storing all the options in a dict so we can use it later when
|
||||
# generating the images
|
||||
for opt in options:
|
||||
if ":" not in opt:
|
||||
continue
|
||||
key, value = opt.split(":")
|
||||
# if value is a number, convert it to the appropriate type
|
||||
if is_float(value):
|
||||
value = float(value)
|
||||
elif is_int(value):
|
||||
value = int(value)
|
||||
elif value.lower() in ["true", "false"]:
|
||||
value = value.lower() == "true"
|
||||
self.options[key] = value
|
||||
|
||||
codec_repo = "neuphonic/neucodec"
|
||||
if "codec_repo" in self.options:
|
||||
codec_repo = self.options["codec_repo"]
|
||||
del self.options["codec_repo"]
|
||||
if "ref_text" in self.options:
|
||||
self.ref_text = self.options["ref_text"]
|
||||
del self.options["ref_text"]
|
||||
|
||||
self.AudioPath = None
|
||||
|
||||
if os.path.isabs(request.AudioPath):
|
||||
self.AudioPath = request.AudioPath
|
||||
elif request.AudioPath and request.ModelFile != "" and not os.path.isabs(request.AudioPath):
|
||||
# get base path of modelFile
|
||||
modelFileBase = os.path.dirname(request.ModelFile)
|
||||
# modify LoraAdapter to be relative to modelFileBase
|
||||
self.AudioPath = os.path.join(modelFileBase, request.AudioPath)
|
||||
try:
|
||||
print("Preparing models, please wait", file=sys.stderr)
|
||||
self.model = NeuTTSAir(backbone_repo=request.Model, backbone_device=device, codec_repo=codec_repo, codec_device=device)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
# Implement your logic here for the LoadModel service
|
||||
# Replace this with your desired response
|
||||
return backend_pb2.Result(message="Model loaded successfully", success=True)
|
||||
|
||||
def TTS(self, request, context):
|
||||
try:
|
||||
kwargs = {}
|
||||
|
||||
# add options to kwargs
|
||||
kwargs.update(self.options)
|
||||
|
||||
ref_codes = self.model.encode_reference(self.AudioPath)
|
||||
|
||||
wav = self.model.infer(request.text, ref_codes, self.ref_text)
|
||||
|
||||
sf.write(request.dst, wav, 24000)
|
||||
except Exception as err:
|
||||
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
|
||||
return backend_pb2.Result(success=True)
|
||||
|
||||
def serve(address):
|
||||
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
|
||||
options=[
|
||||
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
|
||||
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
|
||||
])
|
||||
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
|
||||
server.add_insecure_port(address)
|
||||
server.start()
|
||||
print("Server started. Listening on: " + address, file=sys.stderr)
|
||||
|
||||
# Define the signal handler function
|
||||
def signal_handler(sig, frame):
|
||||
print("Received termination signal. Shutting down...")
|
||||
server.stop(0)
|
||||
sys.exit(0)
|
||||
|
||||
# Set the signal handlers for SIGINT and SIGTERM
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
while True:
|
||||
time.sleep(_ONE_DAY_IN_SECONDS)
|
||||
except KeyboardInterrupt:
|
||||
server.stop(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the gRPC server.")
|
||||
parser.add_argument(
|
||||
"--addr", default="localhost:50051", help="The address to bind the server to."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
serve(args.addr)
|
||||
backend/python/neutts/install.sh (new executable file, +33)
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
# This is here because the Intel pip index is broken and returns 200 status codes for every package name, it just doesn't return any package links.
|
||||
# This makes uv think that the package exists in the Intel pip index, and by default it stops looking at other pip indexes once it finds a match.
|
||||
# We need uv to continue falling through to the pypi default index to find optimum[openvino] in the pypi index
|
||||
# the --upgrade actually allows us to *downgrade* torch to the version provided in the Intel pip index
|
||||
if [ "x${BUILD_PROFILE}" == "xintel" ]; then
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_TYPE}" == "xcublas" ] || [ "x${BUILD_TYPE}" == "xl4t" ]; then
|
||||
export CMAKE_ARGS="-DGGML_CUDA=on"
|
||||
fi
|
||||
|
||||
if [ "x${BUILD_TYPE}" == "xhipblas" ]; then
|
||||
export CMAKE_ARGS="-DGGML_HIPBLAS=on"
|
||||
fi
|
||||
|
||||
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
|
||||
|
||||
git clone https://github.com/neuphonic/neutts-air neutts-air
|
||||
|
||||
cp -rfv neutts-air/neuttsair ./
|
||||
|
||||
installRequirements
|
||||
backend/python/neutts/requirements-after.txt (new file, +2)
@@ -0,0 +1,2 @@
|
||||
datasets==4.1.1
|
||||
torchtune==0.6.1
|
||||
backend/python/neutts/requirements-cpu.txt (new file, +10)
@@ -0,0 +1,10 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/cpu
|
||||
accelerate
|
||||
torch==2.8.0
|
||||
transformers==4.56.1
|
||||
librosa==0.11.0
|
||||
neucodec>=0.0.4
|
||||
phonemizer==3.3.0
|
||||
soundfile==0.13.1
|
||||
resemble-perth==1.0.1
|
||||
llama-cpp-python
|
||||
backend/python/neutts/requirements-cublas12.txt (new file, +8)
@@ -0,0 +1,8 @@
|
||||
librosa==0.11.0
|
||||
neucodec>=0.0.4
|
||||
phonemizer==3.3.0
|
||||
soundfile==0.13.1
|
||||
torch==2.8.0
|
||||
transformers==4.56.1
|
||||
resemble-perth==1.0.1
|
||||
accelerate
|
||||
backend/python/neutts/requirements-hipblas.txt (new file, +10)
@@ -0,0 +1,10 @@
|
||||
--extra-index-url https://download.pytorch.org/whl/rocm6.3
|
||||
torch==2.8.0+rocm6.3
|
||||
transformers==4.56.1
|
||||
accelerate
|
||||
librosa==0.11.0
|
||||
neucodec>=0.0.4
|
||||
phonemizer==3.3.0
|
||||
soundfile==0.13.1
|
||||
resemble-perth==1.0.1
|
||||
llama-cpp-python
|
||||
backend/python/neutts/requirements-l4t.txt (new file, +10)
@@ -0,0 +1,10 @@
|
||||
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
|
||||
torch
|
||||
transformers
|
||||
accelerate
|
||||
librosa==0.11.0
|
||||
neucodec>=0.0.4
|
||||
phonemizer==3.3.0
|
||||
soundfile==0.13.1
|
||||
resemble-perth==1.0.1
|
||||
llama-cpp-python
|
||||
backend/python/neutts/requirements.txt (new file, +7)
@@ -0,0 +1,7 @@
|
||||
grpcio==1.71.0
|
||||
protobuf
|
||||
certifi
|
||||
packaging
|
||||
setuptools
|
||||
numpy==2.2.6
|
||||
scikit_build_core
|
||||
backend/python/neutts/run.sh (new executable file, +10)
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
|
||||
startBackend $@
|
||||
backend/python/neutts/test.py (new file, +82)
@@ -0,0 +1,82 @@
|
||||
"""
|
||||
A test script to test the gRPC service
|
||||
"""
|
||||
import unittest
|
||||
import subprocess
|
||||
import time
|
||||
import backend_pb2
|
||||
import backend_pb2_grpc
|
||||
|
||||
import grpc
|
||||
|
||||
|
||||
class TestBackendServicer(unittest.TestCase):
|
||||
"""
|
||||
TestBackendServicer is the class that tests the gRPC service
|
||||
"""
|
||||
def setUp(self):
|
||||
"""
|
||||
This method sets up the gRPC service by starting the server
|
||||
"""
|
||||
self.service = subprocess.Popen(["python3", "backend.py", "--addr", "localhost:50051"])
|
||||
time.sleep(30)
|
||||
|
||||
def tearDown(self) -> None:
|
||||
"""
|
||||
This method tears down the gRPC service by terminating the server
|
||||
"""
|
||||
self.service.terminate()
|
||||
self.service.wait()
|
||||
|
||||
def test_server_startup(self):
|
||||
"""
|
||||
This method tests if the server starts up successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.Health(backend_pb2.HealthMessage())
|
||||
self.assertEqual(response.message, b'OK')
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Server failed to start")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_load_model(self):
|
||||
"""
|
||||
This method tests if the model is loaded successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions())
|
||||
print(response)
|
||||
self.assertTrue(response.success)
|
||||
self.assertEqual(response.message, "Model loaded successfully")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("LoadModel service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_tts(self):
|
||||
"""
|
||||
This method tests if the embeddings are generated successfully
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions())
|
||||
self.assertTrue(response.success)
|
||||
tts_request = backend_pb2.TTSRequest(text="80s TV news production music hit for tonight's biggest story")
|
||||
tts_response = stub.TTS(tts_request)
|
||||
self.assertIsNotNone(tts_response)
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("TTS service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
backend/python/neutts/test.sh (new executable file, +11)
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
backend_dir=$(dirname $0)
|
||||
if [ -d $backend_dir/common ]; then
|
||||
source $backend_dir/common/libbackend.sh
|
||||
else
|
||||
source $backend_dir/../common/libbackend.sh
|
||||
fi
|
||||
|
||||
runUnittests
|
||||
@@ -61,7 +61,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if request.PipelineType != "": # Reuse the PipelineType field for language
kwargs['lang'] = request.PipelineType
self.model_name = model_name
self.model = Reranker(model_name, **kwargs)
self.model = Reranker(model_name, **kwargs)
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")

@@ -75,12 +75,13 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
documents.append(doc)
ranked_results=self.model.rank(query=request.query, docs=documents, doc_ids=list(range(len(request.documents))))
# Prepare results to return
cropped_results = ranked_results.top_k(request.top_n) if request.top_n > 0 else ranked_results
results = [
backend_pb2.DocumentResult(
index=res.doc_id,
text=res.text,
relevance_score=res.score
) for res in ranked_results.results
) for res in (cropped_results)
]

# Calculate the usage and total tokens

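The intent of the `top_n` change is easier to see in isolation; a small sketch of the cropping semantics (names mirror the diff, the rerankers API details are assumed rather than verified here):

```python
def crop_results(ranked_results, top_n):
    """Keep only the best top_n hits when top_n > 0; otherwise return everything."""
    return ranked_results.top_k(top_n) if top_n > 0 else ranked_results

# With query "I love you" and documents
# ["I hate you", "I really like you", "I hate ignoring top_n"]:
#   top_n=2 -> only the two best matches are returned
#   top_n=0 (field omitted in the request) -> all three ranked documents are returned
```

The unit tests added further below exercise exactly these two cases.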
@@ -1,3 +1,3 @@
grpcio==1.74.0
grpcio==1.76.0
protobuf
certifi
@@ -76,7 +76,7 @@ class TestBackendServicer(unittest.TestCase):
|
||||
)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="cross-encoder"))
|
||||
self.assertTrue(response.success)
|
||||
|
||||
|
||||
rerank_response = stub.Rerank(request)
|
||||
print(rerank_response.results[0])
|
||||
self.assertIsNotNone(rerank_response.results)
|
||||
@@ -87,4 +87,60 @@ class TestBackendServicer(unittest.TestCase):
|
||||
print(err)
|
||||
self.fail("Reranker service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
self.tearDown()
|
||||
|
||||
def test_rerank_omit_top_n(self):
|
||||
"""
|
||||
This method tests if the embeddings are generated successfully even top_n is omitted
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
request = backend_pb2.RerankRequest(
|
||||
query="I love you",
|
||||
documents=["I hate you", "I really like you"],
|
||||
top_n=0 #
|
||||
)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="cross-encoder"))
|
||||
self.assertTrue(response.success)
|
||||
|
||||
rerank_response = stub.Rerank(request)
|
||||
print(rerank_response.results[0])
|
||||
self.assertIsNotNone(rerank_response.results)
|
||||
self.assertEqual(len(rerank_response.results), 2)
|
||||
self.assertEqual(rerank_response.results[0].text, "I really like you")
|
||||
self.assertEqual(rerank_response.results[1].text, "I hate you")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Reranker service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
def test_rerank_crop(self):
|
||||
"""
|
||||
This method tests top_n cropping
|
||||
"""
|
||||
try:
|
||||
self.setUp()
|
||||
with grpc.insecure_channel("localhost:50051") as channel:
|
||||
stub = backend_pb2_grpc.BackendStub(channel)
|
||||
request = backend_pb2.RerankRequest(
|
||||
query="I love you",
|
||||
documents=["I hate you", "I really like you", "I hate ignoring top_n"],
|
||||
top_n=2
|
||||
)
|
||||
response = stub.LoadModel(backend_pb2.ModelOptions(Model="cross-encoder"))
|
||||
self.assertTrue(response.success)
|
||||
|
||||
rerank_response = stub.Rerank(request)
|
||||
print(rerank_response.results[0])
|
||||
self.assertIsNotNone(rerank_response.results)
|
||||
self.assertEqual(len(rerank_response.results), 2)
|
||||
self.assertEqual(rerank_response.results[0].text, "I really like you")
|
||||
self.assertEqual(rerank_response.results[1].text, "I hate you")
|
||||
except Exception as err:
|
||||
print(err)
|
||||
self.fail("Reranker service failed")
|
||||
finally:
|
||||
self.tearDown()
|
||||
|
||||
@@ -1,4 +1,4 @@
grpcio==1.74.0
grpcio==1.76.0
protobuf==6.32.0
certifi
setuptools

@@ -1,4 +1,4 @@
grpcio==1.74.0
grpcio==1.76.0
protobuf
certifi
setuptools
@@ -2,14 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"fyne.io/fyne/v2"
|
||||
"fyne.io/fyne/v2/app"
|
||||
"fyne.io/fyne/v2/driver/desktop"
|
||||
coreLauncher "github.com/mudler/LocalAI/cmd/launcher/internal"
|
||||
"github.com/mudler/LocalAI/pkg/signals"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -42,7 +40,12 @@ func main() {
|
||||
}
|
||||
|
||||
// Setup signal handling for graceful shutdown
|
||||
setupSignalHandling(launcher)
|
||||
signals.RegisterGracefulTerminationHandler(func() {
|
||||
// Perform cleanup
|
||||
if err := launcher.Shutdown(); err != nil {
|
||||
log.Printf("Error during shutdown: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Initialize the launcher state
|
||||
go func() {
|
||||
@@ -67,26 +70,3 @@ func main() {
|
||||
// Run the application in background (window only shown when "Settings" is clicked)
|
||||
myApp.Run()
|
||||
}
|
||||
|
||||
// setupSignalHandling sets up signal handlers for graceful shutdown
|
||||
func setupSignalHandling(launcher *coreLauncher.Launcher) {
|
||||
// Create a channel to receive OS signals
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
|
||||
// Register for interrupt and terminate signals
|
||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// Handle signals in a separate goroutine
|
||||
go func() {
|
||||
sig := <-sigChan
|
||||
log.Printf("Received signal %v, shutting down gracefully...", sig)
|
||||
|
||||
// Perform cleanup
|
||||
if err := launcher.Shutdown(); err != nil {
|
||||
log.Printf("Error during shutdown: %v", err)
|
||||
}
|
||||
|
||||
// Exit the application
|
||||
os.Exit(0)
|
||||
}()
|
||||
}
|
||||
|
||||
@@ -22,9 +22,15 @@ func New(opts ...config.AppOption) (*Application, error) {
|
||||
|
||||
log.Info().Msgf("Starting LocalAI using %d threads, with models path: %s", options.Threads, options.SystemState.Model.ModelsPath)
|
||||
log.Info().Msgf("LocalAI version: %s", internal.PrintableVersion())
|
||||
|
||||
if err := application.start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
caps, err := xsysinfo.CPUCapabilities()
|
||||
if err == nil {
|
||||
log.Debug().Msgf("CPU capabilities: %v", caps)
|
||||
|
||||
}
|
||||
gpus, err := xsysinfo.GPUs()
|
||||
if err == nil {
|
||||
@@ -56,12 +62,12 @@ func New(opts ...config.AppOption) (*Application, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if err := coreStartup.InstallModels(options.Galleries, options.BackendGalleries, options.SystemState, application.ModelLoader(), options.EnforcePredownloadScans, options.AutoloadBackendGalleries, nil, options.ModelsURL...); err != nil {
|
||||
if err := coreStartup.InstallModels(options.Context, application.GalleryService(), options.Galleries, options.BackendGalleries, options.SystemState, application.ModelLoader(), options.EnforcePredownloadScans, options.AutoloadBackendGalleries, nil, options.ModelsURL...); err != nil {
|
||||
log.Error().Err(err).Msg("error installing models")
|
||||
}
|
||||
|
||||
for _, backend := range options.ExternalBackends {
|
||||
if err := coreStartup.InstallExternalBackends(options.BackendGalleries, options.SystemState, application.ModelLoader(), nil, backend, "", ""); err != nil {
|
||||
if err := coreStartup.InstallExternalBackends(options.Context, options.BackendGalleries, options.SystemState, application.ModelLoader(), nil, backend, "", ""); err != nil {
|
||||
log.Error().Err(err).Msg("error installing external backend")
|
||||
}
|
||||
}
|
||||
@@ -152,10 +158,6 @@ func New(opts ...config.AppOption) (*Application, error) {
|
||||
// Watch the configuration directory
|
||||
startWatcher(options)
|
||||
|
||||
if err := application.start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Info().Msg("core/startup process completed!")
|
||||
return application, nil
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package backend
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
@@ -26,6 +25,7 @@ type LLMResponse struct {
|
||||
Response string // should this be []byte?
|
||||
Usage TokenUsage
|
||||
AudioOutput string
|
||||
Logprobs *schema.Logprobs // Logprobs from the backend response
|
||||
}
|
||||
|
||||
type TokenUsage struct {
|
||||
@@ -35,7 +35,7 @@ type TokenUsage struct {
|
||||
TimingTokenGeneration float64
|
||||
}
|
||||
|
||||
func ModelInference(ctx context.Context, s string, messages []schema.Message, images, videos, audios []string, loader *model.ModelLoader, c *config.ModelConfig, cl *config.ModelConfigLoader, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool) (func() (LLMResponse, error), error) {
|
||||
func ModelInference(ctx context.Context, s string, messages schema.Messages, images, videos, audios []string, loader *model.ModelLoader, c *config.ModelConfig, cl *config.ModelConfigLoader, o *config.ApplicationConfig, tokenCallback func(string, TokenUsage) bool, tools string, toolChoice string, logprobs *int, topLogprobs *int, logitBias map[string]float64) (func() (LLMResponse, error), error) {
|
||||
modelFile := c.Model
|
||||
|
||||
// Check if the modelFile exists, if it doesn't try to load it from the gallery
|
||||
@@ -47,7 +47,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
if !slices.Contains(modelNames, c.Name) {
|
||||
utils.ResetDownloadTimers()
|
||||
// if we failed to load the model, we try to download it
|
||||
err := gallery.InstallModelFromGallery(o.Galleries, o.BackendGalleries, o.SystemState, loader, c.Name, gallery.GalleryModel{}, utils.DisplayDownloadFunction, o.EnforcePredownloadScans, o.AutoloadBackendGalleries)
|
||||
err := gallery.InstallModelFromGallery(ctx, o.Galleries, o.BackendGalleries, o.SystemState, loader, c.Name, gallery.GalleryModel{}, utils.DisplayDownloadFunction, o.EnforcePredownloadScans, o.AutoloadBackendGalleries)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msgf("failed to install model %q from gallery", modelFile)
|
||||
//return nil, err
|
||||
@@ -65,29 +65,8 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
var protoMessages []*proto.Message
|
||||
// if we are using the tokenizer template, we need to convert the messages to proto messages
|
||||
// unless the prompt has already been tokenized (non-chat endpoints + functions)
|
||||
if c.TemplateConfig.UseTokenizerTemplate && s == "" {
|
||||
protoMessages = make([]*proto.Message, len(messages), len(messages))
|
||||
for i, message := range messages {
|
||||
protoMessages[i] = &proto.Message{
|
||||
Role: message.Role,
|
||||
}
|
||||
switch ct := message.Content.(type) {
|
||||
case string:
|
||||
protoMessages[i].Content = ct
|
||||
case []interface{}:
|
||||
// If using the tokenizer template, in case of multimodal we want to keep the multimodal content as and return only strings here
|
||||
data, _ := json.Marshal(ct)
|
||||
resultData := []struct {
|
||||
Text string `json:"text"`
|
||||
}{}
|
||||
json.Unmarshal(data, &resultData)
|
||||
for _, r := range resultData {
|
||||
protoMessages[i].Content += r.Text
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported type for schema.Message.Content for inference: %T", ct)
|
||||
}
|
||||
}
|
||||
if c.TemplateConfig.UseTokenizerTemplate && len(messages) > 0 {
|
||||
protoMessages = messages.ToProto()
|
||||
}
|
||||
|
||||
// in GRPC, the backend is supposed to answer to 1 single token if stream is not supported
|
||||
@@ -99,6 +78,21 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
opts.Images = images
|
||||
opts.Videos = videos
|
||||
opts.Audios = audios
|
||||
opts.Tools = tools
|
||||
opts.ToolChoice = toolChoice
|
||||
if logprobs != nil {
|
||||
opts.Logprobs = int32(*logprobs)
|
||||
}
|
||||
if topLogprobs != nil {
|
||||
opts.TopLogprobs = int32(*topLogprobs)
|
||||
}
|
||||
if len(logitBias) > 0 {
|
||||
// Serialize logit_bias map to JSON string for proto
|
||||
logitBiasJSON, err := json.Marshal(logitBias)
|
||||
if err == nil {
|
||||
opts.LogitBias = string(logitBiasJSON)
|
||||
}
|
||||
}
|
||||
|
||||
tokenUsage := TokenUsage{}
|
||||
|
||||
@@ -130,6 +124,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
}
|
||||
|
||||
ss := ""
|
||||
var logprobs *schema.Logprobs
|
||||
|
||||
var partialRune []byte
|
||||
err := inferenceModel.PredictStream(ctx, opts, func(reply *proto.Reply) {
|
||||
@@ -141,6 +136,14 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
tokenUsage.TimingTokenGeneration = reply.TimingTokenGeneration
|
||||
tokenUsage.TimingPromptProcessing = reply.TimingPromptProcessing
|
||||
|
||||
// Parse logprobs from reply if present (collect from last chunk that has them)
|
||||
if len(reply.Logprobs) > 0 {
|
||||
var parsedLogprobs schema.Logprobs
|
||||
if err := json.Unmarshal(reply.Logprobs, &parsedLogprobs); err == nil {
|
||||
logprobs = &parsedLogprobs
|
||||
}
|
||||
}
|
||||
|
||||
// Process complete runes and accumulate them
|
||||
var completeRunes []byte
|
||||
for len(partialRune) > 0 {
|
||||
@@ -166,6 +169,7 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
return LLMResponse{
|
||||
Response: ss,
|
||||
Usage: tokenUsage,
|
||||
Logprobs: logprobs,
|
||||
}, err
|
||||
} else {
|
||||
// TODO: Is the chicken bit the only way to get here? is that acceptable?
|
||||
@@ -188,9 +192,19 @@ func ModelInference(ctx context.Context, s string, messages []schema.Message, im
|
||||
response = c.TemplateConfig.ReplyPrefix + response
|
||||
}
|
||||
|
||||
// Parse logprobs from reply if present
|
||||
var logprobs *schema.Logprobs
|
||||
if len(reply.Logprobs) > 0 {
|
||||
var parsedLogprobs schema.Logprobs
|
||||
if err := json.Unmarshal(reply.Logprobs, &parsedLogprobs); err == nil {
|
||||
logprobs = &parsedLogprobs
|
||||
}
|
||||
}
|
||||
|
||||
return LLMResponse{
|
||||
Response: response,
|
||||
Usage: tokenUsage,
|
||||
Logprobs: logprobs,
|
||||
}, err
|
||||
}
|
||||
}
|
||||
|
||||
@@ -129,7 +129,6 @@ func grpcModelOpts(c config.ModelConfig) *pb.ModelOptions {
|
||||
triggers = append(triggers, &pb.GrammarTrigger{
|
||||
Word: t.Word,
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
return &pb.ModelOptions{
|
||||
@@ -213,7 +212,7 @@ func gRPCPredictOpts(c config.ModelConfig, modelPath string) *pb.PredictOptions
|
||||
}
|
||||
}
|
||||
|
||||
return &pb.PredictOptions{
|
||||
pbOpts := &pb.PredictOptions{
|
||||
Temperature: float32(*c.Temperature),
|
||||
TopP: float32(*c.TopP),
|
||||
NDraft: c.NDraft,
|
||||
@@ -250,4 +249,6 @@ func gRPCPredictOpts(c config.ModelConfig, modelPath string) *pb.PredictOptions
|
||||
TailFreeSamplingZ: float32(*c.TFZ),
|
||||
TypicalP: float32(*c.TypicalP),
|
||||
}
|
||||
// Logprobs and TopLogprobs are set by the caller if provided
|
||||
return pbOpts
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ func SoundGeneration(
|
||||
|
||||
// return RPC error if any
|
||||
if !res.Success {
|
||||
return "", nil, fmt.Errorf(res.Message)
|
||||
return "", nil, fmt.Errorf("error during sound generation: %s", res.Message)
|
||||
}
|
||||
|
||||
return filePath, res, err
|
||||
|
||||
@@ -70,7 +70,7 @@ func ModelTTS(
|
||||
|
||||
// return RPC error if any
|
||||
if !res.Success {
|
||||
return "", nil, fmt.Errorf(res.Message)
|
||||
return "", nil, fmt.Errorf("error during TTS: %s", res.Message)
|
||||
}
|
||||
|
||||
return filePath, res, err
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
@@ -102,7 +103,7 @@ func (bi *BackendsInstall) Run(ctx *cliContext.Context) error {
|
||||
}
|
||||
|
||||
modelLoader := model.NewModelLoader(systemState, true)
|
||||
err = startup.InstallExternalBackends(galleries, systemState, modelLoader, progressCallback, bi.BackendArgs, bi.Name, bi.Alias)
|
||||
err = startup.InstallExternalBackends(context.Background(), galleries, systemState, modelLoader, progressCallback, bi.BackendArgs, bi.Name, bi.Alias)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -5,9 +5,10 @@ import (
|
||||
"time"
|
||||
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/cli/signals"
|
||||
"github.com/mudler/LocalAI/core/explorer"
|
||||
"github.com/mudler/LocalAI/core/http"
|
||||
"github.com/mudler/LocalAI/pkg/signals"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
type ExplorerCMD struct {
|
||||
@@ -46,7 +47,13 @@ func (e *ExplorerCMD) Run(ctx *cliContext.Context) error {
|
||||
|
||||
appHTTP := http.Explorer(db)
|
||||
|
||||
signals.Handler(nil)
|
||||
signals.RegisterGracefulTerminationHandler(func() {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
if err := appHTTP.Shutdown(ctx); err != nil {
|
||||
log.Error().Err(err).Msg("error during shutdown")
|
||||
}
|
||||
})
|
||||
|
||||
return appHTTP.Listen(e.Address)
|
||||
return appHTTP.Start(e.Address)
|
||||
}
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/cli/signals"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/pkg/signals"
|
||||
)
|
||||
|
||||
type FederatedCLI struct {
|
||||
@@ -20,7 +20,11 @@ func (f *FederatedCLI) Run(ctx *cliContext.Context) error {
|
||||
|
||||
fs := p2p.NewFederatedServer(f.Address, p2p.NetworkID(f.Peer2PeerNetworkID, p2p.FederatedID), f.Peer2PeerToken, !f.RandomWorker, f.TargetWorker)
|
||||
|
||||
signals.Handler(nil)
|
||||
c, cancel := context.WithCancel(context.Background())
|
||||
|
||||
return fs.Start(context.Background())
|
||||
signals.RegisterGracefulTerminationHandler(func() {
|
||||
cancel()
|
||||
})
|
||||
|
||||
return fs.Start(c)
|
||||
}
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/services"
|
||||
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/core/startup"
|
||||
@@ -78,6 +80,12 @@ func (mi *ModelsInstall) Run(ctx *cliContext.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
galleryService := services.NewGalleryService(&config.ApplicationConfig{}, model.NewModelLoader(systemState, true))
|
||||
err = galleryService.Start(context.Background(), config.NewModelConfigLoader(mi.ModelsPath), systemState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var galleries []config.Gallery
|
||||
if err := json.Unmarshal([]byte(mi.Galleries), &galleries); err != nil {
|
||||
log.Error().Err(err).Msg("unable to load galleries")
|
||||
@@ -127,7 +135,7 @@ func (mi *ModelsInstall) Run(ctx *cliContext.Context) error {
|
||||
}
|
||||
|
||||
modelLoader := model.NewModelLoader(systemState, true)
|
||||
err = startup.InstallModels(galleries, backendGalleries, systemState, modelLoader, !mi.DisablePredownloadScan, mi.AutoloadBackendGalleries, progressCallback, modelName)
|
||||
err = startup.InstallModels(context.Background(), galleryService, galleries, backendGalleries, systemState, modelLoader, !mi.DisablePredownloadScan, mi.AutoloadBackendGalleries, progressCallback, modelName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -10,11 +10,11 @@ import (
|
||||
"github.com/mudler/LocalAI/core/application"
|
||||
cli_api "github.com/mudler/LocalAI/core/cli/api"
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/cli/signals"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/http"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/internal"
|
||||
"github.com/mudler/LocalAI/pkg/signals"
|
||||
"github.com/mudler/LocalAI/pkg/system"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
@@ -127,6 +127,7 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
config.WithP2PNetworkID(r.Peer2PeerNetworkID),
|
||||
config.WithLoadToMemory(r.LoadToMemory),
|
||||
config.WithMachineTag(r.MachineTag),
|
||||
config.WithAPIAddress(r.Address),
|
||||
}
|
||||
|
||||
if r.DisableMetricsEndpoint {
|
||||
@@ -225,8 +226,11 @@ func (r *RunCMD) Run(ctx *cliContext.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Catch signals from the OS requesting us to exit, and stop all backends
|
||||
signals.Handler(app.ModelLoader())
|
||||
signals.RegisterGracefulTerminationHandler(func() {
|
||||
if err := app.ModelLoader().StopAllGRPC(); err != nil {
|
||||
log.Error().Err(err).Msg("error while stopping all grpc backends")
|
||||
}
|
||||
})
|
||||
|
||||
return appHTTP.Listen(r.Address)
|
||||
return appHTTP.Start(r.Address)
|
||||
}
|
||||
|
||||
@@ -1,25 +0,0 @@
|
||||
package signals
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
func Handler(m *model.ModelLoader) {
|
||||
// Catch signals from the OS requesting us to exit, and stop all backends
|
||||
go func(m *model.ModelLoader) {
|
||||
c := make(chan os.Signal, 1) // we need to reserve to buffer size 1, so the notifier are not blocked
|
||||
signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT)
|
||||
<-c
|
||||
if m != nil {
|
||||
if err := m.StopAllGRPC(); err != nil {
|
||||
log.Error().Err(err).Msg("error while stopping all grpc backends")
|
||||
}
|
||||
}
|
||||
os.Exit(1)
|
||||
}(m)
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -11,7 +12,6 @@ import (
|
||||
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/config"
|
||||
"github.com/mudler/LocalAI/core/cli/signals"
|
||||
"github.com/mudler/LocalAI/core/gallery"
|
||||
"github.com/mudler/LocalAI/pkg/model"
|
||||
"github.com/mudler/LocalAI/pkg/system"
|
||||
@@ -43,7 +43,7 @@ func findLLamaCPPBackend(galleries string, systemState *system.SystemState) (str
|
||||
log.Error().Err(err).Msg("failed loading galleries")
|
||||
return "", err
|
||||
}
|
||||
err := gallery.InstallBackendFromGallery(gals, systemState, ml, llamaCPPGalleryName, nil, true)
|
||||
err := gallery.InstallBackendFromGallery(context.Background(), gals, systemState, ml, llamaCPPGalleryName, nil, true)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("llama-cpp backend not found, failed to install it")
|
||||
return "", err
|
||||
@@ -85,8 +85,6 @@ func (r *LLamaCPP) Run(ctx *cliContext.Context) error {
|
||||
|
||||
args = append([]string{grpcProcess}, args...)
|
||||
|
||||
signals.Handler(nil)
|
||||
|
||||
return syscall.Exec(
|
||||
grpcProcess,
|
||||
args,
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"time"
|
||||
|
||||
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
||||
"github.com/mudler/LocalAI/core/cli/signals"
|
||||
"github.com/mudler/LocalAI/core/p2p"
|
||||
"github.com/mudler/LocalAI/pkg/signals"
|
||||
"github.com/mudler/LocalAI/pkg/system"
|
||||
"github.com/phayes/freeport"
|
||||
"github.com/rs/zerolog/log"
|
||||
@@ -48,6 +48,9 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
|
||||
|
||||
address := "127.0.0.1"
|
||||
|
||||
c, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
if r.NoRunner {
|
||||
// Let override which port and address to bind if the user
|
||||
// configure the llama-cpp service on its own
|
||||
@@ -59,7 +62,7 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
|
||||
p = r.RunnerPort
|
||||
}
|
||||
|
||||
_, err = p2p.ExposeService(context.Background(), address, p, r.Token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID))
|
||||
_, err = p2p.ExposeService(c, address, p, r.Token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -101,13 +104,15 @@ func (r *P2P) Run(ctx *cliContext.Context) error {
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = p2p.ExposeService(context.Background(), address, fmt.Sprint(port), r.Token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID))
|
||||
_, err = p2p.ExposeService(c, address, fmt.Sprint(port), r.Token, p2p.NetworkID(r.Peer2PeerNetworkID, p2p.WorkerID))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
signals.Handler(nil)
|
||||
signals.RegisterGracefulTerminationHandler(func() {
|
||||
cancel()
|
||||
})
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
@@ -63,6 +63,8 @@ type ApplicationConfig struct {
|
||||
WatchDogBusyTimeout, WatchDogIdleTimeout time.Duration
|
||||
|
||||
MachineTag string
|
||||
|
||||
APIAddress string
|
||||
}
|
||||
|
||||
type AppOption func(*ApplicationConfig)
|
||||
@@ -343,6 +345,12 @@ func WithDisableApiKeyRequirementForHttpGet(required bool) AppOption {
|
||||
}
|
||||
}
|
||||
|
||||
func WithAPIAddress(address string) AppOption {
|
||||
return func(o *ApplicationConfig) {
|
||||
o.APIAddress = address
|
||||
}
|
||||
}
|
||||
|
||||
var DisableMetricsEndpoint AppOption = func(o *ApplicationConfig) {
|
||||
o.DisableMetrics = true
|
||||
}
|
||||
|
||||
@@ -1,606 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
RAND_SEED = -1
|
||||
)
|
||||
|
||||
type TTSConfig struct {
|
||||
|
||||
// Voice wav path or id
|
||||
Voice string `yaml:"voice" json:"voice"`
|
||||
|
||||
AudioPath string `yaml:"audio_path" json:"audio_path"`
|
||||
}
|
||||
|
||||
// ModelConfig represents a model configuration
|
||||
type ModelConfig struct {
|
||||
schema.PredictionOptions `yaml:"parameters" json:"parameters"`
|
||||
Name string `yaml:"name" json:"name"`
|
||||
|
||||
F16 *bool `yaml:"f16" json:"f16"`
|
||||
Threads *int `yaml:"threads" json:"threads"`
|
||||
Debug *bool `yaml:"debug" json:"debug"`
|
||||
Roles map[string]string `yaml:"roles" json:"roles"`
|
||||
Embeddings *bool `yaml:"embeddings" json:"embeddings"`
|
||||
Backend string `yaml:"backend" json:"backend"`
|
||||
TemplateConfig TemplateConfig `yaml:"template" json:"template"`
|
||||
KnownUsecaseStrings []string `yaml:"known_usecases" json:"known_usecases"`
|
||||
KnownUsecases *ModelConfigUsecases `yaml:"-" json:"-"`
|
||||
Pipeline Pipeline `yaml:"pipeline" json:"pipeline"`
|
||||
|
||||
PromptStrings, InputStrings []string `yaml:"-" json:"-"`
|
||||
InputToken [][]int `yaml:"-" json:"-"`
|
||||
functionCallString, functionCallNameString string `yaml:"-" json:"-"`
|
||||
ResponseFormat string `yaml:"-" json:"-"`
|
||||
ResponseFormatMap map[string]interface{} `yaml:"-" json:"-"`
|
||||
|
||||
FunctionsConfig functions.FunctionsConfig `yaml:"function" json:"function"`
|
||||
|
||||
FeatureFlag FeatureFlag `yaml:"feature_flags" json:"feature_flags"` // Feature Flag registry. We move fast, and features may break on a per model/backend basis. Registry for (usually temporary) flags that indicate aborting something early.
|
||||
// LLM configs (GPT4ALL, Llama.cpp, ...)
|
||||
LLMConfig `yaml:",inline" json:",inline"`
|
||||
|
||||
// Diffusers
|
||||
Diffusers Diffusers `yaml:"diffusers" json:"diffusers"`
|
||||
Step int `yaml:"step" json:"step"`
|
||||
|
||||
// GRPC Options
|
||||
GRPC GRPC `yaml:"grpc" json:"grpc"`
|
||||
|
||||
// TTS specifics
|
||||
TTSConfig `yaml:"tts" json:"tts"`
|
||||
|
||||
// CUDA
|
||||
// Explicitly enable CUDA or not (some backends might need it)
|
||||
CUDA bool `yaml:"cuda" json:"cuda"`
|
||||
|
||||
DownloadFiles []File `yaml:"download_files" json:"download_files"`
|
||||
|
||||
Description string `yaml:"description" json:"description"`
|
||||
Usage string `yaml:"usage" json:"usage"`
|
||||
|
||||
Options []string `yaml:"options" json:"options"`
|
||||
Overrides []string `yaml:"overrides" json:"overrides"`
|
||||
}
|
||||
|
||||
// Pipeline defines other models to use for audio-to-audio
|
||||
type Pipeline struct {
|
||||
TTS string `yaml:"tts" json:"tts"`
|
||||
LLM string `yaml:"llm" json:"llm"`
|
||||
Transcription string `yaml:"transcription" json:"transcription"`
|
||||
VAD string `yaml:"vad" json:"vad"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Filename string `yaml:"filename" json:"filename"`
|
||||
SHA256 string `yaml:"sha256" json:"sha256"`
|
||||
URI downloader.URI `yaml:"uri" json:"uri"`
|
||||
}
|
||||
|
||||
type FeatureFlag map[string]*bool
|
||||
|
||||
func (ff FeatureFlag) Enabled(s string) bool {
|
||||
if v, exists := ff[s]; exists && v != nil {
|
||||
return *v
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type GRPC struct {
|
||||
Attempts int `yaml:"attempts" json:"attempts"`
|
||||
AttemptsSleepTime int `yaml:"attempts_sleep_time" json:"attempts_sleep_time"`
|
||||
}
|
||||
|
||||
type Diffusers struct {
|
||||
CUDA bool `yaml:"cuda" json:"cuda"`
|
||||
PipelineType string `yaml:"pipeline_type" json:"pipeline_type"`
|
||||
SchedulerType string `yaml:"scheduler_type" json:"scheduler_type"`
|
||||
EnableParameters string `yaml:"enable_parameters" json:"enable_parameters"` // A list of comma separated parameters to specify
|
||||
IMG2IMG bool `yaml:"img2img" json:"img2img"` // Image to Image Diffuser
|
||||
ClipSkip int `yaml:"clip_skip" json:"clip_skip"` // Skip every N frames
|
||||
ClipModel string `yaml:"clip_model" json:"clip_model"` // Clip model to use
|
||||
ClipSubFolder string `yaml:"clip_subfolder" json:"clip_subfolder"` // Subfolder to use for clip model
|
||||
ControlNet string `yaml:"control_net" json:"control_net"`
|
||||
}
|
||||
|
||||
// LLMConfig is a struct that holds the configuration that are
|
||||
// generic for most of the LLM backends.
|
||||
type LLMConfig struct {
|
||||
SystemPrompt string `yaml:"system_prompt" json:"system_prompt"`
|
||||
TensorSplit string `yaml:"tensor_split" json:"tensor_split"`
|
||||
MainGPU string `yaml:"main_gpu" json:"main_gpu"`
|
||||
RMSNormEps float32 `yaml:"rms_norm_eps" json:"rms_norm_eps"`
|
||||
NGQA int32 `yaml:"ngqa" json:"ngqa"`
|
||||
PromptCachePath string `yaml:"prompt_cache_path" json:"prompt_cache_path"`
|
||||
PromptCacheAll bool `yaml:"prompt_cache_all" json:"prompt_cache_all"`
|
||||
PromptCacheRO bool `yaml:"prompt_cache_ro" json:"prompt_cache_ro"`
|
||||
MirostatETA *float64 `yaml:"mirostat_eta" json:"mirostat_eta"`
|
||||
MirostatTAU *float64 `yaml:"mirostat_tau" json:"mirostat_tau"`
|
||||
Mirostat *int `yaml:"mirostat" json:"mirostat"`
|
||||
NGPULayers *int `yaml:"gpu_layers" json:"gpu_layers"`
|
||||
MMap *bool `yaml:"mmap" json:"mmap"`
|
||||
MMlock *bool `yaml:"mmlock" json:"mmlock"`
|
||||
LowVRAM *bool `yaml:"low_vram" json:"low_vram"`
|
||||
Reranking *bool `yaml:"reranking" json:"reranking"`
|
||||
Grammar string `yaml:"grammar" json:"grammar"`
|
||||
StopWords []string `yaml:"stopwords" json:"stopwords"`
|
||||
Cutstrings []string `yaml:"cutstrings" json:"cutstrings"`
|
||||
ExtractRegex []string `yaml:"extract_regex" json:"extract_regex"`
|
||||
TrimSpace []string `yaml:"trimspace" json:"trimspace"`
|
||||
TrimSuffix []string `yaml:"trimsuffix" json:"trimsuffix"`
|
||||
|
||||
ContextSize *int `yaml:"context_size" json:"context_size"`
|
||||
NUMA bool `yaml:"numa" json:"numa"`
|
||||
LoraAdapter string `yaml:"lora_adapter" json:"lora_adapter"`
|
||||
LoraBase string `yaml:"lora_base" json:"lora_base"`
|
||||
LoraAdapters []string `yaml:"lora_adapters" json:"lora_adapters"`
|
||||
LoraScales []float32 `yaml:"lora_scales" json:"lora_scales"`
|
||||
LoraScale float32 `yaml:"lora_scale" json:"lora_scale"`
|
||||
NoMulMatQ bool `yaml:"no_mulmatq" json:"no_mulmatq"`
|
||||
DraftModel string `yaml:"draft_model" json:"draft_model"`
|
||||
NDraft int32 `yaml:"n_draft" json:"n_draft"`
|
||||
Quantization string `yaml:"quantization" json:"quantization"`
|
||||
LoadFormat string `yaml:"load_format" json:"load_format"`
|
||||
GPUMemoryUtilization float32 `yaml:"gpu_memory_utilization" json:"gpu_memory_utilization"` // vLLM
|
||||
TrustRemoteCode bool `yaml:"trust_remote_code" json:"trust_remote_code"` // vLLM
|
||||
EnforceEager bool `yaml:"enforce_eager" json:"enforce_eager"` // vLLM
|
||||
SwapSpace int `yaml:"swap_space" json:"swap_space"` // vLLM
|
||||
MaxModelLen int `yaml:"max_model_len" json:"max_model_len"` // vLLM
|
||||
TensorParallelSize int `yaml:"tensor_parallel_size" json:"tensor_parallel_size"` // vLLM
|
||||
DisableLogStatus bool `yaml:"disable_log_stats" json:"disable_log_stats"` // vLLM
|
||||
DType string `yaml:"dtype" json:"dtype"` // vLLM
|
||||
LimitMMPerPrompt LimitMMPerPrompt `yaml:"limit_mm_per_prompt" json:"limit_mm_per_prompt"` // vLLM
|
||||
MMProj string `yaml:"mmproj" json:"mmproj"`
|
||||
|
||||
FlashAttention *string `yaml:"flash_attention" json:"flash_attention"`
|
||||
NoKVOffloading bool `yaml:"no_kv_offloading" json:"no_kv_offloading"`
|
||||
CacheTypeK string `yaml:"cache_type_k" json:"cache_type_k"`
|
||||
CacheTypeV string `yaml:"cache_type_v" json:"cache_type_v"`
|
||||
|
||||
RopeScaling string `yaml:"rope_scaling" json:"rope_scaling"`
|
||||
ModelType string `yaml:"type" json:"type"`
|
||||
|
||||
YarnExtFactor float32 `yaml:"yarn_ext_factor" json:"yarn_ext_factor"`
|
||||
YarnAttnFactor float32 `yaml:"yarn_attn_factor" json:"yarn_attn_factor"`
|
||||
YarnBetaFast float32 `yaml:"yarn_beta_fast" json:"yarn_beta_fast"`
|
||||
YarnBetaSlow float32 `yaml:"yarn_beta_slow" json:"yarn_beta_slow"`
|
||||
|
||||
CFGScale float32 `yaml:"cfg_scale" json:"cfg_scale"` // Classifier-Free Guidance Scale
|
||||
}
|
||||
|
||||
// LimitMMPerPrompt is a struct that holds the configuration for the limit-mm-per-prompt config in vLLM
|
||||
type LimitMMPerPrompt struct {
|
||||
LimitImagePerPrompt int `yaml:"image" json:"image"`
|
||||
LimitVideoPerPrompt int `yaml:"video" json:"video"`
|
||||
LimitAudioPerPrompt int `yaml:"audio" json:"audio"`
|
||||
}
|
||||
|
||||
// TemplateConfig is a struct that holds the configuration of the templating system
|
||||
type TemplateConfig struct {
|
||||
// Chat is the template used in the chat completion endpoint
|
||||
Chat string `yaml:"chat" json:"chat"`
|
||||
|
||||
// ChatMessage is the template used for chat messages
|
||||
ChatMessage string `yaml:"chat_message" json:"chat_message"`
|
||||
|
||||
// Completion is the template used for completion requests
|
||||
Completion string `yaml:"completion" json:"completion"`
|
||||
|
||||
// Edit is the template used for edit completion requests
|
||||
Edit string `yaml:"edit" json:"edit"`
|
||||
|
||||
// Functions is the template used when tools are present in the client requests
|
||||
Functions string `yaml:"function" json:"function"`
|
||||
|
||||
// UseTokenizerTemplate is a flag that indicates if the tokenizer template should be used.
|
||||
// Note: this is mostly consumed for backends such as vllm and transformers
|
||||
// that can use the tokenizers specified in the JSON config files of the models
|
||||
UseTokenizerTemplate bool `yaml:"use_tokenizer_template" json:"use_tokenizer_template"`
|
||||
|
||||
// JoinChatMessagesByCharacter is a string that will be used to join chat messages together.
|
||||
// It defaults to \n
|
||||
JoinChatMessagesByCharacter *string `yaml:"join_chat_messages_by_character" json:"join_chat_messages_by_character"`
|
||||
|
||||
Multimodal string `yaml:"multimodal" json:"multimodal"`
|
||||
|
||||
JinjaTemplate bool `yaml:"jinja_template" json:"jinja_template"`
|
||||
|
||||
ReplyPrefix string `yaml:"reply_prefix" json:"reply_prefix"`
|
||||
}
|
||||
|
||||
func (c *ModelConfig) UnmarshalYAML(value *yaml.Node) error {
|
||||
type BCAlias ModelConfig
|
||||
var aux BCAlias
|
||||
if err := value.Decode(&aux); err != nil {
|
||||
return err
|
||||
}
|
||||
*c = ModelConfig(aux)
|
||||
|
||||
c.KnownUsecases = GetUsecasesFromYAML(c.KnownUsecaseStrings)
|
||||
// Make sure the usecases are valid, we rewrite with what we identified
|
||||
c.KnownUsecaseStrings = []string{}
|
||||
for k, usecase := range GetAllModelConfigUsecases() {
|
||||
if c.HasUsecases(usecase) {
|
||||
c.KnownUsecaseStrings = append(c.KnownUsecaseStrings, k)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ModelConfig) SetFunctionCallString(s string) {
|
||||
c.functionCallString = s
|
||||
}
|
||||
|
||||
func (c *ModelConfig) SetFunctionCallNameString(s string) {
|
||||
c.functionCallNameString = s
|
||||
}
|
||||
|
||||
func (c *ModelConfig) ShouldUseFunctions() bool {
|
||||
return ((c.functionCallString != "none" || c.functionCallString == "") || c.ShouldCallSpecificFunction())
|
||||
}
|
||||
|
||||
func (c *ModelConfig) ShouldCallSpecificFunction() bool {
|
||||
return len(c.functionCallNameString) > 0
|
||||
}
|
||||
|
||||
// MMProjFileName returns the filename of the MMProj file
|
||||
// If the MMProj is a URL, it will return the MD5 of the URL which is the filename
|
||||
func (c *ModelConfig) MMProjFileName() string {
|
||||
uri := downloader.URI(c.MMProj)
|
||||
if uri.LooksLikeURL() {
|
||||
f, _ := uri.FilenameFromUrl()
|
||||
return f
|
||||
}
|
||||
|
||||
return c.MMProj
|
||||
}
|
||||
|
||||
func (c *ModelConfig) IsMMProjURL() bool {
|
||||
uri := downloader.URI(c.MMProj)
|
||||
return uri.LooksLikeURL()
|
||||
}
|
||||
|
||||
func (c *ModelConfig) IsModelURL() bool {
|
||||
uri := downloader.URI(c.Model)
|
||||
return uri.LooksLikeURL()
|
||||
}
|
||||
|
||||
// ModelFileName returns the filename of the model
|
||||
// If the model is a URL, it will return the MD5 of the URL which is the filename
|
||||
func (c *ModelConfig) ModelFileName() string {
|
||||
uri := downloader.URI(c.Model)
|
||||
if uri.LooksLikeURL() {
|
||||
f, _ := uri.FilenameFromUrl()
|
||||
return f
|
||||
}
|
||||
|
||||
return c.Model
|
||||
}
|
||||
|
||||
func (c *ModelConfig) FunctionToCall() string {
|
||||
if c.functionCallNameString != "" &&
|
||||
c.functionCallNameString != "none" && c.functionCallNameString != "auto" {
|
||||
return c.functionCallNameString
|
||||
}
|
||||
|
||||
return c.functionCallString
|
||||
}
|
||||
|
||||
func (cfg *ModelConfig) SetDefaults(opts ...ConfigLoaderOption) {
|
||||
lo := &LoadOptions{}
|
||||
lo.Apply(opts...)
|
||||
|
||||
ctx := lo.ctxSize
|
||||
threads := lo.threads
|
||||
f16 := lo.f16
|
||||
debug := lo.debug
|
||||
// https://github.com/ggerganov/llama.cpp/blob/75cd4c77292034ecec587ecb401366f57338f7c0/common/sampling.h#L22
|
||||
defaultTopP := 0.95
|
||||
defaultTopK := 40
|
||||
defaultTemp := 0.9
|
||||
// https://github.com/mudler/LocalAI/issues/2780
|
||||
defaultMirostat := 0
|
||||
defaultMirostatTAU := 5.0
|
||||
defaultMirostatETA := 0.1
|
||||
defaultTypicalP := 1.0
|
||||
defaultTFZ := 1.0
|
||||
defaultZero := 0
|
||||
|
||||
trueV := true
|
||||
falseV := false
|
||||
|
||||
if cfg.Seed == nil {
|
||||
// random number generator seed
|
||||
defaultSeed := RAND_SEED
|
||||
cfg.Seed = &defaultSeed
|
||||
}
|
||||
|
||||
if cfg.TopK == nil {
|
||||
cfg.TopK = &defaultTopK
|
||||
}
|
||||
|
||||
if cfg.TypicalP == nil {
|
||||
cfg.TypicalP = &defaultTypicalP
|
||||
}
|
||||
|
||||
if cfg.TFZ == nil {
|
||||
cfg.TFZ = &defaultTFZ
|
||||
}
|
||||
|
||||
if cfg.MMap == nil {
|
||||
// MMap is enabled by default
|
||||
|
||||
// Only exception is for Intel GPUs
|
||||
if os.Getenv("XPU") != "" {
|
||||
cfg.MMap = &falseV
|
||||
} else {
|
||||
cfg.MMap = &trueV
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.MMlock == nil {
|
||||
// MMlock is disabled by default
|
||||
cfg.MMlock = &falseV
|
||||
}
|
||||
|
||||
if cfg.TopP == nil {
|
||||
cfg.TopP = &defaultTopP
|
||||
}
|
||||
if cfg.Temperature == nil {
|
||||
cfg.Temperature = &defaultTemp
|
||||
}
|
||||
|
||||
if cfg.Maxtokens == nil {
|
||||
cfg.Maxtokens = &defaultZero
|
||||
}
|
||||
|
||||
if cfg.Mirostat == nil {
|
||||
cfg.Mirostat = &defaultMirostat
|
||||
}
|
||||
|
||||
if cfg.MirostatETA == nil {
|
||||
cfg.MirostatETA = &defaultMirostatETA
|
||||
}
|
||||
|
||||
if cfg.MirostatTAU == nil {
|
||||
cfg.MirostatTAU = &defaultMirostatTAU
|
||||
}
|
||||
|
||||
if cfg.LowVRAM == nil {
|
||||
cfg.LowVRAM = &falseV
|
||||
}
|
||||
|
||||
if cfg.Embeddings == nil {
|
||||
cfg.Embeddings = &falseV
|
||||
}
|
||||
|
||||
if cfg.Reranking == nil {
|
||||
cfg.Reranking = &falseV
|
||||
}
|
||||
|
||||
if threads == 0 {
|
||||
// Threads can't be 0
|
||||
threads = 4
|
||||
}
|
||||
|
||||
if cfg.Threads == nil {
|
||||
cfg.Threads = &threads
|
||||
}
|
||||
|
||||
if cfg.F16 == nil {
|
||||
cfg.F16 = &f16
|
||||
}
|
||||
|
||||
if cfg.Debug == nil {
|
||||
cfg.Debug = &falseV
|
||||
}
|
||||
|
||||
if debug {
|
||||
cfg.Debug = &trueV
|
||||
}
|
||||
|
||||
guessDefaultsFromFile(cfg, lo.modelPath, ctx)
|
||||
}
|
||||
|
||||
func (c *ModelConfig) Validate() bool {
|
||||
downloadedFileNames := []string{}
|
||||
for _, f := range c.DownloadFiles {
|
||||
downloadedFileNames = append(downloadedFileNames, f.Filename)
|
||||
}
|
||||
validationTargets := []string{c.Backend, c.Model, c.MMProj}
|
||||
validationTargets = append(validationTargets, downloadedFileNames...)
|
||||
// Simple validation to make sure the model can be correctly loaded
|
||||
for _, n := range validationTargets {
|
||||
if n == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(n, string(os.PathSeparator)) ||
|
||||
strings.Contains(n, "..") {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if c.Backend != "" {
|
||||
// a regex that checks that is a string name with no special characters, except '-' and '_'
|
||||
re := regexp.MustCompile(`^[a-zA-Z0-9-_]+$`)
|
||||
return re.MatchString(c.Backend)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *ModelConfig) HasTemplate() bool {
|
||||
return c.TemplateConfig.Completion != "" || c.TemplateConfig.Edit != "" || c.TemplateConfig.Chat != "" || c.TemplateConfig.ChatMessage != ""
|
||||
}
|
||||
|
||||
type ModelConfigUsecases int
|
||||
|
||||
const (
|
||||
FLAG_ANY ModelConfigUsecases = 0b000000000000
|
||||
FLAG_CHAT ModelConfigUsecases = 0b000000000001
|
||||
FLAG_COMPLETION ModelConfigUsecases = 0b000000000010
|
||||
FLAG_EDIT ModelConfigUsecases = 0b000000000100
|
||||
FLAG_EMBEDDINGS ModelConfigUsecases = 0b000000001000
|
||||
FLAG_RERANK ModelConfigUsecases = 0b000000010000
|
||||
FLAG_IMAGE ModelConfigUsecases = 0b000000100000
|
||||
FLAG_TRANSCRIPT ModelConfigUsecases = 0b000001000000
|
||||
FLAG_TTS ModelConfigUsecases = 0b000010000000
|
||||
FLAG_SOUND_GENERATION ModelConfigUsecases = 0b000100000000
|
||||
FLAG_TOKENIZE ModelConfigUsecases = 0b001000000000
|
||||
FLAG_VAD ModelConfigUsecases = 0b010000000000
|
||||
FLAG_VIDEO ModelConfigUsecases = 0b100000000000
|
||||
FLAG_DETECTION ModelConfigUsecases = 0b1000000000000
|
||||
|
||||
// Common Subsets
|
||||
FLAG_LLM ModelConfigUsecases = FLAG_CHAT | FLAG_COMPLETION | FLAG_EDIT
|
||||
)
|
||||
|
||||
func GetAllModelConfigUsecases() map[string]ModelConfigUsecases {
|
||||
return map[string]ModelConfigUsecases{
|
||||
"FLAG_ANY": FLAG_ANY,
|
||||
"FLAG_CHAT": FLAG_CHAT,
|
||||
"FLAG_COMPLETION": FLAG_COMPLETION,
|
||||
"FLAG_EDIT": FLAG_EDIT,
|
||||
"FLAG_EMBEDDINGS": FLAG_EMBEDDINGS,
|
||||
"FLAG_RERANK": FLAG_RERANK,
|
||||
"FLAG_IMAGE": FLAG_IMAGE,
|
||||
"FLAG_TRANSCRIPT": FLAG_TRANSCRIPT,
|
||||
"FLAG_TTS": FLAG_TTS,
|
||||
"FLAG_SOUND_GENERATION": FLAG_SOUND_GENERATION,
|
||||
"FLAG_TOKENIZE": FLAG_TOKENIZE,
|
||||
"FLAG_VAD": FLAG_VAD,
|
||||
"FLAG_LLM": FLAG_LLM,
|
||||
"FLAG_VIDEO": FLAG_VIDEO,
|
||||
"FLAG_DETECTION": FLAG_DETECTION,
|
||||
}
|
||||
}
|
||||
|
||||
func stringToFlag(s string) string {
|
||||
return "FLAG_" + strings.ToUpper(s)
|
||||
}
|
||||
|
||||
func GetUsecasesFromYAML(input []string) *ModelConfigUsecases {
|
||||
if len(input) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := FLAG_ANY
|
||||
flags := GetAllModelConfigUsecases()
|
||||
for _, str := range input {
|
||||
flag, exists := flags[stringToFlag(str)]
|
||||
if exists {
|
||||
result |= flag
|
||||
}
|
||||
}
|
||||
return &result
|
||||
}
|
||||
|
||||
// HasUsecases examines a ModelConfig and determines which endpoints have a chance of success.
|
||||
func (c *ModelConfig) HasUsecases(u ModelConfigUsecases) bool {
|
||||
if (c.KnownUsecases != nil) && ((u & *c.KnownUsecases) == u) {
|
||||
return true
|
||||
}
|
||||
return c.GuessUsecases(u)
|
||||
}
|
||||
|
||||
// GuessUsecases is a **heuristic based** function, as the backend in question may not be loaded yet, and the config may not record what it's useful at.
|
||||
// In its current state, this function should ideally check for properties of the config like templates, rather than the direct backend name checks for the lower half.
|
||||
// This avoids the maintenance burden of updating this list for each new backend - but unfortunately, that's the best option for some services currently.
|
||||
func (c *ModelConfig) GuessUsecases(u ModelConfigUsecases) bool {
|
||||
if (u & FLAG_CHAT) == FLAG_CHAT {
|
||||
if c.TemplateConfig.Chat == "" && c.TemplateConfig.ChatMessage == "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_COMPLETION) == FLAG_COMPLETION {
|
||||
if c.TemplateConfig.Completion == "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_EDIT) == FLAG_EDIT {
|
||||
if c.TemplateConfig.Edit == "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_EMBEDDINGS) == FLAG_EMBEDDINGS {
|
||||
if c.Embeddings == nil || !*c.Embeddings {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_IMAGE) == FLAG_IMAGE {
|
||||
imageBackends := []string{"diffusers", "stablediffusion", "stablediffusion-ggml"}
|
||||
if !slices.Contains(imageBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.Backend == "diffusers" && c.Diffusers.PipelineType == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
if (u & FLAG_VIDEO) == FLAG_VIDEO {
|
||||
videoBackends := []string{"diffusers", "stablediffusion"}
|
||||
if !slices.Contains(videoBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.Backend == "diffusers" && c.Diffusers.PipelineType == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
if (u & FLAG_RERANK) == FLAG_RERANK {
|
||||
if c.Backend != "rerankers" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_TRANSCRIPT) == FLAG_TRANSCRIPT {
|
||||
if c.Backend != "whisper" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_TTS) == FLAG_TTS {
|
||||
ttsBackends := []string{"bark-cpp", "piper", "transformers-musicgen"}
|
||||
if !slices.Contains(ttsBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_DETECTION) == FLAG_DETECTION {
|
||||
if c.Backend != "rfdetr" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_SOUND_GENERATION) == FLAG_SOUND_GENERATION {
|
||||
if c.Backend != "transformers-musicgen" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_TOKENIZE) == FLAG_TOKENIZE {
|
||||
tokenizeCapableBackends := []string{"llama.cpp", "rwkv"}
|
||||
if !slices.Contains(tokenizeCapableBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_VAD) == FLAG_VAD {
|
||||
if c.Backend != "silero-vad" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
@@ -1,151 +1,17 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/LocalAI/pkg/xsysinfo"
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
gguf "github.com/gpustack/gguf-parser-go"
|
||||
)
|
||||
|
||||
type familyType uint8
|
||||
|
||||
const (
|
||||
Unknown familyType = iota
|
||||
LLaMa3
|
||||
CommandR
|
||||
Phi3
|
||||
ChatML
|
||||
Mistral03
|
||||
Gemma
|
||||
DeepSeek2
|
||||
)
|
||||
|
||||
const (
|
||||
defaultContextSize = 1024
|
||||
defaultNGPULayers = 99999999
|
||||
)
|
||||
|
||||
type settingsConfig struct {
|
||||
StopWords []string
|
||||
TemplateConfig TemplateConfig
|
||||
RepeatPenalty float64
|
||||
}
|
||||
|
||||
// default settings to adopt with a given model family
|
||||
var defaultsSettings map[familyType]settingsConfig = map[familyType]settingsConfig{
|
||||
Gemma: {
|
||||
RepeatPenalty: 1.0,
|
||||
StopWords: []string{"<|im_end|>", "<end_of_turn>", "<start_of_turn>"},
|
||||
TemplateConfig: TemplateConfig{
|
||||
Chat: "{{.Input }}\n<start_of_turn>model\n",
|
||||
ChatMessage: "<start_of_turn>{{if eq .RoleName \"assistant\" }}model{{else}}{{ .RoleName }}{{end}}\n{{ if .Content -}}\n{{.Content -}}\n{{ end -}}<end_of_turn>",
|
||||
Completion: "{{.Input}}",
|
||||
},
|
||||
},
|
||||
DeepSeek2: {
|
||||
StopWords: []string{"<|end▁of▁sentence|>"},
|
||||
TemplateConfig: TemplateConfig{
|
||||
ChatMessage: `{{if eq .RoleName "user" -}}User: {{.Content }}
|
||||
{{ end -}}
|
||||
{{if eq .RoleName "assistant" -}}Assistant: {{.Content}}<|end▁of▁sentence|>{{end}}
|
||||
{{if eq .RoleName "system" -}}{{.Content}}
|
||||
{{end -}}`,
|
||||
Chat: "{{.Input -}}\nAssistant: ",
|
||||
},
|
||||
},
|
||||
LLaMa3: {
|
||||
StopWords: []string{"<|eot_id|>"},
|
||||
TemplateConfig: TemplateConfig{
|
||||
Chat: "<|begin_of_text|>{{.Input }}\n<|start_header_id|>assistant<|end_header_id|>",
|
||||
ChatMessage: "<|start_header_id|>{{ .RoleName }}<|end_header_id|>\n\n{{.Content }}<|eot_id|>",
|
||||
},
|
||||
},
|
||||
CommandR: {
|
||||
TemplateConfig: TemplateConfig{
|
||||
Chat: "{{.Input -}}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
|
||||
Functions: `<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>
|
||||
You are a function calling AI model, you can call the following functions:
|
||||
## Available Tools
|
||||
{{range .Functions}}
|
||||
- {"type": "function", "function": {"name": "{{.Name}}", "description": "{{.Description}}", "parameters": {{toJson .Parameters}} }}
|
||||
{{end}}
|
||||
When using a tool, reply with JSON, for instance {"name": "tool_name", "arguments": {"param1": "value1", "param2": "value2"}}
|
||||
<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{{.Input -}}`,
|
||||
ChatMessage: `{{if eq .RoleName "user" -}}
|
||||
<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{.Content}}<|END_OF_TURN_TOKEN|>
|
||||
{{- else if eq .RoleName "system" -}}
|
||||
<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{.Content}}<|END_OF_TURN_TOKEN|>
|
||||
{{- else if eq .RoleName "assistant" -}}
|
||||
<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{{.Content}}<|END_OF_TURN_TOKEN|>
|
||||
{{- else if eq .RoleName "tool" -}}
|
||||
<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{.Content}}<|END_OF_TURN_TOKEN|>
|
||||
{{- else if .FunctionCall -}}
|
||||
<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{{toJson .FunctionCall}}}<|END_OF_TURN_TOKEN|>
|
||||
{{- end -}}`,
|
||||
},
|
||||
StopWords: []string{"<|END_OF_TURN_TOKEN|>"},
|
||||
},
|
||||
Phi3: {
|
||||
TemplateConfig: TemplateConfig{
|
||||
Chat: "{{.Input}}\n<|assistant|>",
|
||||
ChatMessage: "<|{{ .RoleName }}|>\n{{.Content}}<|end|>",
|
||||
Completion: "{{.Input}}",
|
||||
},
|
||||
StopWords: []string{"<|end|>", "<|endoftext|>"},
|
||||
},
|
||||
ChatML: {
|
||||
TemplateConfig: TemplateConfig{
|
||||
Chat: "{{.Input -}}\n<|im_start|>assistant",
|
||||
Functions: `<|im_start|>system
|
||||
You are a function calling AI model. You are provided with functions to execute. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools:
|
||||
{{range .Functions}}
|
||||
{'type': 'function', 'function': {'name': '{{.Name}}', 'description': '{{.Description}}', 'parameters': {{toJson .Parameters}} }}
|
||||
{{end}}
|
||||
For each function call return a json object with function name and arguments
|
||||
<|im_end|>
|
||||
{{.Input -}}
|
||||
<|im_start|>assistant`,
|
||||
ChatMessage: `<|im_start|>{{ .RoleName }}
|
||||
{{ if .FunctionCall -}}
|
||||
Function call:
|
||||
{{ else if eq .RoleName "tool" -}}
|
||||
Function response:
|
||||
{{ end -}}
|
||||
{{ if .Content -}}
|
||||
{{.Content }}
|
||||
{{ end -}}
|
||||
{{ if .FunctionCall -}}
|
||||
{{toJson .FunctionCall}}
|
||||
{{ end -}}<|im_end|>`,
|
||||
},
|
||||
StopWords: []string{"<|im_end|>", "<dummy32000>", "</s>"},
|
||||
},
|
||||
Mistral03: {
|
||||
TemplateConfig: TemplateConfig{
|
||||
Chat: "{{.Input -}}",
|
||||
Functions: `[AVAILABLE_TOOLS] [{{range .Functions}}{"type": "function", "function": {"name": "{{.Name}}", "description": "{{.Description}}", "parameters": {{toJson .Parameters}} }}{{end}} ] [/AVAILABLE_TOOLS]{{.Input }}`,
|
||||
ChatMessage: `{{if eq .RoleName "user" -}}
|
||||
[INST] {{.Content }} [/INST]
|
||||
{{- else if .FunctionCall -}}
|
||||
[TOOL_CALLS] {{toJson .FunctionCall}} [/TOOL_CALLS]
|
||||
{{- else if eq .RoleName "tool" -}}
|
||||
[TOOL_RESULTS] {{.Content}} [/TOOL_RESULTS]
|
||||
{{- else -}}
|
||||
{{ .Content -}}
|
||||
{{ end -}}`,
|
||||
},
|
||||
StopWords: []string{"<|im_end|>", "<dummy32000>", "</tool_call>", "<|eot_id|>", "<|end_of_text|>", "</s>", "[/TOOL_CALLS]", "[/ACTIONS]"},
|
||||
},
|
||||
}
|
||||
|
||||
// this maps well known template used in HF to model families defined above
|
||||
var knownTemplates = map[string]familyType{
|
||||
`{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|im_start|>user\n' + content + '<|im_end|>\n<|im_start|>assistant\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|im_end|>' + '\n' }}{% endif %}{% endfor %}`: ChatML,
|
||||
`{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}`: Mistral03,
|
||||
}
|
||||
|
||||
func guessGGUFFromFile(cfg *ModelConfig, f *gguf.GGUFFile, defaultCtx int) {
|
||||
|
||||
if defaultCtx == 0 && cfg.ContextSize == nil {
|
||||
@@ -216,81 +82,9 @@ func guessGGUFFromFile(cfg *ModelConfig, f *gguf.GGUFFile, defaultCtx int) {
|
||||
cfg.Name = f.Metadata().Name
|
||||
}
|
||||
|
||||
family := identifyFamily(f)
|
||||
|
||||
if family == Unknown {
|
||||
log.Debug().Msgf("guessDefaultsFromFile: %s", "family not identified")
|
||||
return
|
||||
}
|
||||
|
||||
// identify template
|
||||
settings, ok := defaultsSettings[family]
|
||||
if ok {
|
||||
cfg.TemplateConfig = settings.TemplateConfig
|
||||
log.Debug().Any("family", family).Msgf("guessDefaultsFromFile: guessed template %+v", cfg.TemplateConfig)
|
||||
if len(cfg.StopWords) == 0 {
|
||||
cfg.StopWords = settings.StopWords
|
||||
}
|
||||
if cfg.RepeatPenalty == 0.0 {
|
||||
cfg.RepeatPenalty = settings.RepeatPenalty
|
||||
}
|
||||
} else {
|
||||
log.Debug().Any("family", family).Msgf("guessDefaultsFromFile: no template found for family")
|
||||
}
|
||||
|
||||
if cfg.HasTemplate() {
|
||||
return
|
||||
}
|
||||
|
||||
// identify from well known templates first, otherwise use the raw jinja template
|
||||
chatTemplate, found := f.Header.MetadataKV.Get("tokenizer.chat_template")
|
||||
if found {
|
||||
// try to use the jinja template
|
||||
cfg.TemplateConfig.JinjaTemplate = true
|
||||
cfg.TemplateConfig.ChatMessage = chatTemplate.ValueString()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func identifyFamily(f *gguf.GGUFFile) familyType {
|
||||
|
||||
// identify from well known templates first
|
||||
chatTemplate, found := f.Header.MetadataKV.Get("tokenizer.chat_template")
|
||||
if found && chatTemplate.ValueString() != "" {
|
||||
if family, ok := knownTemplates[chatTemplate.ValueString()]; ok {
|
||||
return family
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise try to identify from the model properties
|
||||
arch := f.Architecture().Architecture
|
||||
eosTokenID := f.Tokenizer().EOSTokenID
|
||||
bosTokenID := f.Tokenizer().BOSTokenID
|
||||
|
||||
isYI := arch == "llama" && bosTokenID == 1 && eosTokenID == 2
|
||||
// WTF! Mistral0.3 and isYi have same bosTokenID and eosTokenID
|
||||
|
||||
llama3 := arch == "llama" && eosTokenID == 128009
|
||||
commandR := arch == "command-r" && eosTokenID == 255001
|
||||
qwen2 := arch == "qwen2"
|
||||
phi3 := arch == "phi-3"
|
||||
gemma := strings.HasPrefix(arch, "gemma") || strings.Contains(strings.ToLower(f.Metadata().Name), "gemma")
|
||||
deepseek2 := arch == "deepseek2"
|
||||
|
||||
switch {
|
||||
case deepseek2:
|
||||
return DeepSeek2
|
||||
case gemma:
|
||||
return Gemma
|
||||
case llama3:
|
||||
return LLaMa3
|
||||
case commandR:
|
||||
return CommandR
|
||||
case phi3:
|
||||
return Phi3
|
||||
case qwen2, isYI:
|
||||
return ChatML
|
||||
default:
|
||||
return Unknown
|
||||
}
|
||||
// Instruct to use template from llama.cpp
|
||||
cfg.TemplateConfig.UseTokenizerTemplate = true
|
||||
cfg.FunctionsConfig.GrammarConfig.NoGrammar = true
|
||||
cfg.Options = append(cfg.Options, "use_jinja:true")
|
||||
cfg.KnownUsecaseStrings = append(cfg.KnownUsecaseStrings, "FLAG_CHAT")
|
||||
}
|
||||
|
||||
core/config/model_config.go (new file, 713 lines)
@@ -0,0 +1,713 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/LocalAI/core/schema"
|
||||
"github.com/mudler/LocalAI/pkg/downloader"
|
||||
"github.com/mudler/LocalAI/pkg/functions"
|
||||
"github.com/mudler/cogito"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
const (
|
||||
RAND_SEED = -1
|
||||
)
|
||||
|
||||
// @Description TTS configuration
|
||||
type TTSConfig struct {
|
||||
|
||||
// Voice wav path or id
|
||||
Voice string `yaml:"voice,omitempty" json:"voice,omitempty"`
|
||||
|
||||
AudioPath string `yaml:"audio_path,omitempty" json:"audio_path,omitempty"`
|
||||
}
|
||||
|
||||
// @Description ModelConfig represents a model configuration
|
||||
type ModelConfig struct {
|
||||
modelConfigFile string `yaml:"-" json:"-"`
|
||||
schema.PredictionOptions `yaml:"parameters,omitempty" json:"parameters,omitempty"`
|
||||
Name string `yaml:"name,omitempty" json:"name,omitempty"`
|
||||
|
||||
F16 *bool `yaml:"f16,omitempty" json:"f16,omitempty"`
|
||||
Threads *int `yaml:"threads,omitempty" json:"threads,omitempty"`
|
||||
Debug *bool `yaml:"debug,omitempty" json:"debug,omitempty"`
|
||||
Roles map[string]string `yaml:"roles,omitempty" json:"roles,omitempty"`
|
||||
Embeddings *bool `yaml:"embeddings,omitempty" json:"embeddings,omitempty"`
|
||||
Backend string `yaml:"backend,omitempty" json:"backend,omitempty"`
|
||||
TemplateConfig TemplateConfig `yaml:"template,omitempty" json:"template,omitempty"`
|
||||
KnownUsecaseStrings []string `yaml:"known_usecases,omitempty" json:"known_usecases,omitempty"`
|
||||
KnownUsecases *ModelConfigUsecases `yaml:"-" json:"-"`
|
||||
Pipeline Pipeline `yaml:"pipeline,omitempty" json:"pipeline,omitempty"`
|
||||
|
||||
PromptStrings, InputStrings []string `yaml:"-" json:"-"`
|
||||
InputToken [][]int `yaml:"-" json:"-"`
|
||||
functionCallString, functionCallNameString string `yaml:"-" json:"-"`
|
||||
ResponseFormat string `yaml:"-" json:"-"`
|
||||
ResponseFormatMap map[string]interface{} `yaml:"-" json:"-"`
|
||||
|
||||
FunctionsConfig functions.FunctionsConfig `yaml:"function,omitempty" json:"function,omitempty"`
|
||||
|
||||
FeatureFlag FeatureFlag `yaml:"feature_flags,omitempty" json:"feature_flags,omitempty"` // Feature Flag registry. We move fast, and features may break on a per model/backend basis. Registry for (usually temporary) flags that indicate aborting something early.
|
||||
// LLM configs (GPT4ALL, Llama.cpp, ...)
|
||||
LLMConfig `yaml:",inline" json:",inline"`
|
||||
|
||||
// Diffusers
|
||||
Diffusers Diffusers `yaml:"diffusers,omitempty" json:"diffusers,omitempty"`
|
||||
Step int `yaml:"step,omitempty" json:"step,omitempty"`
|
||||
|
||||
// GRPC Options
|
||||
GRPC GRPC `yaml:"grpc,omitempty" json:"grpc,omitempty"`
|
||||
|
||||
// TTS specifics
|
||||
TTSConfig `yaml:"tts,omitempty" json:"tts,omitempty"`
|
||||
|
||||
// CUDA
|
||||
// Explicitly enable CUDA or not (some backends might need it)
|
||||
CUDA bool `yaml:"cuda,omitempty" json:"cuda,omitempty"`
|
||||
|
||||
DownloadFiles []File `yaml:"download_files,omitempty" json:"download_files,omitempty"`
|
||||
|
||||
Description string `yaml:"description,omitempty" json:"description,omitempty"`
|
||||
Usage string `yaml:"usage,omitempty" json:"usage,omitempty"`
|
||||
|
||||
Options []string `yaml:"options,omitempty" json:"options,omitempty"`
|
||||
Overrides []string `yaml:"overrides,omitempty" json:"overrides,omitempty"`
|
||||
|
||||
MCP MCPConfig `yaml:"mcp,omitempty" json:"mcp,omitempty"`
|
||||
Agent AgentConfig `yaml:"agent,omitempty" json:"agent,omitempty"`
|
||||
}
|
||||
|
||||
// @Description MCP configuration
|
||||
type MCPConfig struct {
|
||||
Servers string `yaml:"remote,omitempty" json:"remote,omitempty"`
|
||||
Stdio string `yaml:"stdio,omitempty" json:"stdio,omitempty"`
|
||||
}
|
||||
|
||||
// @Description Agent configuration
|
||||
type AgentConfig struct {
|
||||
MaxAttempts int `yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"`
|
||||
MaxIterations int `yaml:"max_iterations,omitempty" json:"max_iterations,omitempty"`
|
||||
EnableReasoning bool `yaml:"enable_reasoning,omitempty" json:"enable_reasoning,omitempty"`
|
||||
EnablePlanning bool `yaml:"enable_planning,omitempty" json:"enable_planning,omitempty"`
|
||||
EnableMCPPrompts bool `yaml:"enable_mcp_prompts,omitempty" json:"enable_mcp_prompts,omitempty"`
|
||||
EnablePlanReEvaluator bool `yaml:"enable_plan_re_evaluator,omitempty" json:"enable_plan_re_evaluator,omitempty"`
|
||||
}
|
||||
|
||||
func (c *MCPConfig) MCPConfigFromYAML() (MCPGenericConfig[MCPRemoteServers], MCPGenericConfig[MCPSTDIOServers], error) {
|
||||
var remote MCPGenericConfig[MCPRemoteServers]
|
||||
var stdio MCPGenericConfig[MCPSTDIOServers]
|
||||
|
||||
if err := yaml.Unmarshal([]byte(c.Servers), &remote); err != nil {
|
||||
return remote, stdio, err
|
||||
}
|
||||
|
||||
if err := yaml.Unmarshal([]byte(c.Stdio), &stdio); err != nil {
|
||||
return remote, stdio, err
|
||||
}
|
||||
return remote, stdio, nil
|
||||
}
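// Illustrative sketch (not part of the change): how the raw YAML strings held in
// MCPConfig decode into typed server maps. The server names and URL below are
// hypothetical; the stdio payload mirrors the test fixture added later in this PR.
func exampleMCPConfigFromYAML() {
	cfg := MCPConfig{
		Servers: `mcpServers:
  search:
    url: "http://localhost:8080/mcp"
    token: "secret"`,
		Stdio: `mcpServers:
  test:
    command: echo
    args: ["hello"]`,
	}
	remote, stdio, err := cfg.MCPConfigFromYAML()
	if err != nil {
		return // malformed YAML in either string aborts both
	}
	fmt.Println(remote.Servers["search"].URL)  // http://localhost:8080/mcp
	fmt.Println(stdio.Servers["test"].Command) // echo
}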
|
||||
|
||||
// @Description MCP generic configuration
|
||||
type MCPGenericConfig[T any] struct {
|
||||
Servers T `yaml:"mcpServers,omitempty" json:"mcpServers,omitempty"`
|
||||
}
|
||||
type MCPRemoteServers map[string]MCPRemoteServer
|
||||
type MCPSTDIOServers map[string]MCPSTDIOServer
|
||||
|
||||
// @Description MCP remote server configuration
|
||||
type MCPRemoteServer struct {
|
||||
URL string `json:"url,omitempty"`
|
||||
Token string `json:"token,omitempty"`
|
||||
}
|
||||
|
||||
// @Description MCP STDIO server configuration
|
||||
type MCPSTDIOServer struct {
|
||||
Args []string `json:"args,omitempty"`
|
||||
Env map[string]string `json:"env,omitempty"`
|
||||
Command string `json:"command,omitempty"`
|
||||
}
|
||||
|
||||
// @Description Pipeline defines other models to use for audio-to-audio
|
||||
type Pipeline struct {
|
||||
TTS string `yaml:"tts,omitempty" json:"tts,omitempty"`
|
||||
LLM string `yaml:"llm,omitempty" json:"llm,omitempty"`
|
||||
Transcription string `yaml:"transcription,omitempty" json:"transcription,omitempty"`
|
||||
VAD string `yaml:"vad,omitempty" json:"vad,omitempty"`
|
||||
}
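// Illustrative sketch: a Pipeline as it would be expressed under `pipeline:` in a
// model YAML, wiring the four stages of an audio-to-audio flow. The model names
// are hypothetical examples.
func examplePipeline() Pipeline {
	return Pipeline{
		VAD:           "silero-vad",
		Transcription: "whisper-base",
		LLM:           "hermes-2-pro-mistral",
		TTS:           "en-us-voice",
	}
}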
|
||||
|
||||
// @Description File configuration for model downloads
|
||||
type File struct {
|
||||
Filename string `yaml:"filename,omitempty" json:"filename,omitempty"`
|
||||
SHA256 string `yaml:"sha256,omitempty" json:"sha256,omitempty"`
|
||||
URI downloader.URI `yaml:"uri,omitempty" json:"uri,omitempty"`
|
||||
}
|
||||
|
||||
type FeatureFlag map[string]*bool
|
||||
|
||||
func (ff FeatureFlag) Enabled(s string) bool {
|
||||
if v, exists := ff[s]; exists && v != nil {
|
||||
return *v
|
||||
}
|
||||
return false
|
||||
}
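// Illustrative sketch: feature flags are tri-state pointers, so only an explicitly
// set true value reads as enabled. The "usage" key is a hypothetical example.
func exampleFeatureFlag() {
	enabled := true
	ff := FeatureFlag{"usage": &enabled}
	fmt.Println(ff.Enabled("usage"))   // true
	fmt.Println(ff.Enabled("missing")) // false: absent keys default to disabled
}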
|
||||
|
||||
// @Description GRPC configuration
|
||||
type GRPC struct {
|
||||
Attempts int `yaml:"attempts,omitempty" json:"attempts,omitempty"`
|
||||
AttemptsSleepTime int `yaml:"attempts_sleep_time,omitempty" json:"attempts_sleep_time,omitempty"`
|
||||
}
|
||||
|
||||
// @Description Diffusers configuration
|
||||
type Diffusers struct {
|
||||
CUDA bool `yaml:"cuda,omitempty" json:"cuda,omitempty"`
|
||||
PipelineType string `yaml:"pipeline_type,omitempty" json:"pipeline_type,omitempty"`
|
||||
SchedulerType string `yaml:"scheduler_type,omitempty" json:"scheduler_type,omitempty"`
|
||||
EnableParameters string `yaml:"enable_parameters,omitempty" json:"enable_parameters,omitempty"` // A list of comma separated parameters to specify
|
||||
IMG2IMG bool `yaml:"img2img,omitempty" json:"img2img,omitempty"` // Image to Image Diffuser
|
||||
ClipSkip int `yaml:"clip_skip,omitempty" json:"clip_skip,omitempty"` // Number of final CLIP text-encoder layers to skip
|
||||
ClipModel string `yaml:"clip_model,omitempty" json:"clip_model,omitempty"` // Clip model to use
|
||||
ClipSubFolder string `yaml:"clip_subfolder,omitempty" json:"clip_subfolder,omitempty"` // Subfolder to use for clip model
|
||||
ControlNet string `yaml:"control_net,omitempty" json:"control_net,omitempty"`
|
||||
}
|
||||
|
||||
// @Description LLMConfig is a struct that holds the configuration options that are generic for most of the LLM backends.
|
||||
type LLMConfig struct {
|
||||
SystemPrompt string `yaml:"system_prompt,omitempty" json:"system_prompt,omitempty"`
|
||||
TensorSplit string `yaml:"tensor_split,omitempty" json:"tensor_split,omitempty"`
|
||||
MainGPU string `yaml:"main_gpu,omitempty" json:"main_gpu,omitempty"`
|
||||
RMSNormEps float32 `yaml:"rms_norm_eps,omitempty" json:"rms_norm_eps,omitempty"`
|
||||
NGQA int32 `yaml:"ngqa,omitempty" json:"ngqa,omitempty"`
|
||||
PromptCachePath string `yaml:"prompt_cache_path,omitempty" json:"prompt_cache_path,omitempty"`
|
||||
PromptCacheAll bool `yaml:"prompt_cache_all,omitempty" json:"prompt_cache_all,omitempty"`
|
||||
PromptCacheRO bool `yaml:"prompt_cache_ro,omitempty" json:"prompt_cache_ro,omitempty"`
|
||||
MirostatETA *float64 `yaml:"mirostat_eta,omitempty" json:"mirostat_eta,omitempty"`
|
||||
MirostatTAU *float64 `yaml:"mirostat_tau,omitempty" json:"mirostat_tau,omitempty"`
|
||||
Mirostat *int `yaml:"mirostat,omitempty" json:"mirostat,omitempty"`
|
||||
NGPULayers *int `yaml:"gpu_layers,omitempty" json:"gpu_layers,omitempty"`
|
||||
MMap *bool `yaml:"mmap,omitempty" json:"mmap,omitempty"`
|
||||
MMlock *bool `yaml:"mmlock,omitempty" json:"mmlock,omitempty"`
|
||||
LowVRAM *bool `yaml:"low_vram,omitempty" json:"low_vram,omitempty"`
|
||||
Reranking *bool `yaml:"reranking,omitempty" json:"reranking,omitempty"`
|
||||
Grammar string `yaml:"grammar,omitempty" json:"grammar,omitempty"`
|
||||
StopWords []string `yaml:"stopwords,omitempty" json:"stopwords,omitempty"`
|
||||
Cutstrings []string `yaml:"cutstrings,omitempty" json:"cutstrings,omitempty"`
|
||||
ExtractRegex []string `yaml:"extract_regex,omitempty" json:"extract_regex,omitempty"`
|
||||
TrimSpace []string `yaml:"trimspace,omitempty" json:"trimspace,omitempty"`
|
||||
TrimSuffix []string `yaml:"trimsuffix,omitempty" json:"trimsuffix,omitempty"`
|
||||
|
||||
ContextSize *int `yaml:"context_size,omitempty" json:"context_size,omitempty"`
|
||||
NUMA bool `yaml:"numa,omitempty" json:"numa,omitempty"`
|
||||
LoraAdapter string `yaml:"lora_adapter,omitempty" json:"lora_adapter,omitempty"`
|
||||
LoraBase string `yaml:"lora_base,omitempty" json:"lora_base,omitempty"`
|
||||
LoraAdapters []string `yaml:"lora_adapters,omitempty" json:"lora_adapters,omitempty"`
|
||||
LoraScales []float32 `yaml:"lora_scales,omitempty" json:"lora_scales,omitempty"`
|
||||
LoraScale float32 `yaml:"lora_scale,omitempty" json:"lora_scale,omitempty"`
|
||||
NoMulMatQ bool `yaml:"no_mulmatq,omitempty" json:"no_mulmatq,omitempty"`
|
||||
DraftModel string `yaml:"draft_model,omitempty" json:"draft_model,omitempty"`
|
||||
NDraft int32 `yaml:"n_draft,omitempty" json:"n_draft,omitempty"`
|
||||
Quantization string `yaml:"quantization,omitempty" json:"quantization,omitempty"`
|
||||
LoadFormat string `yaml:"load_format,omitempty" json:"load_format,omitempty"`
|
||||
GPUMemoryUtilization float32 `yaml:"gpu_memory_utilization,omitempty" json:"gpu_memory_utilization,omitempty"` // vLLM
|
||||
TrustRemoteCode bool `yaml:"trust_remote_code,omitempty" json:"trust_remote_code,omitempty"` // vLLM
|
||||
EnforceEager bool `yaml:"enforce_eager,omitempty" json:"enforce_eager,omitempty"` // vLLM
|
||||
SwapSpace int `yaml:"swap_space,omitempty" json:"swap_space,omitempty"` // vLLM
|
||||
MaxModelLen int `yaml:"max_model_len,omitempty" json:"max_model_len,omitempty"` // vLLM
|
||||
TensorParallelSize int `yaml:"tensor_parallel_size,omitempty" json:"tensor_parallel_size,omitempty"` // vLLM
|
||||
DisableLogStatus bool `yaml:"disable_log_stats,omitempty" json:"disable_log_stats,omitempty"` // vLLM
|
||||
DType string `yaml:"dtype,omitempty" json:"dtype,omitempty"` // vLLM
|
||||
LimitMMPerPrompt LimitMMPerPrompt `yaml:"limit_mm_per_prompt,omitempty" json:"limit_mm_per_prompt,omitempty"` // vLLM
|
||||
MMProj string `yaml:"mmproj,omitempty" json:"mmproj,omitempty"`
|
||||
|
||||
FlashAttention *string `yaml:"flash_attention,omitempty" json:"flash_attention,omitempty"`
|
||||
NoKVOffloading bool `yaml:"no_kv_offloading,omitempty" json:"no_kv_offloading,omitempty"`
|
||||
CacheTypeK string `yaml:"cache_type_k,omitempty" json:"cache_type_k,omitempty"`
|
||||
CacheTypeV string `yaml:"cache_type_v,omitempty" json:"cache_type_v,omitempty"`
|
||||
|
||||
RopeScaling string `yaml:"rope_scaling,omitempty" json:"rope_scaling,omitempty"`
|
||||
ModelType string `yaml:"type,omitempty" json:"type,omitempty"`
|
||||
|
||||
YarnExtFactor float32 `yaml:"yarn_ext_factor,omitempty" json:"yarn_ext_factor,omitempty"`
|
||||
YarnAttnFactor float32 `yaml:"yarn_attn_factor,omitempty" json:"yarn_attn_factor,omitempty"`
|
||||
YarnBetaFast float32 `yaml:"yarn_beta_fast,omitempty" json:"yarn_beta_fast,omitempty"`
|
||||
YarnBetaSlow float32 `yaml:"yarn_beta_slow,omitempty" json:"yarn_beta_slow,omitempty"`
|
||||
|
||||
CFGScale float32 `yaml:"cfg_scale,omitempty" json:"cfg_scale,omitempty"` // Classifier-Free Guidance Scale
|
||||
}
|
||||
|
||||
// @Description LimitMMPerPrompt is a struct that holds the configuration for the limit-mm-per-prompt config in vLLM
|
||||
type LimitMMPerPrompt struct {
|
||||
LimitImagePerPrompt int `yaml:"image,omitempty" json:"image,omitempty"`
|
||||
LimitVideoPerPrompt int `yaml:"video,omitempty" json:"video,omitempty"`
|
||||
LimitAudioPerPrompt int `yaml:"audio,omitempty" json:"audio,omitempty"`
|
||||
}
|
||||
|
||||
// @Description TemplateConfig is a struct that holds the configuration of the templating system
|
||||
type TemplateConfig struct {
|
||||
// Chat is the template used in the chat completion endpoint
|
||||
Chat string `yaml:"chat,omitempty" json:"chat,omitempty"`
|
||||
|
||||
// ChatMessage is the template used for chat messages
|
||||
ChatMessage string `yaml:"chat_message,omitempty" json:"chat_message,omitempty"`
|
||||
|
||||
// Completion is the template used for completion requests
|
||||
Completion string `yaml:"completion,omitempty" json:"completion,omitempty"`
|
||||
|
||||
// Edit is the template used for edit completion requests
|
||||
Edit string `yaml:"edit,omitempty" json:"edit,omitempty"`
|
||||
|
||||
// Functions is the template used when tools are present in the client requests
|
||||
Functions string `yaml:"function,omitempty" json:"function,omitempty"`
|
||||
|
||||
// UseTokenizerTemplate is a flag that indicates if the tokenizer template should be used.
|
||||
// Note: this is mostly consumed for backends such as vllm and transformers
|
||||
// that can use the tokenizers specified in the JSON config files of the models
|
||||
UseTokenizerTemplate bool `yaml:"use_tokenizer_template,omitempty" json:"use_tokenizer_template,omitempty"`
|
||||
|
||||
// JoinChatMessagesByCharacter is a string that will be used to join chat messages together.
|
||||
// It defaults to \n
|
||||
JoinChatMessagesByCharacter *string `yaml:"join_chat_messages_by_character,omitempty" json:"join_chat_messages_by_character,omitempty"`
|
||||
|
||||
Multimodal string `yaml:"multimodal,omitempty" json:"multimodal,omitempty"`
|
||||
|
||||
ReplyPrefix string `yaml:"reply_prefix,omitempty" json:"reply_prefix,omitempty"`
|
||||
}
|
||||
|
||||
func (c *ModelConfig) syncKnownUsecasesFromString() {
|
||||
c.KnownUsecases = GetUsecasesFromYAML(c.KnownUsecaseStrings)
|
||||
// Make sure the usecases are valid, we rewrite with what we identified
|
||||
c.KnownUsecaseStrings = []string{}
|
||||
for k, usecase := range GetAllModelConfigUsecases() {
|
||||
if c.HasUsecases(usecase) {
|
||||
c.KnownUsecaseStrings = append(c.KnownUsecaseStrings, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ModelConfig) UnmarshalYAML(value *yaml.Node) error {
|
||||
type BCAlias ModelConfig
|
||||
var aux BCAlias
|
||||
if err := value.Decode(&aux); err != nil {
|
||||
return err
|
||||
}
|
||||
*c = ModelConfig(aux)
|
||||
|
||||
c.syncKnownUsecasesFromString()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ModelConfig) SetFunctionCallString(s string) {
|
||||
c.functionCallString = s
|
||||
}
|
||||
|
||||
func (c *ModelConfig) SetFunctionCallNameString(s string) {
|
||||
c.functionCallNameString = s
|
||||
}
|
||||
|
||||
func (c *ModelConfig) ShouldUseFunctions() bool {
|
||||
return ((c.functionCallString != "none" || c.functionCallString == "") || c.ShouldCallSpecificFunction())
|
||||
}
|
||||
|
||||
func (c *ModelConfig) ShouldCallSpecificFunction() bool {
|
||||
return len(c.functionCallNameString) > 0
|
||||
}
|
||||
|
||||
// MMProjFileName returns the filename of the MMProj file
|
||||
// If the MMProj is a URL, it will return the MD5 of the URL which is the filename
|
||||
func (c *ModelConfig) MMProjFileName() string {
|
||||
uri := downloader.URI(c.MMProj)
|
||||
if uri.LooksLikeURL() {
|
||||
f, _ := uri.FilenameFromUrl()
|
||||
return f
|
||||
}
|
||||
|
||||
return c.MMProj
|
||||
}
|
||||
|
||||
func (c *ModelConfig) IsMMProjURL() bool {
|
||||
uri := downloader.URI(c.MMProj)
|
||||
return uri.LooksLikeURL()
|
||||
}
|
||||
|
||||
func (c *ModelConfig) IsModelURL() bool {
|
||||
uri := downloader.URI(c.Model)
|
||||
return uri.LooksLikeURL()
|
||||
}
|
||||
|
||||
// ModelFileName returns the filename of the model
|
||||
// If the model is a URL, it will return the MD5 of the URL which is the filename
|
||||
func (c *ModelConfig) ModelFileName() string {
|
||||
uri := downloader.URI(c.Model)
|
||||
if uri.LooksLikeURL() {
|
||||
f, _ := uri.FilenameFromUrl()
|
||||
return f
|
||||
}
|
||||
|
||||
return c.Model
|
||||
}
|
||||
|
||||
func (c *ModelConfig) FunctionToCall() string {
|
||||
if c.functionCallNameString != "" &&
|
||||
c.functionCallNameString != "none" && c.functionCallNameString != "auto" {
|
||||
return c.functionCallNameString
|
||||
}
|
||||
|
||||
return c.functionCallString
|
||||
}
|
||||
|
||||
func (cfg *ModelConfig) SetDefaults(opts ...ConfigLoaderOption) {
|
||||
lo := &LoadOptions{}
|
||||
lo.Apply(opts...)
|
||||
|
||||
ctx := lo.ctxSize
|
||||
threads := lo.threads
|
||||
f16 := lo.f16
|
||||
debug := lo.debug
|
||||
// https://github.com/ggerganov/llama.cpp/blob/75cd4c77292034ecec587ecb401366f57338f7c0/common/sampling.h#L22
|
||||
defaultTopP := 0.95
|
||||
defaultTopK := 40
|
||||
defaultTemp := 0.9
|
||||
// https://github.com/mudler/LocalAI/issues/2780
|
||||
defaultMirostat := 0
|
||||
defaultMirostatTAU := 5.0
|
||||
defaultMirostatETA := 0.1
|
||||
defaultTypicalP := 1.0
|
||||
defaultTFZ := 1.0
|
||||
defaultZero := 0
|
||||
|
||||
trueV := true
|
||||
falseV := false
|
||||
|
||||
if cfg.Seed == nil {
|
||||
// random number generator seed
|
||||
defaultSeed := RAND_SEED
|
||||
cfg.Seed = &defaultSeed
|
||||
}
|
||||
|
||||
if cfg.TopK == nil {
|
||||
cfg.TopK = &defaultTopK
|
||||
}
|
||||
|
||||
if cfg.TypicalP == nil {
|
||||
cfg.TypicalP = &defaultTypicalP
|
||||
}
|
||||
|
||||
if cfg.TFZ == nil {
|
||||
cfg.TFZ = &defaultTFZ
|
||||
}
|
||||
|
||||
if cfg.MMap == nil {
|
||||
// MMap is enabled by default
|
||||
|
||||
// Only exception is for Intel GPUs
|
||||
if os.Getenv("XPU") != "" {
|
||||
cfg.MMap = &falseV
|
||||
} else {
|
||||
cfg.MMap = &trueV
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.MMlock == nil {
|
||||
// MMlock is disabled by default
|
||||
cfg.MMlock = &falseV
|
||||
}
|
||||
|
||||
if cfg.TopP == nil {
|
||||
cfg.TopP = &defaultTopP
|
||||
}
|
||||
if cfg.Temperature == nil {
|
||||
cfg.Temperature = &defaultTemp
|
||||
}
|
||||
|
||||
if cfg.Maxtokens == nil {
|
||||
cfg.Maxtokens = &defaultZero
|
||||
}
|
||||
|
||||
if cfg.Mirostat == nil {
|
||||
cfg.Mirostat = &defaultMirostat
|
||||
}
|
||||
|
||||
if cfg.MirostatETA == nil {
|
||||
cfg.MirostatETA = &defaultMirostatETA
|
||||
}
|
||||
|
||||
if cfg.MirostatTAU == nil {
|
||||
cfg.MirostatTAU = &defaultMirostatTAU
|
||||
}
|
||||
|
||||
if cfg.LowVRAM == nil {
|
||||
cfg.LowVRAM = &falseV
|
||||
}
|
||||
|
||||
if cfg.Embeddings == nil {
|
||||
cfg.Embeddings = &falseV
|
||||
}
|
||||
|
||||
if cfg.Reranking == nil {
|
||||
cfg.Reranking = &falseV
|
||||
}
|
||||
|
||||
if threads == 0 {
|
||||
// Threads can't be 0
|
||||
threads = 4
|
||||
}
|
||||
|
||||
if cfg.Threads == nil {
|
||||
cfg.Threads = &threads
|
||||
}
|
||||
|
||||
if cfg.F16 == nil {
|
||||
cfg.F16 = &f16
|
||||
}
|
||||
|
||||
if cfg.Debug == nil {
|
||||
cfg.Debug = &falseV
|
||||
}
|
||||
|
||||
if debug {
|
||||
cfg.Debug = &trueV
|
||||
}
|
||||
|
||||
guessDefaultsFromFile(cfg, lo.modelPath, ctx)
|
||||
cfg.syncKnownUsecasesFromString()
|
||||
}
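// Illustrative sketch: SetDefaults fills unset sampling parameters in place, so an
// empty config picks up the llama.cpp-aligned defaults above. This assumes
// guessDefaultsFromFile is a no-op when no model path has been set.
func exampleSetDefaults() {
	cfg := &ModelConfig{}
	cfg.SetDefaults()
	fmt.Println(*cfg.TopK, *cfg.TopP, *cfg.Threads) // 40 0.95 4
}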
|
||||
|
||||
func (c *ModelConfig) Validate() (bool, error) {
|
||||
downloadedFileNames := []string{}
|
||||
for _, f := range c.DownloadFiles {
|
||||
downloadedFileNames = append(downloadedFileNames, f.Filename)
|
||||
}
|
||||
validationTargets := []string{c.Backend, c.Model, c.MMProj}
|
||||
validationTargets = append(validationTargets, downloadedFileNames...)
|
||||
// Simple validation to make sure the model can be correctly loaded
|
||||
for _, n := range validationTargets {
|
||||
if n == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(n, string(os.PathSeparator)) ||
|
||||
strings.Contains(n, "..") {
|
||||
return false, fmt.Errorf("invalid file path: %s", n)
|
||||
}
|
||||
}
|
||||
|
||||
if c.Backend != "" {
|
||||
// a regex that checks that is a string name with no special characters, except '-' and '_'
|
||||
re := regexp.MustCompile(`^[a-zA-Z0-9-_]+$`)
|
||||
if !re.MatchString(c.Backend) {
|
||||
return false, fmt.Errorf("invalid backend name: %s", c.Backend)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
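// Illustrative sketch: Validate rejects path traversal in any referenced file and
// restricts backend names to alphanumerics, '-' and '_'.
func exampleValidate() {
	c := &ModelConfig{Backend: "../escape"}
	if ok, err := c.Validate(); !ok {
		fmt.Println(err) // invalid file path: ../escape
	}
}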
|
||||
|
||||
func (c *ModelConfig) HasTemplate() bool {
|
||||
return c.TemplateConfig.Completion != "" || c.TemplateConfig.Edit != "" || c.TemplateConfig.Chat != "" || c.TemplateConfig.ChatMessage != "" || c.TemplateConfig.UseTokenizerTemplate
|
||||
}
|
||||
|
||||
func (c *ModelConfig) GetModelConfigFile() string {
|
||||
return c.modelConfigFile
|
||||
}
|
||||
|
||||
type ModelConfigUsecases int
|
||||
|
||||
const (
|
||||
FLAG_ANY ModelConfigUsecases = 0b000000000000
|
||||
FLAG_CHAT ModelConfigUsecases = 0b000000000001
|
||||
FLAG_COMPLETION ModelConfigUsecases = 0b000000000010
|
||||
FLAG_EDIT ModelConfigUsecases = 0b000000000100
|
||||
FLAG_EMBEDDINGS ModelConfigUsecases = 0b000000001000
|
||||
FLAG_RERANK ModelConfigUsecases = 0b000000010000
|
||||
FLAG_IMAGE ModelConfigUsecases = 0b000000100000
|
||||
FLAG_TRANSCRIPT ModelConfigUsecases = 0b000001000000
|
||||
FLAG_TTS ModelConfigUsecases = 0b000010000000
|
||||
FLAG_SOUND_GENERATION ModelConfigUsecases = 0b000100000000
|
||||
FLAG_TOKENIZE ModelConfigUsecases = 0b001000000000
|
||||
FLAG_VAD ModelConfigUsecases = 0b010000000000
|
||||
FLAG_VIDEO ModelConfigUsecases = 0b100000000000
|
||||
FLAG_DETECTION ModelConfigUsecases = 0b1000000000000
|
||||
|
||||
// Common Subsets
|
||||
FLAG_LLM ModelConfigUsecases = FLAG_CHAT | FLAG_COMPLETION | FLAG_EDIT
|
||||
)
|
||||
|
||||
func GetAllModelConfigUsecases() map[string]ModelConfigUsecases {
|
||||
return map[string]ModelConfigUsecases{
|
||||
// Note: FLAG_ANY is intentionally excluded from this map
|
||||
// because it's 0 and would always match in HasUsecases checks
|
||||
"FLAG_CHAT": FLAG_CHAT,
|
||||
"FLAG_COMPLETION": FLAG_COMPLETION,
|
||||
"FLAG_EDIT": FLAG_EDIT,
|
||||
"FLAG_EMBEDDINGS": FLAG_EMBEDDINGS,
|
||||
"FLAG_RERANK": FLAG_RERANK,
|
||||
"FLAG_IMAGE": FLAG_IMAGE,
|
||||
"FLAG_TRANSCRIPT": FLAG_TRANSCRIPT,
|
||||
"FLAG_TTS": FLAG_TTS,
|
||||
"FLAG_SOUND_GENERATION": FLAG_SOUND_GENERATION,
|
||||
"FLAG_TOKENIZE": FLAG_TOKENIZE,
|
||||
"FLAG_VAD": FLAG_VAD,
|
||||
"FLAG_LLM": FLAG_LLM,
|
||||
"FLAG_VIDEO": FLAG_VIDEO,
|
||||
"FLAG_DETECTION": FLAG_DETECTION,
|
||||
}
|
||||
}
|
||||
|
||||
func stringToFlag(s string) string {
|
||||
return "FLAG_" + strings.ToUpper(s)
|
||||
}
|
||||
|
||||
func GetUsecasesFromYAML(input []string) *ModelConfigUsecases {
|
||||
if len(input) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := FLAG_ANY
|
||||
flags := GetAllModelConfigUsecases()
|
||||
for _, str := range input {
|
||||
flag, exists := flags[stringToFlag(str)]
|
||||
if exists {
|
||||
result |= flag
|
||||
}
|
||||
}
|
||||
return &result
|
||||
}
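// Illustrative sketch: entries from `known_usecases:` are upper-cased, prefixed
// with FLAG_ and OR-ed together; entries that match no known flag are ignored.
func exampleGetUsecasesFromYAML() {
	u := GetUsecasesFromYAML([]string{"chat", "completion", "bogus"})
	fmt.Println(*u == (FLAG_CHAT | FLAG_COMPLETION)) // true
}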
|
||||
|
||||
// HasUsecases examines a ModelConfig and determines which endpoints have a chance of success.
|
||||
func (c *ModelConfig) HasUsecases(u ModelConfigUsecases) bool {
|
||||
if (c.KnownUsecases != nil) && ((u & *c.KnownUsecases) == u) {
|
||||
return true
|
||||
}
|
||||
return c.GuessUsecases(u)
|
||||
}
|
||||
|
||||
// GuessUsecases is a **heuristic based** function, as the backend in question may not be loaded yet, and the config may not record what it's useful at.
|
||||
// In its current state, this function should ideally check for properties of the config like templates, rather than the direct backend name checks for the lower half.
|
||||
// This avoids the maintenance burden of updating this list for each new backend - but unfortunately, that's the best option for some services currently.
|
||||
func (c *ModelConfig) GuessUsecases(u ModelConfigUsecases) bool {
|
||||
if (u & FLAG_CHAT) == FLAG_CHAT {
|
||||
if c.TemplateConfig.Chat == "" && c.TemplateConfig.ChatMessage == "" && !c.TemplateConfig.UseTokenizerTemplate {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_COMPLETION) == FLAG_COMPLETION {
|
||||
if c.TemplateConfig.Completion == "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_EDIT) == FLAG_EDIT {
|
||||
if c.TemplateConfig.Edit == "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_EMBEDDINGS) == FLAG_EMBEDDINGS {
|
||||
if c.Embeddings == nil || !*c.Embeddings {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_IMAGE) == FLAG_IMAGE {
|
||||
imageBackends := []string{"diffusers", "stablediffusion", "stablediffusion-ggml"}
|
||||
if !slices.Contains(imageBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.Backend == "diffusers" && c.Diffusers.PipelineType == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
if (u & FLAG_VIDEO) == FLAG_VIDEO {
|
||||
videoBackends := []string{"diffusers", "stablediffusion"}
|
||||
if !slices.Contains(videoBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
|
||||
if c.Backend == "diffusers" && c.Diffusers.PipelineType == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
if (u & FLAG_RERANK) == FLAG_RERANK {
|
||||
if c.Backend != "rerankers" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_TRANSCRIPT) == FLAG_TRANSCRIPT {
|
||||
if c.Backend != "whisper" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if (u & FLAG_TTS) == FLAG_TTS {
|
||||
ttsBackends := []string{"bark-cpp", "piper", "transformers-musicgen", "kokoro"}
|
||||
if !slices.Contains(ttsBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_DETECTION) == FLAG_DETECTION {
|
||||
if c.Backend != "rfdetr" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_SOUND_GENERATION) == FLAG_SOUND_GENERATION {
|
||||
if c.Backend != "transformers-musicgen" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_TOKENIZE) == FLAG_TOKENIZE {
|
||||
tokenizeCapableBackends := []string{"llama.cpp", "rwkv"}
|
||||
if !slices.Contains(tokenizeCapableBackends, c.Backend) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if (u & FLAG_VAD) == FLAG_VAD {
|
||||
if c.Backend != "silero-vad" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
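// Illustrative sketch: with no known_usecases set, HasUsecases falls through to the
// heuristics above, so a bare whisper config advertises transcription but not chat.
func exampleGuessUsecases() {
	c := &ModelConfig{Backend: "whisper"}
	fmt.Println(c.HasUsecases(FLAG_TRANSCRIPT)) // true: backend name matches
	fmt.Println(c.HasUsecases(FLAG_CHAT))       // false: no chat template configured
}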
|
||||
|
||||
// BuildCogitoOptions generates cogito options from the model configuration
|
||||
// It accepts a context, MCP sessions, and optional callback functions for status, reasoning, tool calls, and tool results
|
||||
func (c *ModelConfig) BuildCogitoOptions() []cogito.Option {
|
||||
cogitoOpts := []cogito.Option{
|
||||
cogito.WithIterations(3), // default to 3 iterations
|
||||
cogito.WithMaxAttempts(3), // default to 3 attempts
|
||||
cogito.WithForceReasoning(),
|
||||
}
|
||||
|
||||
// Apply agent configuration options
|
||||
if c.Agent.EnableReasoning {
|
||||
cogitoOpts = append(cogitoOpts, cogito.EnableToolReasoner)
|
||||
}
|
||||
|
||||
if c.Agent.EnablePlanning {
|
||||
cogitoOpts = append(cogitoOpts, cogito.EnableAutoPlan)
|
||||
}
|
||||
|
||||
if c.Agent.EnableMCPPrompts {
|
||||
cogitoOpts = append(cogitoOpts, cogito.EnableMCPPrompts)
|
||||
}
|
||||
|
||||
if c.Agent.EnablePlanReEvaluator {
|
||||
cogitoOpts = append(cogitoOpts, cogito.EnableAutoPlanReEvaluator)
|
||||
}
|
||||
|
||||
if c.Agent.MaxIterations != 0 {
|
||||
cogitoOpts = append(cogitoOpts, cogito.WithIterations(c.Agent.MaxIterations))
|
||||
}
|
||||
|
||||
if c.Agent.MaxAttempts != 0 {
|
||||
cogitoOpts = append(cogitoOpts, cogito.WithMaxAttempts(c.Agent.MaxAttempts))
|
||||
}
|
||||
|
||||
return cogitoOpts
|
||||
}
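// Illustrative sketch: agent settings extend the baseline options (3 iterations,
// 3 attempts, forced reasoning); the values used here are arbitrary examples.
func exampleBuildCogitoOptions() {
	c := &ModelConfig{Agent: AgentConfig{EnablePlanning: true, MaxIterations: 5}}
	opts := c.BuildCogitoOptions()
	// opts now also contains cogito.EnableAutoPlan and cogito.WithIterations(5),
	// appended after the defaults.
	_ = opts
}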
|
||||
@@ -88,6 +88,7 @@ func readMultipleModelConfigsFromFile(file string, opts ...ConfigLoaderOption) (
|
||||
}
|
||||
|
||||
for _, cc := range *c {
|
||||
cc.modelConfigFile = file
|
||||
cc.SetDefaults(opts...)
|
||||
}
|
||||
|
||||
@@ -108,6 +109,8 @@ func readModelConfigFromFile(file string, opts ...ConfigLoaderOption) (*ModelCon
|
||||
}
|
||||
|
||||
c.SetDefaults(opts...)
|
||||
|
||||
c.modelConfigFile = file
|
||||
return c, nil
|
||||
}
|
||||
|
||||
@@ -166,7 +169,7 @@ func (bcl *ModelConfigLoader) LoadMultipleModelConfigsSingleFile(file string, op
|
||||
}
|
||||
|
||||
for _, cc := range c {
|
||||
-if cc.Validate() {
+if valid, _ := cc.Validate(); valid {
|
||||
bcl.configs[cc.Name] = *cc
|
||||
}
|
||||
}
|
||||
@@ -181,7 +184,7 @@ func (bcl *ModelConfigLoader) ReadModelConfig(file string, opts ...ConfigLoaderO
|
||||
return fmt.Errorf("ReadModelConfig cannot read config file %q: %w", file, err)
|
||||
}
|
||||
|
||||
-if c.Validate() {
+if valid, _ := c.Validate(); valid {
|
||||
bcl.configs[c.Name] = *c
|
||||
} else {
|
||||
return fmt.Errorf("config is not valid")
|
||||
@@ -359,7 +362,7 @@ func (bcl *ModelConfigLoader) LoadModelConfigsFromPath(path string, opts ...Conf
|
||||
log.Error().Err(err).Str("File Name", file.Name()).Msgf("LoadModelConfigsFromPath cannot read config file")
|
||||
continue
|
||||
}
|
||||
-if c.Validate() {
+if valid, _ := c.Validate(); valid {
|
||||
bcl.configs[c.Name] = *c
|
||||
} else {
|
||||
log.Error().Err(err).Str("Name", c.Name).Msgf("config is not valid")
|
||||
@@ -28,7 +28,9 @@ known_usecases:
|
||||
config, err := readModelConfigFromFile(tmp.Name())
|
||||
Expect(err).To(BeNil())
|
||||
Expect(config).ToNot(BeNil())
|
||||
-Expect(config.Validate()).To(BeFalse())
+valid, err := config.Validate()
+Expect(err).To(HaveOccurred())
+Expect(valid).To(BeFalse())
|
||||
Expect(config.KnownUsecases).ToNot(BeNil())
|
||||
})
|
||||
It("Test Validate", func() {
|
||||
@@ -46,7 +48,9 @@ parameters:
|
||||
Expect(config).ToNot(BeNil())
|
||||
// two configs in config.yaml
|
||||
Expect(config.Name).To(Equal("bar-baz"))
|
||||
Expect(config.Validate()).To(BeTrue())
|
||||
valid, err := config.Validate()
|
||||
Expect(err).To(BeNil())
|
||||
Expect(valid).To(BeTrue())
|
||||
|
||||
// download https://raw.githubusercontent.com/mudler/LocalAI/v2.25.0/embedded/models/hermes-2-pro-mistral.yaml
|
||||
httpClient := http.Client{}
|
||||
@@ -63,7 +67,9 @@ parameters:
|
||||
Expect(config).ToNot(BeNil())
|
||||
// two configs in config.yaml
|
||||
Expect(config.Name).To(Equal("hermes-2-pro-mistral"))
|
||||
Expect(config.Validate()).To(BeTrue())
|
||||
valid, err = config.Validate()
|
||||
Expect(err).To(BeNil())
|
||||
Expect(valid).To(BeTrue())
|
||||
})
|
||||
})
|
||||
It("Properly handles backend usecase matching", func() {
|
||||
@@ -160,4 +166,76 @@ parameters:
|
||||
Expect(i.HasUsecases(FLAG_COMPLETION)).To(BeTrue())
|
||||
Expect(i.HasUsecases(FLAG_CHAT)).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Handles multiple configs with same model file but different names", func() {
|
||||
// Create a temporary directory for test configs
|
||||
tmpDir, err := os.MkdirTemp("", "config_test_*")
|
||||
Expect(err).To(BeNil())
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
// Write first config without MCP
|
||||
config1Path := tmpDir + "/model-without-mcp.yaml"
|
||||
err = os.WriteFile(config1Path, []byte(`name: model-without-mcp
|
||||
backend: llama-cpp
|
||||
parameters:
|
||||
model: shared-model.gguf
|
||||
`), 0644)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Write second config with MCP
|
||||
config2Path := tmpDir + "/model-with-mcp.yaml"
|
||||
err = os.WriteFile(config2Path, []byte(`name: model-with-mcp
|
||||
backend: llama-cpp
|
||||
parameters:
|
||||
model: shared-model.gguf
|
||||
mcp:
|
||||
stdio: |
|
||||
mcpServers:
|
||||
test:
|
||||
command: echo
|
||||
args: ["hello"]
|
||||
`), 0644)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Load all configs
|
||||
loader := NewModelConfigLoader(tmpDir)
|
||||
err = loader.LoadModelConfigsFromPath(tmpDir)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Verify both configs are loaded
|
||||
cfg1, exists1 := loader.GetModelConfig("model-without-mcp")
|
||||
Expect(exists1).To(BeTrue())
|
||||
Expect(cfg1.Name).To(Equal("model-without-mcp"))
|
||||
Expect(cfg1.Model).To(Equal("shared-model.gguf"))
|
||||
Expect(cfg1.MCP.Stdio).To(Equal(""))
|
||||
Expect(cfg1.MCP.Servers).To(Equal(""))
|
||||
|
||||
cfg2, exists2 := loader.GetModelConfig("model-with-mcp")
|
||||
Expect(exists2).To(BeTrue())
|
||||
Expect(cfg2.Name).To(Equal("model-with-mcp"))
|
||||
Expect(cfg2.Model).To(Equal("shared-model.gguf"))
|
||||
Expect(cfg2.MCP.Stdio).ToNot(Equal(""))
|
||||
|
||||
// Verify both configs are in the list
|
||||
allConfigs := loader.GetAllModelsConfigs()
|
||||
Expect(len(allConfigs)).To(Equal(2))
|
||||
|
||||
// Find each config in the list
|
||||
foundWithoutMCP := false
|
||||
foundWithMCP := false
|
||||
for _, cfg := range allConfigs {
|
||||
if cfg.Name == "model-without-mcp" {
|
||||
foundWithoutMCP = true
|
||||
Expect(cfg.Model).To(Equal("shared-model.gguf"))
|
||||
Expect(cfg.MCP.Stdio).To(Equal(""))
|
||||
}
|
||||
if cfg.Name == "model-with-mcp" {
|
||||
foundWithMCP = true
|
||||
Expect(cfg.Model).To(Equal("shared-model.gguf"))
|
||||
Expect(cfg.MCP.Stdio).ToNot(Equal(""))
|
||||
}
|
||||
}
|
||||
Expect(foundWithoutMCP).To(BeTrue())
|
||||
Expect(foundWithMCP).To(BeTrue())
|
||||
})
|
||||
})
|
||||

@@ -1,6 +1,8 @@
package gallery

import (
    "fmt"

    "github.com/mudler/LocalAI/core/config"
    "github.com/mudler/LocalAI/pkg/system"
    "github.com/rs/zerolog/log"
@@ -72,3 +74,7 @@ func (m *GalleryBackend) GetDescription() string {
func (m *GalleryBackend) GetTags() []string {
    return m.Tags
}
+
+func (m GalleryBackend) ID() string {
+   return fmt.Sprintf("%s@%s", m.Gallery.Name, m.Name)
+}
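
// For illustration only: with the new ID method, a backend named "llama-cpp"
// coming from a gallery named "localai" (both names hypothetical) would be
// identified as:
//
//   b.ID() == "localai@llama-cpp"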

@@ -3,7 +3,9 @@
package gallery

import (
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "os"
    "path/filepath"
@@ -68,7 +70,7 @@ func writeBackendMetadata(backendPath string, metadata *BackendMetadata) error {
}

// InstallBackendFromGallery installs a backend from the gallery.
-func InstallBackendFromGallery(galleries []config.Gallery, systemState *system.SystemState, modelLoader *model.ModelLoader, name string, downloadStatus func(string, string, string, float64), force bool) error {
+func InstallBackendFromGallery(ctx context.Context, galleries []config.Gallery, systemState *system.SystemState, modelLoader *model.ModelLoader, name string, downloadStatus func(string, string, string, float64), force bool) error {
    if !force {
        // check if we already have the backend installed
        backends, err := ListSystemBackends(systemState)
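
// For illustration only: a caller-side sketch (hypothetical wrapper) showing what
// the new ctx parameter buys: a slow gallery download can be aborted via timeout
// or cancellation. Assumes "context" and "time" are imported; "llama-cpp" is just
// an example backend name, and nil means no progress callback.
func installWithTimeout(galleries []config.Gallery, state *system.SystemState, ml *model.ModelLoader) error {
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
    defer cancel()
    return InstallBackendFromGallery(ctx, galleries, state, ml, "llama-cpp", nil, false)
}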

@@ -108,7 +110,7 @@ func InstallBackendFromGallery(galleries []config.Gallery, systemState *system.S
    log.Debug().Str("name", name).Str("bestBackend", bestBackend.Name).Msg("Installing backend from meta backend")

    // Then, let's install the best backend
-   if err := InstallBackend(systemState, modelLoader, bestBackend, downloadStatus); err != nil {
+   if err := InstallBackend(ctx, systemState, modelLoader, bestBackend, downloadStatus); err != nil {
        return err
    }

@@ -133,10 +135,10 @@ func InstallBackendFromGallery(galleries []config.Gallery, systemState *system.S
        return nil
    }

-   return InstallBackend(systemState, modelLoader, backend, downloadStatus)
+   return InstallBackend(ctx, systemState, modelLoader, backend, downloadStatus)
}

-func InstallBackend(systemState *system.SystemState, modelLoader *model.ModelLoader, config *GalleryBackend, downloadStatus func(string, string, string, float64)) error {
+func InstallBackend(ctx context.Context, systemState *system.SystemState, modelLoader *model.ModelLoader, config *GalleryBackend, downloadStatus func(string, string, string, float64)) error {
    // Create base path if it doesn't exist
    err := os.MkdirAll(systemState.Backend.BackendsPath, 0750)
    if err != nil {
@@ -162,23 +164,40 @@ func InstallBackend(systemState *system.SystemState, modelLoader *model.ModelLoa
            return fmt.Errorf("failed copying: %w", err)
        }
    } else {
        uri := downloader.URI(config.URI)
-       if err := uri.DownloadFile(backendPath, "", 1, 1, downloadStatus); err != nil {
+       log.Debug().Str("uri", config.URI).Str("backendPath", backendPath).Msg("Downloading backend")
+       if err := uri.DownloadFileWithContext(ctx, backendPath, "", 1, 1, downloadStatus); err != nil {
            success := false
            // Try to download from mirrors
            for _, mirror := range config.Mirrors {
-               if err := downloader.URI(mirror).DownloadFile(backendPath, "", 1, 1, downloadStatus); err == nil {
+               // Check for cancellation before trying next mirror
+               select {
+               case <-ctx.Done():
+                   return ctx.Err()
+               default:
+               }
+               if err := downloader.URI(mirror).DownloadFileWithContext(ctx, backendPath, "", 1, 1, downloadStatus); err == nil {
                    success = true
                    log.Debug().Str("uri", config.URI).Str("backendPath", backendPath).Msg("Downloaded backend")
                    break
                }
            }

            if !success {
                log.Error().Str("uri", config.URI).Str("backendPath", backendPath).Err(err).Msg("Failed to download backend")
                return fmt.Errorf("failed to download backend %q: %v", config.URI, err)
            }
        } else {
            log.Debug().Str("uri", config.URI).Str("backendPath", backendPath).Msg("Downloaded backend")
        }
    }

    // sanity check - check if runfile is present
    runFile := filepath.Join(backendPath, runFile)
    if _, err := os.Stat(runFile); os.IsNotExist(err) {
        log.Error().Str("runFile", runFile).Msg("Run file not found")
        return fmt.Errorf("not a valid backend: run file not found %q", runFile)
    }

    // Create metadata for the backend
    metadata := &BackendMetadata{
        Name: name,
@@ -310,8 +329,10 @@ func ListSystemBackends(systemState *system.SystemState) (SystemBackends, error)
                }
            }
        }
-   } else {
+   } else if !errors.Is(err, os.ErrNotExist) {
        log.Warn().Err(err).Msg("Failed to read system backends, proceeding with user-managed backends")
+   } else if errors.Is(err, os.ErrNotExist) {
+       log.Debug().Msg("No system backends found")
    }

    // User-managed backends and alias collection

@@ -1,6 +1,7 @@
package gallery

import (
    "context"
    "encoding/json"
    "os"
    "path/filepath"
@@ -55,7 +56,7 @@ var _ = Describe("Runtime capability-based backend selection", func() {
    )
    must(err)
    sysDefault.GPUVendor = "" // force default selection
    backs, err := ListSystemBackends(sysDefault)
    must(err)
    aliasBack, ok := backs.Get("llama-cpp")
    Expect(ok).To(BeTrue())
@@ -77,7 +78,7 @@ var _ = Describe("Runtime capability-based backend selection", func() {
    must(err)
    sysNvidia.GPUVendor = "nvidia"
    sysNvidia.VRAM = 8 * 1024 * 1024 * 1024
    backs, err = ListSystemBackends(sysNvidia)
    must(err)
    aliasBack, ok = backs.Get("llama-cpp")
    Expect(ok).To(BeTrue())
@@ -116,13 +117,13 @@ var _ = Describe("Gallery Backends", func() {

    Describe("InstallBackendFromGallery", func() {
        It("should return error when backend is not found", func() {
-           err := InstallBackendFromGallery(galleries, systemState, ml, "non-existent", nil, true)
+           err := InstallBackendFromGallery(context.TODO(), galleries, systemState, ml, "non-existent", nil, true)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("no backend found with name \"non-existent\""))
        })

        It("should install backend from gallery", func() {
-           err := InstallBackendFromGallery(galleries, systemState, ml, "test-backend", nil, true)
+           err := InstallBackendFromGallery(context.TODO(), galleries, systemState, ml, "test-backend", nil, true)
            Expect(err).ToNot(HaveOccurred())
            Expect(filepath.Join(tempDir, "test-backend", "run.sh")).To(BeARegularFile())
        })
@@ -298,7 +299,7 @@ var _ = Describe("Gallery Backends", func() {
            VRAM: 1000000000000,
            Backend: system.Backend{BackendsPath: tempDir},
        }
-       err = InstallBackendFromGallery([]config.Gallery{gallery}, nvidiaSystemState, ml, "meta-backend", nil, true)
+       err = InstallBackendFromGallery(context.TODO(), []config.Gallery{gallery}, nvidiaSystemState, ml, "meta-backend", nil, true)
        Expect(err).NotTo(HaveOccurred())

        metaBackendPath := filepath.Join(tempDir, "meta-backend")
@@ -378,7 +379,7 @@ var _ = Describe("Gallery Backends", func() {
            VRAM: 1000000000000,
            Backend: system.Backend{BackendsPath: tempDir},
        }
-       err = InstallBackendFromGallery([]config.Gallery{gallery}, nvidiaSystemState, ml, "meta-backend", nil, true)
+       err = InstallBackendFromGallery(context.TODO(), []config.Gallery{gallery}, nvidiaSystemState, ml, "meta-backend", nil, true)
        Expect(err).NotTo(HaveOccurred())

        metaBackendPath := filepath.Join(tempDir, "meta-backend")
@@ -462,7 +463,7 @@ var _ = Describe("Gallery Backends", func() {
            VRAM: 1000000000000,
            Backend: system.Backend{BackendsPath: tempDir},
        }
-       err = InstallBackendFromGallery([]config.Gallery{gallery}, nvidiaSystemState, ml, "meta-backend", nil, true)
+       err = InstallBackendFromGallery(context.TODO(), []config.Gallery{gallery}, nvidiaSystemState, ml, "meta-backend", nil, true)
        Expect(err).NotTo(HaveOccurred())

        metaBackendPath := filepath.Join(tempDir, "meta-backend")
@@ -561,9 +562,9 @@ var _ = Describe("Gallery Backends", func() {
            system.WithBackendPath(newPath),
        )
        Expect(err).NotTo(HaveOccurred())
-       err = InstallBackend(systemState, ml, &backend, nil)
-       Expect(err).To(HaveOccurred()) // Will fail due to invalid URI, but path should be created
+       err = InstallBackend(context.TODO(), systemState, ml, &backend, nil)
        Expect(newPath).To(BeADirectory())
+       Expect(err).To(HaveOccurred()) // Will fail due to invalid URI, but path should be created
    })

    It("should overwrite existing backend", func() {
@@ -593,7 +594,7 @@ var _ = Describe("Gallery Backends", func() {
            system.WithBackendPath(tempDir),
        )
        Expect(err).NotTo(HaveOccurred())
-       err = InstallBackend(systemState, ml, &backend, nil)
+       err = InstallBackend(context.TODO(), systemState, ml, &backend, nil)
        Expect(err).ToNot(HaveOccurred())
        Expect(filepath.Join(tempDir, "test-backend", "metadata.json")).To(BeARegularFile())
        dat, err := os.ReadFile(filepath.Join(tempDir, "test-backend", "metadata.json"))
@@ -626,7 +627,7 @@ var _ = Describe("Gallery Backends", func() {

        Expect(filepath.Join(tempDir, "test-backend", "metadata.json")).ToNot(BeARegularFile())

-       err = InstallBackend(systemState, ml, &backend, nil)
+       err = InstallBackend(context.TODO(), systemState, ml, &backend, nil)
        Expect(err).ToNot(HaveOccurred())
        Expect(filepath.Join(tempDir, "test-backend", "metadata.json")).To(BeARegularFile())
    })
@@ -647,7 +648,7 @@ var _ = Describe("Gallery Backends", func() {
            system.WithBackendPath(tempDir),
        )
        Expect(err).NotTo(HaveOccurred())
-       err = InstallBackend(systemState, ml, &backend, nil)
+       err = InstallBackend(context.TODO(), systemState, ml, &backend, nil)
        Expect(err).ToNot(HaveOccurred())
        Expect(filepath.Join(tempDir, "test-backend", "metadata.json")).To(BeARegularFile())