Compare commits


1 Commits

Author SHA1 Message Date
Ettore Di Giacinto
3f52776a1c WIP 2025-07-23 21:18:47 +02:00
409 changed files with 10168 additions and 27668 deletions

View File

@@ -6,10 +6,6 @@ models
backends
examples/chatbot-ui/models
backend/go/image/stablediffusion-ggml/build/
backend/go/*/build
backend/go/*/.cache
backend/go/*/sources
backend/go/*/package
examples/rwkv/models
examples/**/models
Dockerfile*

View File

@@ -1,288 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"slices"
"strings"
"github.com/go-skynet/LocalAI/.github/gallery-agent/hfapi"
"github.com/mudler/cogito"
"github.com/mudler/cogito/structures"
"github.com/sashabaranov/go-openai/jsonschema"
)
var (
openAIModel = os.Getenv("OPENAI_MODEL")
openAIKey = os.Getenv("OPENAI_KEY")
openAIBaseURL = os.Getenv("OPENAI_BASE_URL")
galleryIndexPath = os.Getenv("GALLERY_INDEX_PATH")
// default client
llm = cogito.NewOpenAILLM(openAIModel, openAIKey, openAIBaseURL)
)
// cleanTextContent removes trailing spaces, tabs, and normalizes line endings
// to prevent YAML linting issues like trailing spaces and multiple empty lines
func cleanTextContent(text string) string {
lines := strings.Split(text, "\n")
var cleanedLines []string
var prevEmpty bool
for _, line := range lines {
// Remove all trailing whitespace (spaces, tabs, etc.)
trimmed := strings.TrimRight(line, " \t\r")
// Avoid multiple consecutive empty lines
if trimmed == "" {
if !prevEmpty {
cleanedLines = append(cleanedLines, "")
}
prevEmpty = true
} else {
cleanedLines = append(cleanedLines, trimmed)
prevEmpty = false
}
}
// Remove trailing empty lines from the result
result := strings.Join(cleanedLines, "\n")
return strings.TrimRight(result, "\n")
}
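// A minimal worked example (hypothetical input, not from the original source) of
// what cleanTextContent produces:
//
//	in := "Title  \n\n\n\nBody line\t\n\n"
//	out := cleanTextContent(in)
//	// out == "Title\n\nBody line"
//
// Trailing spaces and tabs are stripped from each line, runs of blank lines
// collapse to a single blank line, and trailing newlines are removed.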
// isModelExisting checks if a specific model ID exists in the gallery using text search
func isModelExisting(modelID string) (bool, error) {
indexPath := getGalleryIndexPath()
content, err := os.ReadFile(indexPath)
if err != nil {
return false, fmt.Errorf("failed to read %s: %w", indexPath, err)
}
contentStr := string(content)
// Simple text search - if the model ID appears anywhere in the file, it exists
return strings.Contains(contentStr, modelID), nil
}
// filterExistingModels removes models that already exist in the gallery
func filterExistingModels(models []ProcessedModel) ([]ProcessedModel, error) {
var filteredModels []ProcessedModel
for _, model := range models {
exists, err := isModelExisting(model.ModelID)
if err != nil {
fmt.Printf("Error checking if model %s exists: %v, skipping\n", model.ModelID, err)
continue
}
if !exists {
filteredModels = append(filteredModels, model)
} else {
fmt.Printf("Skipping existing model: %s\n", model.ModelID)
}
}
fmt.Printf("Filtered out %d existing models, %d new models remaining\n",
len(models)-len(filteredModels), len(filteredModels))
return filteredModels, nil
}
// getGalleryIndexPath returns the gallery index file path, with a default fallback
func getGalleryIndexPath() string {
if galleryIndexPath != "" {
return galleryIndexPath
}
return "gallery/index.yaml"
}
func getRealReadme(ctx context.Context, repository string) (string, error) {
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
`Your task is to get a clear description of a large language model from Hugging Face by using the provided tool. I will share with you a repository that might be quantized, and as such is probably not maintained by the original model author. We need the description of the base model, not the one from the quantized repository. You will have to call the tool more than once: use the quantized README to work out which repository holds the base model README. This is the repository: `+repository)
// Execute with tools
result, err := cogito.ExecuteTools(llm, fragment,
cogito.WithIterations(3),
cogito.WithMaxAttempts(3),
cogito.WithTools(&HFReadmeTool{client: hfapi.NewClient()}))
if err != nil {
return "", err
}
result = result.AddMessage("user", "Describe the model in a clear and concise way that can be shared in a model gallery.")
// Get a response
newFragment, err := llm.Ask(ctx, result)
if err != nil {
return "", err
}
content := newFragment.LastMessage().Content
return cleanTextContent(content), nil
}
func selectMostInterestingModels(ctx context.Context, searchResult *SearchResult) ([]ProcessedModel, error) {
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
`Your task is to analyze a list of AI models and select the most interesting ones for a model gallery. You will be given detailed information about multiple models including their metadata, file information, and README content.
Consider the following criteria when selecting models:
1. Model popularity (download count)
2. Model recency (last modified date)
3. Model completeness (has preferred model file, README, etc.)
4. Model uniqueness (not duplicates or very similar models)
5. Model quality (based on README content and description)
6. Model utility (practical applications)
You should select models that would be most valuable for users browsing a model gallery. Prioritize models that are:
- Well-documented with clear READMEs
- Recently updated
- Popular (high download count)
- Have the preferred quantization format available
- Offer unique capabilities or are from reputable authors
Return your analysis and selection reasoning.`)
// Add the search results as context
modelsInfo := fmt.Sprintf("Found %d models matching '%s' with quantization preference '%s':\n\n",
searchResult.TotalModelsFound, searchResult.SearchTerm, searchResult.Quantization)
for i, model := range searchResult.Models {
modelsInfo += fmt.Sprintf("Model %d:\n", i+1)
modelsInfo += fmt.Sprintf(" ID: %s\n", model.ModelID)
modelsInfo += fmt.Sprintf(" Author: %s\n", model.Author)
modelsInfo += fmt.Sprintf(" Downloads: %d\n", model.Downloads)
modelsInfo += fmt.Sprintf(" Last Modified: %s\n", model.LastModified)
modelsInfo += fmt.Sprintf(" Files: %d files\n", len(model.Files))
if model.PreferredModelFile != nil {
modelsInfo += fmt.Sprintf(" Preferred Model File: %s (%d bytes)\n",
model.PreferredModelFile.Path, model.PreferredModelFile.Size)
} else {
modelsInfo += " No preferred model file found\n"
}
if model.ReadmeContent != "" {
modelsInfo += fmt.Sprintf(" README: %s\n", model.ReadmeContent)
}
if model.ProcessingError != "" {
modelsInfo += fmt.Sprintf(" Processing Error: %s\n", model.ProcessingError)
}
modelsInfo += "\n"
}
fragment = fragment.AddMessage("user", modelsInfo)
fragment = fragment.AddMessage("user", "Based on your analysis, select the top 5 most interesting models and provide a brief explanation for each selection. Also, create a filtered SearchResult with only the selected models. Return just a list of repository IDs; you will later be asked to output it as a JSON array with the json tool.")
// Get a response
newFragment, err := llm.Ask(ctx, fragment)
if err != nil {
return nil, err
}
fmt.Println(newFragment.LastMessage().Content)
repositories := struct {
Repositories []string `json:"repositories"`
}{}
s := structures.Structure{
Schema: jsonschema.Definition{
Type: jsonschema.Object,
AdditionalProperties: false,
Properties: map[string]jsonschema.Definition{
"repositories": {
Type: jsonschema.Array,
Items: &jsonschema.Definition{Type: jsonschema.String},
Description: "The trending repository IDs",
},
},
Required: []string{"repositories"},
},
Object: &repositories,
}
err = newFragment.ExtractStructure(ctx, llm, s)
if err != nil {
return nil, err
}
filteredModels := []ProcessedModel{}
for _, m := range searchResult.Models {
if slices.Contains(repositories.Repositories, m.ModelID) {
filteredModels = append(filteredModels, m)
}
}
return filteredModels, nil
}
// ModelFamily represents a YAML anchor/family
type ModelFamily struct {
Anchor string `json:"anchor"`
Name string `json:"name"`
}
// selectModelFamily selects the appropriate model family/anchor for a given model
func selectModelFamily(ctx context.Context, model ProcessedModel, availableFamilies []ModelFamily) (string, error) {
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
`Your task is to select the most appropriate model family/anchor for a given AI model. You will be provided with:
1. Information about the model (name, description, etc.)
2. A list of available model families/anchors
You need to select the family that best matches the model's architecture, capabilities, or characteristics. Consider:
- Model architecture (e.g., Llama, Qwen, Mistral, etc.)
- Model capabilities (e.g., vision, coding, chat, etc.)
- Model size/type (e.g., small, medium, large)
- Model purpose (e.g., general purpose, specialized, etc.)
Return the anchor name that best fits the model.`)
// Add model information
modelInfo := "Model Information:\n"
modelInfo += fmt.Sprintf(" ID: %s\n", model.ModelID)
modelInfo += fmt.Sprintf(" Author: %s\n", model.Author)
modelInfo += fmt.Sprintf(" Downloads: %d\n", model.Downloads)
modelInfo += fmt.Sprintf(" Description: %s\n", model.ReadmeContentPreview)
fragment = fragment.AddMessage("user", modelInfo)
// Add available families
familiesInfo := "Available Model Families:\n"
for _, family := range availableFamilies {
familiesInfo += fmt.Sprintf(" - %s (%s)\n", family.Anchor, family.Name)
}
fragment = fragment.AddMessage("user", familiesInfo)
fragment = fragment.AddMessage("user", "Select the most appropriate family anchor for this model. Return just the anchor name.")
// Get a response
newFragment, err := llm.Ask(ctx, fragment)
if err != nil {
return "", err
}
// Extract the selected family
selectedFamily := strings.TrimSpace(newFragment.LastMessage().Content)
// Validate that the selected family exists in our list
for _, family := range availableFamilies {
if family.Anchor == selectedFamily {
return selectedFamily, nil
}
}
// If no exact match, try to find a close match
for _, family := range availableFamilies {
if strings.Contains(strings.ToLower(family.Anchor), strings.ToLower(selectedFamily)) ||
strings.Contains(strings.ToLower(selectedFamily), strings.ToLower(family.Anchor)) {
return family.Anchor, nil
}
}
// Default fallback
return "llama3", nil
}

View File

@@ -1,203 +0,0 @@
package main
import (
"context"
"fmt"
"os"
"strings"
)
// generateYAMLEntry generates a YAML entry for a model using the specified anchor
func generateYAMLEntry(model ProcessedModel, familyAnchor string) string {
// Extract model name from ModelID
parts := strings.Split(model.ModelID, "/")
modelName := model.ModelID
if len(parts) > 0 {
modelName = strings.ToLower(parts[len(parts)-1])
}
// Remove common suffixes
modelName = strings.ReplaceAll(modelName, "-gguf", "")
modelName = strings.ReplaceAll(modelName, "-q4_k_m", "")
modelName = strings.ReplaceAll(modelName, "-q4_k_s", "")
modelName = strings.ReplaceAll(modelName, "-q3_k_m", "")
modelName = strings.ReplaceAll(modelName, "-q2_k", "")
fileName := ""
checksum := ""
if model.PreferredModelFile != nil {
fileParts := strings.Split(model.PreferredModelFile.Path, "/")
if len(fileParts) > 0 {
fileName = fileParts[len(fileParts)-1]
}
checksum = model.PreferredModelFile.SHA256
} else {
fileName = model.ModelID
}
description := model.ReadmeContent
if description == "" {
description = fmt.Sprintf("AI model: %s", modelName)
}
// Clean up description to prevent YAML linting issues
description = cleanTextContent(description)
// Format description for YAML (indent each line and ensure no trailing spaces)
lines := strings.Split(description, "\n")
var formattedLines []string
for _, line := range lines {
if strings.TrimSpace(line) == "" {
// Keep empty lines as empty (no indentation)
formattedLines = append(formattedLines, "")
} else {
// Add indentation to non-empty lines
formattedLines = append(formattedLines, " "+line)
}
}
formattedDescription := strings.Join(formattedLines, "\n")
// Remove any trailing spaces from the formatted description
formattedDescription = strings.TrimRight(formattedDescription, " \t")
yamlTemplate := ""
if checksum != "" {
yamlTemplate = `- !!merge <<: *%s
name: "%s"
urls:
- https://huggingface.co/%s
description: |
%s
overrides:
parameters:
model: %s
files:
- filename: %s
sha256: %s
uri: huggingface://%s/%s`
return fmt.Sprintf(yamlTemplate,
familyAnchor,
modelName,
model.ModelID,
formattedDescription,
fileName,
fileName,
checksum,
model.ModelID,
fileName,
)
} else {
yamlTemplate = `- !!merge <<: *%s
name: "%s"
urls:
- https://huggingface.co/%s
description: |
%s
overrides:
parameters:
model: %s`
return fmt.Sprintf(yamlTemplate,
familyAnchor,
modelName,
model.ModelID,
formattedDescription,
fileName,
)
}
}
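// A sketch of the entry the checksum branch of this template renders, using a
// hypothetical repository, file name, and checksum purely for illustration
// (indentation shown here is approximate):
//
//	- !!merge <<: *llama3
//	  name: "example-model-7b-instruct"
//	  urls:
//	  - https://huggingface.co/example-org/Example-Model-7B-Instruct-GGUF
//	  description: |
//	    Example description taken from the base model README.
//	  overrides:
//	    parameters:
//	      model: Example-Model-7B-Instruct-Q4_K_M.gguf
//	  files:
//	  - filename: Example-Model-7B-Instruct-Q4_K_M.gguf
//	    sha256: <hypothetical sha256>
//	    uri: huggingface://example-org/Example-Model-7B-Instruct-GGUF/Example-Model-7B-Instruct-Q4_K_M.gguf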
// extractModelFamilies extracts all YAML anchors from the gallery index.yaml file
func extractModelFamilies() ([]ModelFamily, error) {
// Read the index.yaml file
indexPath := getGalleryIndexPath()
content, err := os.ReadFile(indexPath)
if err != nil {
return nil, fmt.Errorf("failed to read %s: %w", indexPath, err)
}
lines := strings.Split(string(content), "\n")
var families []ModelFamily
for _, line := range lines {
line = strings.TrimSpace(line)
// Look for YAML anchors (lines starting with "- &")
if strings.HasPrefix(line, "- &") {
// Extract the anchor name (everything after "- &")
anchor := strings.TrimPrefix(line, "- &")
// Remove any trailing colon or other characters
anchor = strings.Split(anchor, ":")[0]
anchor = strings.Split(anchor, " ")[0]
if anchor != "" {
families = append(families, ModelFamily{
Anchor: anchor,
Name: anchor, // Use anchor as name for now
})
}
}
}
return families, nil
}
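// For reference, the anchors this scanner looks for are top-level gallery entries
// declared like the lines below; the anchor names are illustrative (only "llama3"
// is known for certain, since it is the default fallback in selectModelFamily):
//
//	- &llama3
//	- &qwen25
//
// Everything after "- &" up to the first colon or space becomes the anchor name.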
// generateYAMLForModels generates YAML entries for selected models and appends to index.yaml
func generateYAMLForModels(ctx context.Context, models []ProcessedModel) error {
// Extract available model families
families, err := extractModelFamilies()
if err != nil {
return fmt.Errorf("failed to extract model families: %w", err)
}
fmt.Printf("Found %d model families: %v\n", len(families),
func() []string {
var names []string
for _, f := range families {
names = append(names, f.Anchor)
}
return names
}())
// Generate YAML entries for each model
var yamlEntries []string
for _, model := range models {
fmt.Printf("Selecting family for model: %s\n", model.ModelID)
// Select appropriate family for this model
familyAnchor, err := selectModelFamily(ctx, model, families)
if err != nil {
fmt.Printf("Error selecting family for %s: %v, using default\n", model.ModelID, err)
familyAnchor = "llama3" // Default fallback
}
fmt.Printf("Selected family '%s' for model %s\n", familyAnchor, model.ModelID)
// Generate YAML entry
yamlEntry := generateYAMLEntry(model, familyAnchor)
yamlEntries = append(yamlEntries, yamlEntry)
}
// Append to index.yaml
if len(yamlEntries) > 0 {
indexPath := getGalleryIndexPath()
fmt.Printf("Appending YAML entries to %s...\n", indexPath)
// Read current content
content, err := os.ReadFile(indexPath)
if err != nil {
return fmt.Errorf("failed to read %s: %w", indexPath, err)
}
// Append new entries
// Remove trailing whitespace from existing content and join entries without extra newlines
existingContent := strings.TrimRight(string(content), " \t\n\r")
yamlBlock := strings.Join(yamlEntries, "\n")
newContent := existingContent + "\n" + yamlBlock + "\n"
// Write back to file
err = os.WriteFile(indexPath, []byte(newContent), 0644)
if err != nil {
return fmt.Errorf("failed to write %s: %w", indexPath, err)
}
fmt.Printf("Successfully added %d models to %s\n", len(yamlEntries), indexPath)
}
return nil
}

View File

@@ -1,39 +0,0 @@
module github.com/go-skynet/LocalAI/.github/gallery-agent
go 1.24.1
require (
github.com/mudler/cogito v0.3.0
github.com/onsi/ginkgo/v2 v2.25.3
github.com/onsi/gomega v1.38.2
github.com/sashabaranov/go-openai v1.41.2
github.com/tmc/langchaingo v0.1.13
gopkg.in/yaml.v3 v3.0.1
)
require (
dario.cat/mergo v1.0.1 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.4.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/jsonschema-go v0.3.0 // indirect
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modelcontextprotocol/go-sdk v1.0.0 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/crypto v0.41.0 // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/sys v0.35.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/tools v0.36.0 // indirect
)

View File

@@ -1,168 +0,0 @@
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/jsonschema-go v0.3.0 h1:6AH2TxVNtk3IlvkkhjrtbUc4S8AvO0Xii0DxIygDg+Q=
github.com/google/jsonschema-go v0.3.0/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ=
github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modelcontextprotocol/go-sdk v1.0.0 h1:Z4MSjLi38bTgLrd/LjSmofqRqyBiVKRyQSJgw8q8V74=
github.com/modelcontextprotocol/go-sdk v1.0.0/go.mod h1:nYtYQroQ2KQiM0/SbyEPUWQ6xs4B95gJjEalc9AQyOs=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mudler/cogito v0.3.0 h1:NbVAO3bLkK5oGSY0xq87jlz8C9OIsLW55s+8Hfzeu9s=
github.com/mudler/cogito v0.3.0/go.mod h1:abMwl+CUjCp87IufA2quZdZt0bbLaHHN79o17HbUKxU=
github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw=
github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/sashabaranov/go-openai v1.41.2 h1:vfPRBZNMpnqu8ELsclWcAvF19lDNgh1t6TVfFFOPiSM=
github.com/sashabaranov/go-openai v1.41.2/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/shirou/gopsutil/v4 v4.25.5 h1:rtd9piuSMGeU8g1RMXjZs9y9luK5BwtnG7dZaQUJAsc=
github.com/shirou/gopsutil/v4 v4.25.5/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/testcontainers/testcontainers-go v0.38.0 h1:d7uEapLcv2P8AvH8ahLqDMMxda2W9gQN1nRbHS28HBw=
github.com/testcontainers/testcontainers-go v0.38.0/go.mod h1:C52c9MoHpWO+C4aqmgSU+hxlR5jlEayWtgYrb8Pzz1w=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tmc/langchaingo v0.1.13 h1:rcpMWBIi2y3B90XxfE4Ao8dhCQPVDMaNPnN5cGB1CaA=
github.com/tmc/langchaingo v0.1.13/go.mod h1:vpQ5NOIhpzxDfTZK9B6tf2GM/MoaHewPWM5KXXGh7hg=
github.com/yosida95/uritemplate/v3 v3.0.2 h1:Ed3Oyj9yrmi9087+NczuL5BwkIc4wvTb5zIM+UJPGz4=
github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 h1:Xs2Ncz0gNihqu9iosIZ5SkBbWo5T8JhhLJFMQL1qmLI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0/go.mod h1:vy+2G/6NvVMpwGX/NyLqcC41fxepnuKHk16E6IZUcJc=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -1,299 +0,0 @@
package hfapi
import (
"encoding/json"
"fmt"
"io"
"net/http"
"path/filepath"
"strings"
)
// Model represents a model from the Hugging Face API
type Model struct {
ModelID string `json:"modelId"`
Author string `json:"author"`
Downloads int `json:"downloads"`
LastModified string `json:"lastModified"`
PipelineTag string `json:"pipelineTag"`
Private bool `json:"private"`
Tags []string `json:"tags"`
CreatedAt string `json:"createdAt"`
UpdatedAt string `json:"updatedAt"`
Sha string `json:"sha"`
Config map[string]interface{} `json:"config"`
ModelIndex string `json:"model_index"`
LibraryName string `json:"library_name"`
MaskToken string `json:"mask_token"`
TokenizerClass string `json:"tokenizer_class"`
}
// FileInfo represents file information from HuggingFace
type FileInfo struct {
Type string `json:"type"`
Oid string `json:"oid"`
Size int64 `json:"size"`
Path string `json:"path"`
LFS *LFSInfo `json:"lfs,omitempty"`
XetHash string `json:"xetHash,omitempty"`
}
// LFSInfo represents LFS (Large File Storage) information
type LFSInfo struct {
Oid string `json:"oid"`
Size int64 `json:"size"`
PointerSize int `json:"pointerSize"`
}
// ModelFile represents a file in a model repository
type ModelFile struct {
Path string
Size int64
SHA256 string
IsReadme bool
}
// ModelDetails represents detailed information about a model
type ModelDetails struct {
ModelID string
Author string
Files []ModelFile
ReadmeFile *ModelFile
ReadmeContent string
}
// SearchParams represents the parameters for searching models
type SearchParams struct {
Sort string `json:"sort"`
Direction int `json:"direction"`
Limit int `json:"limit"`
Search string `json:"search"`
}
// Client represents a Hugging Face API client
type Client struct {
baseURL string
client *http.Client
}
// NewClient creates a new Hugging Face API client
func NewClient() *Client {
return &Client{
baseURL: "https://huggingface.co/api/models",
client: &http.Client{},
}
}
// SearchModels searches for models using the Hugging Face API
func (c *Client) SearchModels(params SearchParams) ([]Model, error) {
req, err := http.NewRequest("GET", c.baseURL, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
// Add query parameters
q := req.URL.Query()
q.Add("sort", params.Sort)
q.Add("direction", fmt.Sprintf("%d", params.Direction))
q.Add("limit", fmt.Sprintf("%d", params.Limit))
q.Add("search", params.Search)
req.URL.RawQuery = q.Encode()
// Make the HTTP request
resp, err := c.client.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to make request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to fetch models. Status code: %d", resp.StatusCode)
}
// Read the response body
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body: %w", err)
}
// Parse the JSON response
var models []Model
if err := json.Unmarshal(body, &models); err != nil {
return nil, fmt.Errorf("failed to parse JSON response: %w", err)
}
return models, nil
}
// GetLatest fetches the latest GGUF models
func (c *Client) GetLatest(searchTerm string, limit int) ([]Model, error) {
params := SearchParams{
Sort: "lastModified",
Direction: -1,
Limit: limit,
Search: searchTerm,
}
return c.SearchModels(params)
}
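// As a concrete example, GetLatest("GGUF", 5) sends a GET request equivalent to:
//
//	https://huggingface.co/api/models?direction=-1&limit=5&search=GGUF&sort=lastModified
//
// (url.Values.Encode sorts the query parameters alphabetically).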
// BaseURL returns the current base URL
func (c *Client) BaseURL() string {
return c.baseURL
}
// SetBaseURL sets a new base URL (useful for testing)
func (c *Client) SetBaseURL(url string) {
c.baseURL = url
}
// ListFiles lists all files in a HuggingFace repository
func (c *Client) ListFiles(repoID string) ([]FileInfo, error) {
baseURL := strings.TrimSuffix(c.baseURL, "/api/models")
url := fmt.Sprintf("%s/api/models/%s/tree/main", baseURL, repoID)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
resp, err := c.client.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to make request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("failed to fetch files. Status code: %d", resp.StatusCode)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body: %w", err)
}
var files []FileInfo
if err := json.Unmarshal(body, &files); err != nil {
return nil, fmt.Errorf("failed to parse JSON response: %w", err)
}
return files, nil
}
// GetFileSHA gets the SHA256 checksum for a specific file by searching through the file list
func (c *Client) GetFileSHA(repoID, fileName string) (string, error) {
files, err := c.ListFiles(repoID)
if err != nil {
return "", fmt.Errorf("failed to list files: %w", err)
}
for _, file := range files {
if filepath.Base(file.Path) == fileName {
if file.LFS != nil && file.LFS.Oid != "" {
// The LFS OID contains the SHA256 hash
return file.LFS.Oid, nil
}
// If no LFS, return the regular OID
return file.Oid, nil
}
}
return "", fmt.Errorf("file %s not found", fileName)
}
// GetModelDetails gets detailed information about a model including files and checksums
func (c *Client) GetModelDetails(repoID string) (*ModelDetails, error) {
files, err := c.ListFiles(repoID)
if err != nil {
return nil, fmt.Errorf("failed to list files: %w", err)
}
details := &ModelDetails{
ModelID: repoID,
Author: strings.Split(repoID, "/")[0],
Files: make([]ModelFile, 0, len(files)),
}
// Process each file
for _, file := range files {
fileName := filepath.Base(file.Path)
isReadme := strings.Contains(strings.ToLower(fileName), "readme")
// Extract SHA256 from LFS or use OID
sha256 := ""
if file.LFS != nil && file.LFS.Oid != "" {
sha256 = file.LFS.Oid
} else {
sha256 = file.Oid
}
modelFile := ModelFile{
Path: file.Path,
Size: file.Size,
SHA256: sha256,
IsReadme: isReadme,
}
details.Files = append(details.Files, modelFile)
// Set the readme file
if isReadme && details.ReadmeFile == nil {
details.ReadmeFile = &modelFile
}
}
return details, nil
}
// GetReadmeContent gets the content of a README file
func (c *Client) GetReadmeContent(repoID, readmePath string) (string, error) {
baseURL := strings.TrimSuffix(c.baseURL, "/api/models")
url := fmt.Sprintf("%s/%s/raw/main/%s", baseURL, repoID, readmePath)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", fmt.Errorf("failed to create request: %w", err)
}
resp, err := c.client.Do(req)
if err != nil {
return "", fmt.Errorf("failed to make request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("failed to fetch readme content. Status code: %d", resp.StatusCode)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("failed to read response body: %w", err)
}
return string(body), nil
}
// FilterFilesByQuantization filters files by quantization type
func FilterFilesByQuantization(files []ModelFile, quantization string) []ModelFile {
var filtered []ModelFile
for _, file := range files {
fileName := filepath.Base(file.Path)
if strings.Contains(strings.ToLower(fileName), strings.ToLower(quantization)) {
filtered = append(filtered, file)
}
}
return filtered
}
// FindPreferredModelFile finds the preferred model file based on quantization preferences
func FindPreferredModelFile(files []ModelFile, preferences []string) *ModelFile {
for _, preference := range preferences {
for i := range files {
fileName := filepath.Base(files[i].Path)
if strings.Contains(strings.ToLower(fileName), strings.ToLower(preference)) {
return &files[i]
}
}
}
return nil
}

View File

@@ -1,511 +0,0 @@
package hfapi_test
import (
"net/http"
"net/http/httptest"
"strings"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/go-skynet/LocalAI/.github/gallery-agent/hfapi"
)
var _ = Describe("HuggingFace API Client", func() {
var (
client *hfapi.Client
server *httptest.Server
)
BeforeEach(func() {
client = hfapi.NewClient()
})
AfterEach(func() {
if server != nil {
server.Close()
}
})
Context("when creating a new client", func() {
It("should initialize with correct base URL", func() {
Expect(client).ToNot(BeNil())
Expect(client.BaseURL()).To(Equal("https://huggingface.co/api/models"))
})
})
Context("when searching for models", func() {
BeforeEach(func() {
// Mock response data
mockResponse := `[
{
"modelId": "test-model-1",
"author": "test-author",
"downloads": 1000,
"lastModified": "2024-01-01T00:00:00.000Z",
"pipelineTag": "text-generation",
"private": false,
"tags": ["gguf", "llama"],
"createdAt": "2024-01-01T00:00:00.000Z",
"updatedAt": "2024-01-01T00:00:00.000Z",
"sha": "abc123",
"config": {},
"model_index": "test-index",
"library_name": "transformers",
"mask_token": null,
"tokenizer_class": "LlamaTokenizer"
},
{
"modelId": "test-model-2",
"author": "test-author-2",
"downloads": 2000,
"lastModified": "2024-01-02T00:00:00.000Z",
"pipelineTag": "text-generation",
"private": false,
"tags": ["gguf", "mistral"],
"createdAt": "2024-01-02T00:00:00.000Z",
"updatedAt": "2024-01-02T00:00:00.000Z",
"sha": "def456",
"config": {},
"model_index": "test-index-2",
"library_name": "transformers",
"mask_token": null,
"tokenizer_class": "MistralTokenizer"
}
]`
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Verify request parameters
Expect(r.URL.Query().Get("sort")).To(Equal("lastModified"))
Expect(r.URL.Query().Get("direction")).To(Equal("-1"))
Expect(r.URL.Query().Get("limit")).To(Equal("30"))
Expect(r.URL.Query().Get("search")).To(Equal("GGUF"))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(mockResponse))
}))
// Override the client's base URL to use our mock server
client.SetBaseURL(server.URL)
})
It("should successfully search for models", func() {
params := hfapi.SearchParams{
Sort: "lastModified",
Direction: -1,
Limit: 30,
Search: "GGUF",
}
models, err := client.SearchModels(params)
Expect(err).ToNot(HaveOccurred())
Expect(models).To(HaveLen(2))
// Verify first model
Expect(models[0].ModelID).To(Equal("test-model-1"))
Expect(models[0].Author).To(Equal("test-author"))
Expect(models[0].Downloads).To(Equal(1000))
Expect(models[0].PipelineTag).To(Equal("text-generation"))
Expect(models[0].Private).To(BeFalse())
Expect(models[0].Tags).To(ContainElements("gguf", "llama"))
// Verify second model
Expect(models[1].ModelID).To(Equal("test-model-2"))
Expect(models[1].Author).To(Equal("test-author-2"))
Expect(models[1].Downloads).To(Equal(2000))
Expect(models[1].Tags).To(ContainElements("gguf", "mistral"))
})
It("should handle empty search results", func() {
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte("[]"))
}))
client.SetBaseURL(server.URL)
params := hfapi.SearchParams{
Sort: "lastModified",
Direction: -1,
Limit: 30,
Search: "nonexistent",
}
models, err := client.SearchModels(params)
Expect(err).ToNot(HaveOccurred())
Expect(models).To(HaveLen(0))
})
It("should handle HTTP errors", func() {
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte("Internal Server Error"))
}))
client.SetBaseURL(server.URL)
params := hfapi.SearchParams{
Sort: "lastModified",
Direction: -1,
Limit: 30,
Search: "GGUF",
}
models, err := client.SearchModels(params)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Status code: 500"))
Expect(models).To(BeNil())
})
It("should handle malformed JSON response", func() {
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte("invalid json"))
}))
client.SetBaseURL(server.URL)
params := hfapi.SearchParams{
Sort: "lastModified",
Direction: -1,
Limit: 30,
Search: "GGUF",
}
models, err := client.SearchModels(params)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("failed to parse JSON response"))
Expect(models).To(BeNil())
})
})
Context("when getting latest GGUF models", func() {
BeforeEach(func() {
mockResponse := `[
{
"modelId": "latest-gguf-model",
"author": "gguf-author",
"downloads": 5000,
"lastModified": "2024-01-03T00:00:00.000Z",
"pipelineTag": "text-generation",
"private": false,
"tags": ["gguf", "latest"],
"createdAt": "2024-01-03T00:00:00.000Z",
"updatedAt": "2024-01-03T00:00:00.000Z",
"sha": "latest123",
"config": {},
"model_index": "latest-index",
"library_name": "transformers",
"mask_token": null,
"tokenizer_class": "LlamaTokenizer"
}
]`
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Verify the search parameters are correct for GGUF search
Expect(r.URL.Query().Get("search")).To(Equal("GGUF"))
Expect(r.URL.Query().Get("sort")).To(Equal("lastModified"))
Expect(r.URL.Query().Get("direction")).To(Equal("-1"))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(mockResponse))
}))
client.SetBaseURL(server.URL)
})
It("should fetch latest GGUF models with correct parameters", func() {
models, err := client.GetLatest("GGUF", 10)
Expect(err).ToNot(HaveOccurred())
Expect(models).To(HaveLen(1))
Expect(models[0].ModelID).To(Equal("latest-gguf-model"))
Expect(models[0].Author).To(Equal("gguf-author"))
Expect(models[0].Downloads).To(Equal(5000))
Expect(models[0].Tags).To(ContainElements("gguf", "latest"))
})
It("should use custom search term", func() {
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Expect(r.URL.Query().Get("search")).To(Equal("custom-search"))
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte("[]"))
}))
client.SetBaseURL(server.URL)
models, err := client.GetLatest("custom-search", 5)
Expect(err).ToNot(HaveOccurred())
Expect(models).To(HaveLen(0))
})
})
Context("when handling network errors", func() {
It("should handle connection failures gracefully", func() {
// Use an invalid URL to simulate connection failure
client.SetBaseURL("http://invalid-url-that-does-not-exist")
params := hfapi.SearchParams{
Sort: "lastModified",
Direction: -1,
Limit: 30,
Search: "GGUF",
}
models, err := client.SearchModels(params)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("failed to make request"))
Expect(models).To(BeNil())
})
})
Context("when listing files", func() {
BeforeEach(func() {
mockFilesResponse := `[
{
"type": "file",
"path": "model-Q4_K_M.gguf",
"size": 1000000,
"oid": "abc123",
"lfs": {
"oid": "def456789",
"size": 1000000,
"pointerSize": 135
}
},
{
"type": "file",
"path": "README.md",
"size": 5000,
"oid": "readme123"
},
{
"type": "file",
"path": "config.json",
"size": 1000,
"oid": "config123"
}
]`
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/tree/main") {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(mockFilesResponse))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
client.SetBaseURL(server.URL)
})
It("should list files successfully", func() {
files, err := client.ListFiles("test/model")
Expect(err).ToNot(HaveOccurred())
Expect(files).To(HaveLen(3))
Expect(files[0].Path).To(Equal("model-Q4_K_M.gguf"))
Expect(files[0].Size).To(Equal(int64(1000000)))
Expect(files[0].LFS).ToNot(BeNil())
Expect(files[0].LFS.Oid).To(Equal("def456789"))
Expect(files[1].Path).To(Equal("README.md"))
Expect(files[1].Size).To(Equal(int64(5000)))
})
})
Context("when getting file SHA", func() {
BeforeEach(func() {
mockFileInfoResponse := `{
"path": "model-Q4_K_M.gguf",
"size": 1000000,
"oid": "abc123",
"lfs": {
"oid": "sha256:def456",
"size": 1000000,
"pointer": "version https://git-lfs.github.com/spec/v1",
"sha256": "def456789"
}
}`
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/paths-info") {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(mockFileInfoResponse))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
client.SetBaseURL(server.URL)
})
It("should get file SHA successfully", func() {
sha, err := client.GetFileSHA("test/model", "model-Q4_K_M.gguf")
Expect(err).ToNot(HaveOccurred())
Expect(sha).To(Equal("def456789"))
})
It("should handle missing SHA gracefully", func() {
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"path": "file.txt", "size": 100}`))
}))
client.SetBaseURL(server.URL)
sha, err := client.GetFileSHA("test/model", "file.txt")
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("no SHA256 found"))
Expect(sha).To(Equal(""))
})
})
Context("when getting model details", func() {
BeforeEach(func() {
mockFilesResponse := `[
{
"path": "model-Q4_K_M.gguf",
"size": 1000000,
"oid": "abc123",
"lfs": {
"oid": "sha256:def456",
"size": 1000000,
"pointer": "version https://git-lfs.github.com/spec/v1",
"sha256": "def456789"
}
},
{
"path": "README.md",
"size": 5000,
"oid": "readme123"
}
]`
mockFileInfoResponse := `{
"path": "model-Q4_K_M.gguf",
"size": 1000000,
"oid": "abc123",
"lfs": {
"oid": "sha256:def456",
"size": 1000000,
"pointer": "version https://git-lfs.github.com/spec/v1",
"sha256": "def456789"
}
}`
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/tree/main") {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(mockFilesResponse))
} else if strings.Contains(r.URL.Path, "/paths-info") {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(mockFileInfoResponse))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
client.SetBaseURL(server.URL)
})
It("should get model details successfully", func() {
details, err := client.GetModelDetails("test/model")
Expect(err).ToNot(HaveOccurred())
Expect(details.ModelID).To(Equal("test/model"))
Expect(details.Author).To(Equal("test"))
Expect(details.Files).To(HaveLen(2))
Expect(details.ReadmeFile).ToNot(BeNil())
Expect(details.ReadmeFile.Path).To(Equal("README.md"))
Expect(details.ReadmeFile.IsReadme).To(BeTrue())
})
})
Context("when getting README content", func() {
BeforeEach(func() {
mockReadmeContent := "# Test Model\n\nThis is a test model for demonstration purposes."
server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.Contains(r.URL.Path, "/raw/main/") {
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(mockReadmeContent))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
client.SetBaseURL(server.URL)
})
It("should get README content successfully", func() {
content, err := client.GetReadmeContent("test/model", "README.md")
Expect(err).ToNot(HaveOccurred())
Expect(content).To(Equal("# Test Model\n\nThis is a test model for demonstration purposes."))
})
})
Context("when filtering files", func() {
It("should filter files by quantization", func() {
files := []hfapi.ModelFile{
{Path: "model-Q4_K_M.gguf"},
{Path: "model-Q3_K_M.gguf"},
{Path: "README.md", IsReadme: true},
}
filtered := hfapi.FilterFilesByQuantization(files, "Q4_K_M")
Expect(filtered).To(HaveLen(1))
Expect(filtered[0].Path).To(Equal("model-Q4_K_M.gguf"))
})
It("should find preferred model file", func() {
files := []hfapi.ModelFile{
{Path: "model-Q3_K_M.gguf"},
{Path: "model-Q4_K_M.gguf"},
{Path: "README.md", IsReadme: true},
}
preferences := []string{"Q4_K_M", "Q3_K_M"}
preferred := hfapi.FindPreferredModelFile(files, preferences)
Expect(preferred).ToNot(BeNil())
Expect(preferred.Path).To(Equal("model-Q4_K_M.gguf"))
Expect(preferred.IsReadme).To(BeFalse())
})
It("should return nil if no preferred file found", func() {
files := []hfapi.ModelFile{
{Path: "model-Q2_K.gguf"},
{Path: "README.md", IsReadme: true},
}
preferences := []string{"Q4_K_M", "Q3_K_M"}
preferred := hfapi.FindPreferredModelFile(files, preferences)
Expect(preferred).To(BeNil())
})
})
})

View File

@@ -1,13 +0,0 @@
package hfapi_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestHfapi(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "HuggingFace API Suite")
}

View File

@@ -1,351 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/go-skynet/LocalAI/.github/gallery-agent/hfapi"
)
// ProcessedModelFile represents a processed model file with additional metadata
type ProcessedModelFile struct {
Path string `json:"path"`
Size int64 `json:"size"`
SHA256 string `json:"sha256"`
IsReadme bool `json:"is_readme"`
FileType string `json:"file_type"` // "model", "readme", "other"
}
// ProcessedModel represents a processed model with all gathered metadata
type ProcessedModel struct {
ModelID string `json:"model_id"`
Author string `json:"author"`
Downloads int `json:"downloads"`
LastModified string `json:"last_modified"`
Files []ProcessedModelFile `json:"files"`
PreferredModelFile *ProcessedModelFile `json:"preferred_model_file,omitempty"`
ReadmeFile *ProcessedModelFile `json:"readme_file,omitempty"`
ReadmeContent string `json:"readme_content,omitempty"`
ReadmeContentPreview string `json:"readme_content_preview,omitempty"`
QuantizationPreferences []string `json:"quantization_preferences"`
ProcessingError string `json:"processing_error,omitempty"`
}
// SearchResult represents the complete result of searching and processing models
type SearchResult struct {
SearchTerm string `json:"search_term"`
Limit int `json:"limit"`
Quantization string `json:"quantization"`
TotalModelsFound int `json:"total_models_found"`
Models []ProcessedModel `json:"models"`
FormattedOutput string `json:"formatted_output"`
}
// AddedModelSummary represents a summary of models added to the gallery
type AddedModelSummary struct {
SearchTerm string `json:"search_term"`
TotalFound int `json:"total_found"`
ModelsAdded int `json:"models_added"`
AddedModelIDs []string `json:"added_model_ids"`
AddedModelURLs []string `json:"added_model_urls"`
Quantization string `json:"quantization"`
ProcessingTime string `json:"processing_time"`
}
func main() {
startTime := time.Now()
// Check for synthetic mode
syntheticMode := os.Getenv("SYNTHETIC_MODE")
if syntheticMode == "true" || syntheticMode == "1" {
fmt.Println("Running in SYNTHETIC MODE - generating random test data")
err := runSyntheticMode()
if err != nil {
fmt.Fprintf(os.Stderr, "Error in synthetic mode: %v\n", err)
os.Exit(1)
}
return
}
// Get configuration from environment variables
searchTerm := os.Getenv("SEARCH_TERM")
if searchTerm == "" {
searchTerm = "GGUF"
}
limitStr := os.Getenv("LIMIT")
if limitStr == "" {
limitStr = "5"
}
limit, err := strconv.Atoi(limitStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing LIMIT: %v\n", err)
os.Exit(1)
}
quantization := os.Getenv("QUANTIZATION")
maxModels := os.Getenv("MAX_MODELS")
if maxModels == "" {
maxModels = "1"
}
maxModelsInt, err := strconv.Atoi(maxModels)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing MAX_MODELS: %v\n", err)
os.Exit(1)
}
// Print configuration
fmt.Printf("Gallery Agent Configuration:\n")
fmt.Printf(" Search Term: %s\n", searchTerm)
fmt.Printf(" Limit: %d\n", limit)
fmt.Printf(" Quantization: %s\n", quantization)
fmt.Printf(" Max Models to Add: %d\n", maxModelsInt)
fmt.Printf(" Gallery Index Path: %s\n", os.Getenv("GALLERY_INDEX_PATH"))
fmt.Println()
result, err := searchAndProcessModels(searchTerm, limit, quantization)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
fmt.Println(result.FormattedOutput)
// Use AI agent to select the most interesting models
fmt.Println("Using AI agent to select the most interesting models...")
models, err := selectMostInterestingModels(context.Background(), result)
if err != nil {
fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
// Continue with original result if selection fails
models = result.Models
}
fmt.Print(models)
// Filter out models that already exist in the gallery
fmt.Println("Filtering out existing models...")
models, err = filterExistingModels(models)
if err != nil {
fmt.Fprintf(os.Stderr, "Error filtering existing models: %v\n", err)
os.Exit(1)
}
// Limit to maxModelsInt after filtering
if len(models) > maxModelsInt {
models = models[:maxModelsInt]
}
// Track added models for summary
var addedModelIDs []string
var addedModelURLs []string
// Generate YAML entries and append to gallery/index.yaml
if len(models) > 0 {
for _, model := range models {
addedModelIDs = append(addedModelIDs, model.ModelID)
// Generate Hugging Face URL for the model
modelURL := fmt.Sprintf("https://huggingface.co/%s", model.ModelID)
addedModelURLs = append(addedModelURLs, modelURL)
}
fmt.Println("Generating YAML entries for selected models...")
err = generateYAMLForModels(context.Background(), models)
if err != nil {
fmt.Fprintf(os.Stderr, "Error generating YAML entries: %v\n", err)
os.Exit(1)
}
} else {
fmt.Println("No new models to add to the gallery.")
}
// Create and write summary
processingTime := time.Since(startTime).String()
summary := AddedModelSummary{
SearchTerm: searchTerm,
TotalFound: result.TotalModelsFound,
ModelsAdded: len(addedModelIDs),
AddedModelIDs: addedModelIDs,
AddedModelURLs: addedModelURLs,
Quantization: quantization,
ProcessingTime: processingTime,
}
// Write summary to file
summaryData, err := json.MarshalIndent(summary, "", " ")
if err != nil {
fmt.Fprintf(os.Stderr, "Error marshaling summary: %v\n", err)
} else {
err = os.WriteFile("gallery-agent-summary.json", summaryData, 0644)
if err != nil {
fmt.Fprintf(os.Stderr, "Error writing summary file: %v\n", err)
} else {
fmt.Printf("Summary written to gallery-agent-summary.json\n")
}
}
}
func searchAndProcessModels(searchTerm string, limit int, quantization string) (*SearchResult, error) {
client := hfapi.NewClient()
var outputBuilder strings.Builder
fmt.Println("Searching for models...")
// Initialize the result struct
result := &SearchResult{
SearchTerm: searchTerm,
Limit: limit,
Quantization: quantization,
Models: []ProcessedModel{},
}
models, err := client.GetLatest(searchTerm, limit)
if err != nil {
return nil, fmt.Errorf("failed to fetch models: %w", err)
}
fmt.Println("Models found:", len(models))
result.TotalModelsFound = len(models)
if len(models) == 0 {
outputBuilder.WriteString("No models found.\n")
result.FormattedOutput = outputBuilder.String()
return result, nil
}
outputBuilder.WriteString(fmt.Sprintf("Found %d models matching '%s':\n\n", len(models), searchTerm))
// Process each model
for i, model := range models {
outputBuilder.WriteString(fmt.Sprintf("%d. Processing Model: %s\n", i+1, model.ModelID))
outputBuilder.WriteString(fmt.Sprintf(" Author: %s\n", model.Author))
outputBuilder.WriteString(fmt.Sprintf(" Downloads: %d\n", model.Downloads))
outputBuilder.WriteString(fmt.Sprintf(" Last Modified: %s\n", model.LastModified))
// Initialize processed model struct
processedModel := ProcessedModel{
ModelID: model.ModelID,
Author: model.Author,
Downloads: model.Downloads,
LastModified: model.LastModified,
QuantizationPreferences: []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
}
// Get detailed model information
details, err := client.GetModelDetails(model.ModelID)
if err != nil {
errorMsg := fmt.Sprintf(" Error getting model details: %v\n", err)
outputBuilder.WriteString(errorMsg)
processedModel.ProcessingError = err.Error()
result.Models = append(result.Models, processedModel)
continue
}
// Define quantization preferences (in order of preference)
quantizationPreferences := []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"}
// Find preferred model file
preferredModelFile := hfapi.FindPreferredModelFile(details.Files, quantizationPreferences)
// Process files
processedFiles := make([]ProcessedModelFile, len(details.Files))
for j, file := range details.Files {
fileType := "other"
if file.IsReadme {
fileType = "readme"
} else if preferredModelFile != nil && file.Path == preferredModelFile.Path {
fileType = "model"
}
processedFiles[j] = ProcessedModelFile{
Path: file.Path,
Size: file.Size,
SHA256: file.SHA256,
IsReadme: file.IsReadme,
FileType: fileType,
}
}
processedModel.Files = processedFiles
// Set preferred model file
if preferredModelFile != nil {
for _, file := range processedFiles {
if file.Path == preferredModelFile.Path {
processedModel.PreferredModelFile = &file
break
}
}
}
// Print file information
outputBuilder.WriteString(fmt.Sprintf(" Files found: %d\n", len(details.Files)))
if preferredModelFile != nil {
outputBuilder.WriteString(fmt.Sprintf(" Preferred Model File: %s (SHA256: %s)\n",
preferredModelFile.Path,
preferredModelFile.SHA256))
} else {
outputBuilder.WriteString(fmt.Sprintf(" No model file found with quantization preferences: %v\n", quantizationPreferences))
}
if details.ReadmeFile != nil {
outputBuilder.WriteString(fmt.Sprintf(" README File: %s\n", details.ReadmeFile.Path))
// Find and set readme file
for _, file := range processedFiles {
if file.IsReadme {
processedModel.ReadmeFile = &file
break
}
}
fmt.Println("Getting real readme for", model.ModelID, "waiting...")
// Use agent to get the real readme and prepare the model description
readmeContent, err := getRealReadme(context.Background(), model.ModelID)
if err == nil {
processedModel.ReadmeContent = readmeContent
processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
processedModel.ReadmeContentPreview))
} else {
fmt.Fprintf(os.Stderr, "Error getting real readme for %s: %v, skipping\n", model.ModelID, err)
continue
}
fmt.Println("Real readme retrieved:", readmeContent)
// Get README content
// readmeContent, err := client.GetReadmeContent(model.ModelID, details.ReadmeFile.Path)
// if err == nil {
// processedModel.ReadmeContent = readmeContent
// processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
// outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
// processedModel.ReadmeContentPreview))
// }
}
// Print all files with their checksums
outputBuilder.WriteString(" All Files:\n")
for _, file := range processedFiles {
outputBuilder.WriteString(fmt.Sprintf(" - %s (%s, %d bytes", file.Path, file.FileType, file.Size))
if file.SHA256 != "" {
outputBuilder.WriteString(fmt.Sprintf(", SHA256: %s", file.SHA256))
}
outputBuilder.WriteString(")\n")
}
outputBuilder.WriteString("\n")
result.Models = append(result.Models, processedModel)
}
result.FormattedOutput = outputBuilder.String()
return result, nil
}
func truncateString(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
return s[:maxLen] + "..."
}
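Note that truncateString slices by byte offset, so a cut that lands inside a multi-byte UTF-8 sequence leaves an invalid preview string for readmes written in non-Latin scripts. A minimal rune-safe sketch (a hypothetical helper, not part of the original file) could look like this:
// truncateStringRunes truncates on rune boundaries instead of bytes,
// so multi-byte UTF-8 characters are never split mid-sequence.
// Hypothetical variant for illustration only.
func truncateStringRunes(s string, maxLen int) string {
	runes := []rune(s)
	if len(runes) <= maxLen {
		return s
	}
	return string(runes[:maxLen]) + "..."
}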

View File

@@ -1,190 +0,0 @@
package main
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
)
// runSyntheticMode generates synthetic test data and appends it to the gallery
func runSyntheticMode() error {
generator := NewSyntheticDataGenerator()
// Generate a random number of synthetic models (1-3)
numModels := generator.rand.Intn(3) + 1
fmt.Printf("Generating %d synthetic models for testing...\n", numModels)
var models []ProcessedModel
for i := 0; i < numModels; i++ {
model := generator.GenerateProcessedModel()
models = append(models, model)
fmt.Printf("Generated synthetic model: %s\n", model.ModelID)
}
// Generate YAML entries and append to gallery/index.yaml
fmt.Println("Generating YAML entries for synthetic models...")
err := generateYAMLForModels(context.Background(), models)
if err != nil {
return fmt.Errorf("error generating YAML entries: %w", err)
}
fmt.Printf("Successfully added %d synthetic models to the gallery for testing!\n", len(models))
return nil
}
// SyntheticDataGenerator provides methods to generate synthetic test data
type SyntheticDataGenerator struct {
rand *rand.Rand
}
// NewSyntheticDataGenerator creates a new synthetic data generator
func NewSyntheticDataGenerator() *SyntheticDataGenerator {
return &SyntheticDataGenerator{
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
}
}
// GenerateProcessedModelFile creates a synthetic ProcessedModelFile
func (g *SyntheticDataGenerator) GenerateProcessedModelFile() ProcessedModelFile {
fileTypes := []string{"model", "readme", "other"}
fileType := fileTypes[g.rand.Intn(len(fileTypes))]
var path string
var isReadme bool
switch fileType {
case "model":
path = fmt.Sprintf("model-%s.gguf", g.randomString(8))
isReadme = false
case "readme":
path = "README.md"
isReadme = true
default:
path = fmt.Sprintf("file-%s.txt", g.randomString(6))
isReadme = false
}
return ProcessedModelFile{
Path: path,
Size: int64(g.rand.Intn(1000000000) + 1000000), // 1MB to 1GB
SHA256: g.randomSHA256(),
IsReadme: isReadme,
FileType: fileType,
}
}
// GenerateProcessedModel creates a synthetic ProcessedModel
func (g *SyntheticDataGenerator) GenerateProcessedModel() ProcessedModel {
authors := []string{"microsoft", "meta", "google", "openai", "anthropic", "mistralai", "huggingface"}
modelNames := []string{"llama", "gpt", "claude", "mistral", "gemma", "phi", "qwen", "codellama"}
author := authors[g.rand.Intn(len(authors))]
modelName := modelNames[g.rand.Intn(len(modelNames))]
modelID := fmt.Sprintf("%s/%s-%s", author, modelName, g.randomString(6))
// Generate files
numFiles := g.rand.Intn(5) + 2 // 2-6 files
files := make([]ProcessedModelFile, numFiles)
// Ensure at least one model file and one readme
hasModelFile := false
hasReadme := false
for i := 0; i < numFiles; i++ {
files[i] = g.GenerateProcessedModelFile()
if files[i].FileType == "model" {
hasModelFile = true
}
if files[i].FileType == "readme" {
hasReadme = true
}
}
// Add required files if missing
if !hasModelFile {
modelFile := g.GenerateProcessedModelFile()
modelFile.FileType = "model"
modelFile.Path = fmt.Sprintf("%s-Q4_K_M.gguf", modelName)
files = append(files, modelFile)
}
if !hasReadme {
readmeFile := g.GenerateProcessedModelFile()
readmeFile.FileType = "readme"
readmeFile.Path = "README.md"
readmeFile.IsReadme = true
files = append(files, readmeFile)
}
// Find preferred model file
var preferredModelFile *ProcessedModelFile
for i := range files {
if files[i].FileType == "model" {
preferredModelFile = &files[i]
break
}
}
// Find readme file
var readmeFile *ProcessedModelFile
for i := range files {
if files[i].FileType == "readme" {
readmeFile = &files[i]
break
}
}
readmeContent := g.generateReadmeContent(modelName, author)
return ProcessedModel{
ModelID: modelID,
Author: author,
Downloads: g.rand.Intn(1000000) + 1000,
LastModified: g.randomDate(),
Files: files,
PreferredModelFile: preferredModelFile,
ReadmeFile: readmeFile,
ReadmeContent: readmeContent,
ReadmeContentPreview: truncateString(readmeContent, 200),
QuantizationPreferences: []string{"Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
ProcessingError: "",
}
}
// Helper methods for synthetic data generation
func (g *SyntheticDataGenerator) randomString(length int) string {
const charset = "abcdefghijklmnopqrstuvwxyz0123456789"
b := make([]byte, length)
for i := range b {
b[i] = charset[g.rand.Intn(len(charset))]
}
return string(b)
}
func (g *SyntheticDataGenerator) randomSHA256() string {
const charset = "0123456789abcdef"
b := make([]byte, 64)
for i := range b {
b[i] = charset[g.rand.Intn(len(charset))]
}
return string(b)
}
func (g *SyntheticDataGenerator) randomDate() string {
now := time.Now()
daysAgo := g.rand.Intn(365) // Random date within last year
pastDate := now.AddDate(0, 0, -daysAgo)
return pastDate.Format("2006-01-02T15:04:05.000Z")
}
func (g *SyntheticDataGenerator) generateReadmeContent(modelName, author string) string {
templates := []string{
fmt.Sprintf("# %s Model\n\nThis is a %s model developed by %s. It's designed for various natural language processing tasks including text generation, question answering, and conversation.\n\n## Features\n\n- High-quality text generation\n- Efficient inference\n- Multiple quantization options\n- Easy to use with LocalAI\n\n## Usage\n\nUse this model with LocalAI for various AI tasks.", strings.Title(modelName), modelName, author),
fmt.Sprintf("# %s\n\nA powerful language model from %s. This model excels at understanding and generating human-like text across multiple domains.\n\n## Capabilities\n\n- Text completion\n- Code generation\n- Creative writing\n- Technical documentation\n\n## Model Details\n\n- Architecture: Transformer-based\n- Training: Large-scale supervised learning\n- Quantization: Available in multiple formats", strings.Title(modelName), author),
fmt.Sprintf("# %s Language Model\n\nDeveloped by %s, this model represents state-of-the-art performance in natural language understanding and generation.\n\n## Key Features\n\n- Multilingual support\n- Context-aware responses\n- Efficient memory usage\n- Fast inference speed\n\n## Applications\n\n- Chatbots and virtual assistants\n- Content generation\n- Code completion\n- Educational tools", strings.Title(modelName), author),
}
return templates[g.rand.Intn(len(templates))]
}
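Since GenerateProcessedModel guarantees at least one model file and one README before returning, a small test can pin that invariant down. A minimal sketch, assuming a hypothetical synthetic_test.go in the same package (names are illustrative, not part of the repository):
// synthetic_test.go (hypothetical): checks that the generator always produces
// the fields the YAML generation step depends on.
package main

import "testing"

func TestGenerateProcessedModel(t *testing.T) {
	g := NewSyntheticDataGenerator()
	m := g.GenerateProcessedModel()
	// The generator backfills a model file and a README when the random files lack them.
	if m.PreferredModelFile == nil || m.ReadmeFile == nil {
		t.Fatalf("synthetic model missing model file or README: %+v", m)
	}
	if m.ProcessingError != "" {
		t.Fatalf("unexpected processing error: %s", m.ProcessingError)
	}
}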

View File

@@ -1,46 +0,0 @@
package main
import (
"fmt"
"github.com/go-skynet/LocalAI/.github/gallery-agent/hfapi"
"github.com/sashabaranov/go-openai"
"github.com/tmc/langchaingo/jsonschema"
)
// Get repository README from HF
type HFReadmeTool struct {
client *hfapi.Client
}
func (s *HFReadmeTool) Run(args map[string]any) (string, error) {
q, ok := args["repository"].(string)
if !ok {
return "", fmt.Errorf("no query")
}
readme, err := s.client.GetReadmeContent(q, "README.md")
if err != nil {
return "", err
}
return readme, nil
}
func (s *HFReadmeTool) Tool() openai.Tool {
return openai.Tool{
Type: openai.ToolTypeFunction,
Function: &openai.FunctionDefinition{
Name: "hf_readme",
Description: "A tool to get the README content of a huggingface repository",
Parameters: jsonschema.Definition{
Type: jsonschema.Object,
Properties: map[string]jsonschema.Definition{
"repository": {
Type: jsonschema.String,
Description: "The huggingface repository to get the README content of",
},
},
Required: []string{"repository"},
},
},
}
}
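For reference, the tool can also be exercised directly, outside the agent loop. A minimal sketch, assuming it sits in the same package as the files above; the repository name and function name are purely illustrative:
// exampleReadmeFetch invokes HFReadmeTool.Run directly with a hand-built
// argument map, mirroring what the agent does through tool calls.
func exampleReadmeFetch() {
	tool := &HFReadmeTool{client: hfapi.NewClient()}
	readme, err := tool.Run(map[string]any{"repository": "TheBloke/example-model-GGUF"})
	if err != nil {
		fmt.Println("readme fetch failed:", err)
		return
	}
	fmt.Printf("fetched %d bytes of README\n", len(readme))
}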

View File

@@ -87,42 +87,6 @@ jobs:
backend: "diffusers" backend: "diffusers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-l4t-diffusers'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-diffusers'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "diffusers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-chatterbox'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'true'
backend: "chatterbox"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# CUDA 11 additional backends # CUDA 11 additional backends
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "11" cuda-major-version: "11"
@@ -215,7 +179,7 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-vllm' tag-suffix: '-gpu-nvidia-cuda-12-vllm'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "vllm" backend: "vllm"
@@ -242,7 +206,7 @@ jobs:
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "diffusers" backend: "diffusers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
# CUDA 12 additional backends # CUDA 12 additional backends
@@ -314,7 +278,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-rerankers' tag-suffix: '-gpu-rocm-hipblas-rerankers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "rerankers" backend: "rerankers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -326,7 +290,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-llama-cpp' tag-suffix: '-gpu-rocm-hipblas-llama-cpp'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "llama-cpp" backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp" dockerfile: "./backend/Dockerfile.llama-cpp"
@@ -337,8 +301,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-vllm' tag-suffix: '-gpu-rocm-hipblas-vllm'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "vllm" backend: "vllm"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -349,8 +313,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-transformers' tag-suffix: '-gpu-rocm-hipblas-transformers'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "transformers" backend: "transformers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -361,8 +325,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-diffusers' tag-suffix: '-gpu-rocm-hipblas-diffusers'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "diffusers" backend: "diffusers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -374,8 +338,8 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-kokoro' tag-suffix: '-gpu-rocm-hipblas-kokoro'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "kokoro" backend: "kokoro"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -387,7 +351,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-faster-whisper' tag-suffix: '-gpu-rocm-hipblas-faster-whisper'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "faster-whisper" backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -399,7 +363,7 @@ jobs:
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-coqui' tag-suffix: '-gpu-rocm-hipblas-coqui'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "coqui" backend: "coqui"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
@@ -410,19 +374,31 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-bark' tag-suffix: '-gpu-rocm-hipblas-bark'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
skip-drivers: 'false' skip-drivers: 'false'
backend: "bark" backend: "bark"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
# sycl builds # sycl builds
- build-type: 'intel' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-rerankers' tag-suffix: '-gpu-intel-sycl-f32-rerankers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "rerankers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-rerankers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
@@ -453,97 +429,157 @@ jobs:
backend: "llama-cpp" backend: "llama-cpp"
dockerfile: "./backend/Dockerfile.llama-cpp" dockerfile: "./backend/Dockerfile.llama-cpp"
context: "./" context: "./"
- build-type: 'intel' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-vllm' tag-suffix: '-gpu-intel-sycl-f32-vllm'
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "vllm" backend: "vllm"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-transformers' tag-suffix: '-gpu-intel-sycl-f16-vllm'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "vllm"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-transformers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "transformers" backend: "transformers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-diffusers' tag-suffix: '-gpu-intel-sycl-f16-transformers'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "transformers"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-diffusers'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "diffusers" backend: "diffusers"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-l4t-kokoro'
runs-on: 'ubuntu-24.04-arm'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
skip-drivers: 'true'
backend: "kokoro"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# SYCL additional backends # SYCL additional backends
- build-type: 'intel' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-kokoro' tag-suffix: '-gpu-intel-sycl-f32-kokoro'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "kokoro" backend: "kokoro"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-faster-whisper' tag-suffix: '-gpu-intel-sycl-f16-kokoro'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "kokoro"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-faster-whisper'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "faster-whisper" backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-coqui' tag-suffix: '-gpu-intel-sycl-f16-faster-whisper'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "faster-whisper"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-coqui'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "coqui" backend: "coqui"
dockerfile: "./backend/Dockerfile.python" dockerfile: "./backend/Dockerfile.python"
context: "./backend" context: "./backend"
- build-type: 'intel' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-intel-bark' tag-suffix: '-gpu-intel-sycl-f16-coqui'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "coqui"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f32'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f32-bark'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "bark"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'sycl_f16'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-sycl-f16-bark'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
@@ -561,7 +597,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "piper" backend: "piper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# bark-cpp # bark-cpp
- build-type: '' - build-type: ''
@@ -574,7 +610,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "bark-cpp" backend: "bark-cpp"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: '' - build-type: ''
cuda-major-version: "" cuda-major-version: ""
@@ -623,7 +659,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -635,7 +671,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "11" cuda-major-version: "11"
@@ -647,7 +683,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f32' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
@@ -659,7 +695,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f16' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
@@ -671,7 +707,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'vulkan' - build-type: 'vulkan'
cuda-major-version: "" cuda-major-version: ""
@@ -683,7 +719,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -695,7 +731,7 @@ jobs:
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm' runs-on: 'ubuntu-24.04-arm'
backend: "stablediffusion-ggml" backend: "stablediffusion-ggml"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# whisper # whisper
- build-type: '' - build-type: ''
@@ -708,7 +744,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -720,7 +756,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "11" cuda-major-version: "11"
@@ -732,7 +768,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f32' - build-type: 'sycl_f32'
cuda-major-version: "" cuda-major-version: ""
@@ -744,7 +780,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'sycl_f16' - build-type: 'sycl_f16'
cuda-major-version: "" cuda-major-version: ""
@@ -756,7 +792,7 @@ jobs:
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'vulkan' - build-type: 'vulkan'
cuda-major-version: "" cuda-major-version: ""
@@ -768,7 +804,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'cublas' - build-type: 'cublas'
cuda-major-version: "12" cuda-major-version: "12"
@@ -780,19 +816,19 @@ jobs:
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0" base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm' runs-on: 'ubuntu-24.04-arm'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
- build-type: 'hipblas' - build-type: 'hipblas'
cuda-major-version: "" cuda-major-version: ""
cuda-minor-version: "" cuda-minor-version: ""
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-whisper' tag-suffix: '-gpu-hipblas-whisper'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
skip-drivers: 'false' skip-drivers: 'false'
backend: "whisper" backend: "whisper"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
#silero-vad #silero-vad
- build-type: '' - build-type: ''
@@ -805,7 +841,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "silero-vad" backend: "silero-vad"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# local-store # local-store
- build-type: '' - build-type: ''
@@ -818,7 +854,7 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "local-store" backend: "local-store"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# huggingface # huggingface
- build-type: '' - build-type: ''
@@ -831,258 +867,8 @@ jobs:
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
backend: "huggingface" backend: "huggingface"
dockerfile: "./backend/Dockerfile.golang" dockerfile: "./backend/Dockerfile.go"
context: "./" context: "./"
# rfdetr
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-rfdetr'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-rfdetr'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
backend: "rfdetr"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# exllama2
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-cpu-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11-exllama2'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'intel'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-intel-exllama2'
runs-on: 'ubuntu-latest'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
skip-drivers: 'false'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas-exllama2'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
runs-on: 'ubuntu-latest'
backend: "exllama2"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-l4t-arm64-chatterbox'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
backend: "chatterbox"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# runs out of space on the runner
# - build-type: 'hipblas'
# cuda-major-version: ""
# cuda-minor-version: ""
# platforms: 'linux/amd64'
# tag-latest: 'auto'
# tag-suffix: '-gpu-hipblas-rfdetr'
# base-image: "rocm/dev-ubuntu-22.04:6.4.3"
# runs-on: 'ubuntu-latest'
# skip-drivers: 'false'
# backend: "rfdetr"
# dockerfile: "./backend/Dockerfile.python"
# context: "./backend"
# kitten-tts
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-kitten-tts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "kitten-tts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
# neutts
- build-type: ''
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-cpu-neutts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12-neutts'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'hipblas'
cuda-major-version: ""
cuda-minor-version: ""
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-rocm-hipblas-neutts'
runs-on: 'arc-runner-set'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
skip-drivers: 'false'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
- build-type: 'l4t'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
skip-drivers: 'true'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-neutts'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
backend: "neutts"
dockerfile: "./backend/Dockerfile.python"
context: "./backend"
backend-jobs-darwin:
uses: ./.github/workflows/backend_build_darwin.yml
strategy:
matrix:
include:
- backend: "diffusers"
tag-suffix: "-metal-darwin-arm64-diffusers"
build-type: "mps"
- backend: "mlx"
tag-suffix: "-metal-darwin-arm64-mlx"
build-type: "mps"
- backend: "chatterbox"
tag-suffix: "-metal-darwin-arm64-chatterbox"
build-type: "mps"
- backend: "mlx-vlm"
tag-suffix: "-metal-darwin-arm64-mlx-vlm"
build-type: "mps"
- backend: "mlx-audio"
tag-suffix: "-metal-darwin-arm64-mlx-audio"
build-type: "mps"
- backend: "stablediffusion-ggml"
tag-suffix: "-metal-darwin-arm64-stablediffusion-ggml"
build-type: "metal"
lang: "go"
- backend: "whisper"
tag-suffix: "-metal-darwin-arm64-whisper"
build-type: "metal"
lang: "go"
with:
backend: ${{ matrix.backend }}
build-type: ${{ matrix.build-type }}
go-version: "1.24.x"
tag-suffix: ${{ matrix.tag-suffix }}
lang: ${{ matrix.lang || 'python' }}
use-pip: ${{ matrix.backend == 'diffusers' }}
runs-on: "macOS-14"
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
llama-cpp-darwin: llama-cpp-darwin:
runs-on: macOS-14 runs-on: macOS-14
strategy: strategy:
@@ -1090,7 +876,7 @@ jobs:
go-version: ['1.21.x'] go-version: ['1.21.x']
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -1107,19 +893,21 @@ jobs:
- name: Build llama-cpp-darwin - name: Build llama-cpp-darwin
run: | run: |
make protogen-go make protogen-go
make backends/llama-cpp-darwin make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
- name: Upload llama-cpp.tar - name: Upload llama-cpp.tar
uses: actions/upload-artifact@v5 uses: actions/upload-artifact@v4
with: with:
name: llama-cpp-tar name: llama-cpp-tar
path: backend-images/llama-cpp.tar path: build/llama-cpp.tar
llama-cpp-darwin-publish: llama-cpp-darwin-publish:
needs: llama-cpp-darwin needs: llama-cpp-darwin
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Download llama-cpp.tar - name: Download llama-cpp.tar
uses: actions/download-artifact@v6 uses: actions/download-artifact@v4
with: with:
name: llama-cpp-tar name: llama-cpp-tar
path: . path: .
@@ -1176,7 +964,7 @@ jobs:
go-version: ['1.21.x'] go-version: ['1.21.x']
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -1195,19 +983,20 @@ jobs:
make protogen-go make protogen-go
make build make build
export PLATFORMARCH=darwin/amd64 export PLATFORMARCH=darwin/amd64
make backends/llama-cpp-darwin bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
- name: Upload llama-cpp.tar - name: Upload llama-cpp.tar
uses: actions/upload-artifact@v5 uses: actions/upload-artifact@v4
with: with:
name: llama-cpp-tar-x86 name: llama-cpp-tar-x86
path: backend-images/llama-cpp.tar path: build/llama-cpp.tar
llama-cpp-darwin-x86-publish: llama-cpp-darwin-x86-publish:
if: github.event_name != 'pull_request'
needs: llama-cpp-darwin-x86 needs: llama-cpp-darwin-x86
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Download llama-cpp.tar - name: Download llama-cpp.tar
uses: actions/download-artifact@v6 uses: actions/download-artifact@v4
with: with:
name: llama-cpp-tar-x86 name: llama-cpp-tar-x86
path: . path: .
@@ -1256,4 +1045,4 @@ jobs:
run: | run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push llama-cpp.tar $tag crane push llama-cpp.tar $tag
done done

View File

@@ -55,9 +55,9 @@ on:
type: string type: string
secrets: secrets:
dockerUsername: dockerUsername:
required: false required: true
dockerPassword: dockerPassword:
required: false required: true
quayUsername: quayUsername:
required: true required: true
quayPassword: quayPassword:
@@ -66,8 +66,6 @@ on:
jobs: jobs:
backend-build: backend-build:
runs-on: ${{ inputs.runs-on }} runs-on: ${{ inputs.runs-on }}
env:
quay_username: ${{ secrets.quayUsername }}
steps: steps:
@@ -97,7 +95,7 @@ jobs:
&& sudo apt-get install -y git && sudo apt-get install -y git
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Release space from worker - name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest' if: inputs.runs-on == 'ubuntu-latest'
@@ -189,7 +187,7 @@ jobs:
password: ${{ secrets.dockerPassword }} password: ${{ secrets.dockerPassword }}
- name: Login to Quay.io - name: Login to Quay.io
if: ${{ env.quay_username != '' }} # if: github.event_name != 'pull_request'
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
registry: quay.io registry: quay.io
@@ -232,7 +230,7 @@ jobs:
file: ${{ inputs.dockerfile }} file: ${{ inputs.dockerfile }}
cache-from: type=gha cache-from: type=gha
platforms: ${{ inputs.platforms }} platforms: ${{ inputs.platforms }}
push: ${{ env.quay_username != '' }} push: true
tags: ${{ steps.meta_pull_request.outputs.tags }} tags: ${{ steps.meta_pull_request.outputs.tags }}
labels: ${{ steps.meta_pull_request.outputs.labels }} labels: ${{ steps.meta_pull_request.outputs.labels }}
@@ -240,4 +238,4 @@ jobs:
- name: job summary - name: job summary
run: | run: |
echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,144 +0,0 @@
---
name: 'build darwin python backend container images (reusable)'
on:
workflow_call:
inputs:
backend:
description: 'Backend to build'
required: true
type: string
build-type:
description: 'Build type (e.g., mps)'
default: ''
type: string
use-pip:
description: 'Use pip to install dependencies'
default: false
type: boolean
lang:
description: 'Programming language (e.g. go)'
default: 'python'
type: string
go-version:
description: 'Go version to use'
default: '1.24.x'
type: string
tag-suffix:
description: 'Tag suffix for the built image'
required: true
type: string
runs-on:
description: 'Runner to use'
default: 'macOS-14'
type: string
secrets:
dockerUsername:
required: false
dockerPassword:
required: false
quayUsername:
required: true
quayPassword:
required: true
jobs:
darwin-backend-build:
runs-on: ${{ inputs.runs-on }}
strategy:
matrix:
go-version: ['${{ inputs.go-version }}']
steps:
- name: Clone
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
# You can test your matrix by printing the current Go version
- name: Display Go version
run: go version
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
- name: Build ${{ inputs.backend }}-darwin
run: |
make protogen-go
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend
- name: Upload ${{ inputs.backend }}.tar
uses: actions/upload-artifact@v5
with:
name: ${{ inputs.backend }}-tar
path: backend-images/${{ inputs.backend }}.tar
darwin-backend-publish:
needs: darwin-backend-build
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download ${{ inputs.backend }}.tar
uses: actions/download-artifact@v6
with:
name: ${{ inputs.backend }}-tar
path: .
- name: Install crane
run: |
curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
sudo mv crane /usr/local/bin/
- name: Log in to DockerHub
run: |
echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin
- name: Log in to quay.io
run: |
echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
localai/localai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta
id: quaymeta
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/local-ai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Push Docker image (DockerHub)
run: |
for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
- name: Push Docker image (Quay)
run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done

View File

@@ -1,78 +0,0 @@
name: 'build backend container images (PR-filtered)'
on:
pull_request:
concurrency:
group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
matrix-darwin: ${{ steps.set-matrix.outputs.matrix-darwin }}
has-backends: ${{ steps.set-matrix.outputs.has-backends }}
has-backends-darwin: ${{ steps.set-matrix.outputs.has-backends-darwin }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
- name: Setup Bun
uses: oven-sh/setup-bun@v2
- name: Install dependencies
run: |
bun add js-yaml
bun add @octokit/core
# filters the matrix in backend.yml
- name: Filter matrix for changed backends
id: set-matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_EVENT_PATH: ${{ github.event_path }}
run: bun run scripts/changed-backends.js
backend-jobs:
needs: generate-matrix
uses: ./.github/workflows/backend_build.yml
if: needs.generate-matrix.outputs.has-backends == 'true'
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
backend: ${{ matrix.backend }}
dockerfile: ${{ matrix.dockerfile }}
skip-drivers: ${{ matrix.skip-drivers }}
context: ${{ matrix.context }}
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
backend-jobs-darwin:
needs: generate-matrix
uses: ./.github/workflows/backend_build_darwin.yml
if: needs.generate-matrix.outputs.has-backends-darwin == 'true'
with:
backend: ${{ matrix.backend }}
build-type: ${{ matrix.build-type }}
go-version: "1.24.x"
tag-suffix: ${{ matrix.tag-suffix }}
lang: ${{ matrix.lang || 'python' }}
use-pip: ${{ matrix.backend == 'diffusers' }}
runs-on: "macOS-14"
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix-darwin) }}

View File

@@ -11,57 +11,13 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Go - name: Set up Go
uses: actions/setup-go@v5 uses: actions/setup-go@v5
with: with:
go-version: 1.25 go-version: 1.23
- name: Run GoReleaser - name: Run GoReleaser
run: | run: |
make dev-dist make dev-dist
launcher-build-darwin:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.25
- name: Build launcher for macOS ARM64
run: |
make build-launcher-darwin
ls -liah dist
- name: Upload macOS launcher artifacts
uses: actions/upload-artifact@v5
with:
name: launcher-macos
path: dist/
retention-days: 30
launcher-build-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.25
- name: Build launcher for Linux
run: |
sudo apt-get update
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: actions/upload-artifact@v5
with:
name: launcher-linux
path: local-ai-launcher-linux.tar.xz
retention-days: 30

View File

@@ -1,10 +1,10 @@
name: Bump Backend dependencies name: Bump dependencies
on: on:
schedule: schedule:
- cron: 0 20 * * * - cron: 0 20 * * *
workflow_dispatch: workflow_dispatch:
jobs: jobs:
bump-backends: bump:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
@@ -21,7 +21,7 @@ jobs:
variable: "BARKCPP_VERSION" variable: "BARKCPP_VERSION"
branch: "main" branch: "main"
file: "Makefile" file: "Makefile"
- repository: "leejet/stable-diffusion.cpp" - repository: "richiejp/stable-diffusion.cpp"
variable: "STABLEDIFFUSION_GGML_VERSION" variable: "STABLEDIFFUSION_GGML_VERSION"
branch: "master" branch: "master"
file: "backend/go/stablediffusion-ggml/Makefile" file: "backend/go/stablediffusion-ggml/Makefile"
@@ -31,7 +31,7 @@ jobs:
file: "backend/go/piper/Makefile" file: "backend/go/piper/Makefile"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Bump dependencies 🔧 - name: Bump dependencies 🔧
id: bump id: bump
run: | run: |

View File

@@ -1,10 +1,10 @@
name: Bump Documentation name: Bump dependencies
on: on:
schedule: schedule:
- cron: 0 20 * * * - cron: 0 20 * * *
workflow_dispatch: workflow_dispatch:
jobs: jobs:
bump-docs: bump:
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
@@ -12,7 +12,7 @@ jobs:
- repository: "mudler/LocalAI" - repository: "mudler/LocalAI"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Bump dependencies 🔧 - name: Bump dependencies 🔧
run: | run: |
bash .github/bump_docs.sh ${{ matrix.repository }} bash .github/bump_docs.sh ${{ matrix.repository }}

View File

@@ -15,7 +15,7 @@ jobs:
&& sudo add-apt-repository -y ppa:git-core/ppa \ && sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \ && sudo apt-get update \
&& sudo apt-get install -y git && sudo apt-get install -y git
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- name: Install dependencies - name: Install dependencies
run: | run: |
sudo apt-get update sudo apt-get update

View File

@@ -20,7 +20,7 @@ jobs:
skip-commit-verification: true skip-commit-verification: true
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Approve a PR if not already approved - name: Approve a PR if not already approved
run: | run: |

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- uses: actions/setup-go@v5 - uses: actions/setup-go@v5

View File

@@ -1,126 +0,0 @@
name: Gallery Agent
on:
schedule:
- cron: '0 */1 * * *' # Run every hour
workflow_dispatch:
inputs:
search_term:
description: 'Search term for models'
required: false
default: 'GGUF'
type: string
limit:
description: 'Maximum number of models to process'
required: false
default: '15'
type: string
quantization:
description: 'Preferred quantization format'
required: false
default: 'Q4_K_M'
type: string
max_models:
description: 'Maximum number of models to add to the gallery'
required: false
default: '1'
type: string
jobs:
gallery-agent:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.21'
- name: Build gallery agent
run: |
cd .github/gallery-agent
go mod download
go build -o gallery-agent .
- name: Run gallery agent
env:
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
SEARCH_TERM: ${{ github.event.inputs.search_term || 'GGUF' }}
LIMIT: ${{ github.event.inputs.limit || '15' }}
QUANTIZATION: ${{ github.event.inputs.quantization || 'Q4_K_M' }}
MAX_MODELS: ${{ github.event.inputs.max_models || '1' }}
run: |
export GALLERY_INDEX_PATH=$PWD/gallery/index.yaml
cd .github/gallery-agent
./gallery-agent
rm -rf gallery-agent
- name: Check for changes
id: check_changes
run: |
if git diff --quiet gallery/index.yaml; then
echo "changes=false" >> $GITHUB_OUTPUT
echo "No changes detected in gallery/index.yaml"
else
echo "changes=true" >> $GITHUB_OUTPUT
echo "Changes detected in gallery/index.yaml"
git diff gallery/index.yaml
fi
- name: Read gallery agent summary
id: read_summary
if: steps.check_changes.outputs.changes == 'true'
run: |
if [ -f ".github/gallery-agent/gallery-agent-summary.json" ]; then
echo "summary_exists=true" >> $GITHUB_OUTPUT
# Extract summary data using jq
echo "search_term=$(jq -r '.search_term' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "total_found=$(jq -r '.total_found' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "models_added=$(jq -r '.models_added' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "quantization=$(jq -r '.quantization' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "processing_time=$(jq -r '.processing_time' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
# Create a formatted list of added models with URLs
added_models=$(jq -r 'range(0; .added_model_ids | length) as $i | "- [\(.added_model_ids[$i])](\(.added_model_urls[$i]))"' .github/gallery-agent/gallery-agent-summary.json | tr '\n' '\n')
echo "added_models<<EOF" >> $GITHUB_OUTPUT
echo "$added_models" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
rm -f .github/gallery-agent/gallery-agent-summary.json
else
echo "summary_exists=false" >> $GITHUB_OUTPUT
fi
- name: Create Pull Request
if: steps.check_changes.outputs.changes == 'true'
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI
commit-message: 'chore(model gallery): :robot: add new models via gallery agent'
title: 'chore(model gallery): :robot: add ${{ steps.read_summary.outputs.models_added || 0 }} new models via gallery agent'
# Branch has to be unique so PRs are not overriding each other
branch-suffix: timestamp
body: |
This PR was automatically created by the gallery agent workflow.
**Summary:**
- **Search Term:** ${{ steps.read_summary.outputs.search_term || github.event.inputs.search_term || 'GGUF' }}
- **Models Found:** ${{ steps.read_summary.outputs.total_found || 'N/A' }}
- **Models Added:** ${{ steps.read_summary.outputs.models_added || '0' }}
- **Quantization:** ${{ steps.read_summary.outputs.quantization || github.event.inputs.quantization || 'Q4_K_M' }}
- **Processing Time:** ${{ steps.read_summary.outputs.processing_time || 'N/A' }}
**Added Models:**
${{ steps.read_summary.outputs.added_models || '- No models added' }}
**Workflow Details:**
- Triggered by: `${{ github.event_name }}`
- Run ID: `${{ github.run_id }}`
- Commit: `${{ github.sha }}`
signoff: true
delete-branch: true
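For reference, the `read_summary` step above expects a JSON summary written by the gallery agent. The schema is not shown in this diff; the following is a minimal, hypothetical sketch reconstructed purely from the field names used in the `jq` calls, with placeholder values:

```bash
# Hypothetical gallery-agent-summary.json, inferred from the jq calls in read_summary.
# Field names come from the workflow; every value below is a placeholder.
cat > .github/gallery-agent/gallery-agent-summary.json <<'EOF'
{
  "search_term": "GGUF",
  "total_found": 15,
  "models_added": 1,
  "quantization": "Q4_K_M",
  "processing_time": "3m12s",
  "added_model_ids": ["example-model-7b-gguf"],
  "added_model_urls": ["https://huggingface.co/example-org/example-model-7b-GGUF"]
}
EOF

# The workflow then extracts individual fields, e.g.:
jq -r '.models_added' .github/gallery-agent/gallery-agent-summary.json   # -> 1
```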

View File

@@ -73,7 +73,7 @@ jobs:
uses: docker/setup-buildx-action@master uses: docker/setup-buildx-action@master
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Cache GRPC - name: Cache GRPC
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6

View File

@@ -43,7 +43,7 @@ jobs:
uses: docker/setup-buildx-action@master uses: docker/setup-buildx-action@master
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Cache Intel images - name: Cache Intel images
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6

View File

@@ -39,7 +39,7 @@ jobs:
cuda-minor-version: "0" cuda-minor-version: "0"
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'false' tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12' tag-suffix: '-gpu-nvidia-cuda12'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
@@ -47,16 +47,16 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'false' tag-latest: 'false'
tag-suffix: '-hipblas' tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl' - build-type: 'sycl_f16'
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'false' tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
tag-suffix: 'sycl' tag-suffix: 'sycl-f16'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
- build-type: 'vulkan' - build-type: 'vulkan'

View File

@@ -39,7 +39,7 @@ jobs:
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-hipblas' tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3" base-image: "rocm/dev-ubuntu-22.04:6.1"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
@@ -83,7 +83,7 @@ jobs:
cuda-minor-version: "7" cuda-minor-version: "7"
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11' tag-suffix: '-gpu-nvidia-cuda11'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target" makeflags: "--jobs=4 --output-sync=target"
@@ -94,7 +94,7 @@ jobs:
cuda-minor-version: "0" cuda-minor-version: "0"
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12' tag-suffix: '-gpu-nvidia-cuda12'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
@@ -103,21 +103,30 @@ jobs:
- build-type: 'vulkan' - build-type: 'vulkan'
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
tag-suffix: '-gpu-vulkan' tag-suffix: '-vulkan'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04" base-image: "ubuntu:22.04"
skip-drivers: 'false' skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target" makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-vulkan" aio: "-aio-gpu-vulkan"
- build-type: 'intel' - build-type: 'sycl_f16'
platforms: 'linux/amd64' platforms: 'linux/amd64'
tag-latest: 'auto' tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest" base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04" grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel' tag-suffix: '-gpu-intel-f16'
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target" makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel" aio: "-aio-gpu-intel-f16"
- build-type: 'sycl_f32'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel-f32'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel-f32"
gh-runner: gh-runner:
uses: ./.github/workflows/image_build.yml uses: ./.github/workflows/image_build.yml

View File

@@ -94,7 +94,7 @@ jobs:
&& sudo apt-get update \ && sudo apt-get update \
&& sudo apt-get install -y git && sudo apt-get install -y git
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Release space from worker - name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest' if: inputs.runs-on == 'ubuntu-latest'

View File

@@ -9,4 +9,4 @@ jobs:
pull-requests: write pull-requests: write
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/labeler@v6 - uses: actions/labeler@v5

View File

@@ -6,15 +6,14 @@ permissions:
contents: write contents: write
pull-requests: write pull-requests: write
packages: read packages: read
issues: write # for Homebrew/actions/post-comment
actions: write # to dispatch publish workflow
jobs: jobs:
dependabot: dependabot:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.actor == 'localai-bot' && !contains(github.event.pull_request.title, 'chore(model gallery):') }} if: ${{ github.actor == 'localai-bot' }}
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v5 uses: actions/checkout@v4
- name: Approve a PR if not already approved - name: Approve a PR if not already approved
run: | run: |

View File

@@ -1,27 +1,22 @@
name: Notifications for new models name: Notifications for new models
on: on:
pull_request_target: pull_request:
types: types:
- closed - closed
permissions:
contents: read
pull-requests: read
jobs: jobs:
notify-discord: notify-discord:
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }} if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
env: env:
MODEL_NAME: gemma-3-12b-it-qat MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 # needed to checkout all branches for this Action to work fetch-depth: 0 # needed to checkout all branches for this Action to work
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes
- uses: mudler/localai-github-action@v1 - uses: mudler/localai-github-action@v1
with: with:
model: 'gemma-3-12b-it-qat' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file" model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
# Check the PR diff using the current branch and the base branch of the PR # Check the PR diff using the current branch and the base branch of the PR
- uses: GrantBirki/git-diff-action@v2.8.1 - uses: GrantBirki/git-diff-action@v2.8.1
id: git-diff-action id: git-diff-action
@@ -84,7 +79,7 @@ jobs:
args: ${{ steps.summarize.outputs.message }} args: ${{ steps.summarize.outputs.message }}
- name: Setup tmate session if fails - name: Setup tmate session if fails
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180
@@ -92,13 +87,12 @@ jobs:
notify-twitter: notify-twitter:
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }} if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
env: env:
MODEL_NAME: gemma-3-12b-it-qat MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 # needed to checkout all branches for this Action to work fetch-depth: 0 # needed to checkout all branches for this Action to work
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes
- name: Start LocalAI - name: Start LocalAI
run: | run: |
echo "Starting LocalAI..." echo "Starting LocalAI..."
@@ -167,7 +161,7 @@ jobs:
TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }} TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
- name: Setup tmate session if fails - name: Setup tmate session if fails
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180

View File

@@ -11,11 +11,10 @@ jobs:
RELEASE_BODY: ${{ github.event.release.body }} RELEASE_BODY: ${{ github.event.release.body }}
RELEASE_TITLE: ${{ github.event.release.name }} RELEASE_TITLE: ${{ github.event.release.name }}
RELEASE_TAG_NAME: ${{ github.event.release.tag_name }} RELEASE_TAG_NAME: ${{ github.event.release.tag_name }}
MODEL_NAME: gemma-3-12b-it-qat
steps: steps:
- uses: mudler/localai-github-action@v1 - uses: mudler/localai-github-action@v1
with: with:
model: 'gemma-3-12b-it-qat' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file" model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
- name: Summarize - name: Summarize
id: summarize id: summarize
run: | run: |

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Go - name: Set up Go
@@ -23,42 +23,4 @@ jobs:
version: v2.11.0 version: v2.11.0
args: release --clean args: release --clean
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
launcher-build-darwin:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for macOS ARM64
run: |
make build-launcher-darwin
- name: Upload DMG to Release
uses: softprops/action-gh-release@v2
with:
files: ./dist/LocalAI.dmg
launcher-build-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for Linux
run: |
sudo apt-get update
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: softprops/action-gh-release@v2
with:
files: ./local-ai-launcher-linux.tar.xz

View File

@@ -14,17 +14,17 @@ jobs:
GO111MODULE: on GO111MODULE: on
steps: steps:
- name: Checkout Source - name: Checkout Source
uses: actions/checkout@v5 uses: actions/checkout@v4
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
- name: Run Gosec Security Scanner - name: Run Gosec Security Scanner
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
uses: securego/gosec@v2.22.9 uses: securego/gosec@v2.22.7
with: with:
      # we let the report content trigger a failure using the GitHub Security features. # we let the report content trigger a failure using the GitHub Security features.
      args: '-no-fail -fmt sarif -out results.sarif ./...' args: '-no-fail -fmt sarif -out results.sarif ./...'
- name: Upload SARIF file - name: Upload SARIF file
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
uses: github/codeql-action/upload-sarif@v4 uses: github/codeql-action/upload-sarif@v3
with: with:
# Path to SARIF file relative to the root of the repository # Path to SARIF file relative to the root of the repository
sarif_file: results.sarif sarif_file: results.sarif

View File

@@ -10,7 +10,7 @@ jobs:
stale: stale:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v9 - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
with: with:
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.' stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.' stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'

View File

@@ -19,7 +19,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -61,7 +61,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -83,7 +83,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -104,7 +104,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -124,7 +124,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -186,7 +186,7 @@ jobs:
# sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true # sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
# df -h # df -h
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -211,7 +211,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v5 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -232,7 +232,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies

View File

@@ -21,22 +21,8 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
go-version: ['1.25.x'] go-version: ['1.21.x']
steps: steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Release space from worker - name: Release space from worker
run: | run: |
echo "Listing top largest packages" echo "Listing top largest packages"
@@ -70,7 +56,7 @@ jobs:
sudo rm -rfv build || true sudo rm -rfv build || true
df -h df -h
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -124,7 +110,7 @@ jobs:
PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test
- name: Setup tmate session if tests fail - name: Setup tmate session if tests fail
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180
@@ -166,7 +152,7 @@ jobs:
sudo rm -rfv build || true sudo rm -rfv build || true
df -h df -h
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -183,7 +169,7 @@ jobs:
PATH="$PATH:$HOME/go/bin" make backends/local-store backends/silero-vad backends/llama-cpp backends/whisper backends/piper backends/stablediffusion-ggml docker-build-aio e2e-aio PATH="$PATH:$HOME/go/bin" make backends/local-store backends/silero-vad backends/llama-cpp backends/whisper backends/piper backends/stablediffusion-ggml docker-build-aio e2e-aio
- name: Setup tmate session if tests fail - name: Setup tmate session if tests fail
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180
@@ -193,10 +179,10 @@ jobs:
runs-on: macOS-14 runs-on: macOS-14
strategy: strategy:
matrix: matrix:
go-version: ['1.25.x'] go-version: ['1.21.x']
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v5 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -214,7 +200,11 @@ jobs:
- name: Build llama-cpp-darwin - name: Build llama-cpp-darwin
run: | run: |
make protogen-go make protogen-go
make backends/llama-cpp-darwin make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
./local-ai backends install "ocifile://$PWD/build/llama-cpp.tar"
- name: Test - name: Test
run: | run: |
export C_INCLUDE_PATH=/usr/local/include export C_INCLUDE_PATH=/usr/local/include
@@ -226,7 +216,7 @@ jobs:
PATH="$PATH:$HOME/go/bin" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test PATH="$PATH:$HOME/go/bin" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
- name: Setup tmate session if tests fail - name: Setup tmate session if tests fail
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180

View File

@@ -9,7 +9,7 @@ jobs:
fail-fast: false fail-fast: false
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v5 - uses: actions/checkout@v4
- uses: actions/setup-go@v5 - uses: actions/setup-go@v5
with: with:
go-version: 'stable' go-version: 'stable'

3
.gitignore vendored
View File

@@ -12,7 +12,6 @@ prepare-sources
/backends /backends
/backend-images /backend-images
/result.yaml /result.yaml
protoc
*.log *.log
@@ -24,7 +23,7 @@ go-bert
# LocalAI build binary # LocalAI build binary
LocalAI LocalAI
/local-ai local-ai
# prevent above rules from omitting the helm chart # prevent above rules from omitting the helm chart
!charts/* !charts/*
# prevent above rules from omitting the api/localai folder # prevent above rules from omitting the api/localai folder

View File

@@ -8,7 +8,7 @@ source:
enabled: true enabled: true
name_template: '{{ .ProjectName }}-{{ .Tag }}-source' name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
builds: builds:
- main: ./cmd/local-ai -
env: env:
- CGO_ENABLED=0 - CGO_ENABLED=0
ldflags: ldflags:

View File

@@ -9,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \ ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas-base libopenblas-dev && \ python3 python-is-python3 ffmpeg && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
@@ -72,22 +72,6 @@ RUN <<EOT bash
fi fi
EOT EOT
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
echo "nvidia-l4t" > /run/localai/capability
fi
EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu2204-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss
fi
EOT
# If we are building with clblas support, we need the libraries for the builds # If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \ RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \ apt-get update && \
@@ -110,12 +94,6 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
ldconfig \ ldconfig \
; fi ; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
; fi
RUN expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
# Cuda # Cuda
ENV PATH=/usr/local/cuda/bin:${PATH} ENV PATH=/usr/local/cuda/bin:${PATH}

297
Makefile
View File

@@ -2,10 +2,11 @@ GOCMD=go
GOTEST=$(GOCMD) test GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet GOVET=$(GOCMD) vet
BINARY_NAME=local-ai BINARY_NAME=local-ai
LAUNCHER_BINARY_NAME=local-ai-launcher
GORELEASER?= GORELEASER?=
ONEAPI_VERSION?=2025.2
export BUILD_TYPE?= export BUILD_TYPE?=
GO_TAGS?= GO_TAGS?=
@@ -91,17 +92,7 @@ build: protogen-go install-go-tools ## Build the project
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET}) $(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
$(info ${GREEN}I UPX: ${YELLOW}$(UPX)${RESET}) $(info ${GREEN}I UPX: ${YELLOW}$(UPX)${RESET})
rm -rf $(BINARY_NAME) || true rm -rf $(BINARY_NAME) || true
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./cmd/local-ai CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
build-launcher: ## Build the launcher application
$(info ${GREEN}I local-ai launcher build info:${RESET})
$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
rm -rf $(LAUNCHER_BINARY_NAME) || true
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(LAUNCHER_BINARY_NAME) ./cmd/launcher
build-all: build build-launcher ## Build both server and launcher
dev-dist: dev-dist:
$(GORELEASER) build --snapshot --clean $(GORELEASER) build --snapshot --clean
@@ -117,8 +108,8 @@ run: ## run local-ai
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./ CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./
test-models/testmodel.ggml: test-models/testmodel.ggml:
mkdir -p test-models mkdir test-models
mkdir -p test-dir mkdir test-dir
wget -q https://huggingface.co/mradermacher/gpt2-alpaca-gpt4-GGUF/resolve/main/gpt2-alpaca-gpt4.Q4_K_M.gguf -O test-models/testmodel.ggml wget -q https://huggingface.co/mradermacher/gpt2-alpaca-gpt4-GGUF/resolve/main/gpt2-alpaca-gpt4.Q4_K_M.gguf -O test-models/testmodel.ggml
wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
@@ -143,6 +134,27 @@ test: test-models/testmodel.ggml protogen-go
$(MAKE) test-tts $(MAKE) test-tts
$(MAKE) test-stablediffusion $(MAKE) test-stablediffusion
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
######################################################## ########################################################
## AIO tests ## AIO tests
######################################################## ########################################################
@@ -230,7 +242,10 @@ help: ## Show this help.
######################################################## ########################################################
.PHONY: protogen .PHONY: protogen
protogen: protogen-go protogen: protogen-go protogen-python
.PHONY: protogen-clean
protogen-clean: protogen-go-clean protogen-python-clean
protoc: protoc:
@OS_NAME=$$(uname -s | tr '[:upper:]' '[:lower:]'); \ @OS_NAME=$$(uname -s | tr '[:upper:]' '[:lower:]'); \
@@ -275,6 +290,93 @@ protogen-go-clean:
$(RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go $(RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go
$(RM) bin/* $(RM) bin/*
.PHONY: protogen-python
protogen-python: bark-protogen coqui-protogen chatterbox-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen
.PHONY: protogen-python-clean
protogen-python-clean: bark-protogen-clean coqui-protogen-clean chatterbox-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean
.PHONY: bark-protogen
bark-protogen:
$(MAKE) -C backend/python/bark protogen
.PHONY: bark-protogen-clean
bark-protogen-clean:
$(MAKE) -C backend/python/bark protogen-clean
.PHONY: coqui-protogen
coqui-protogen:
$(MAKE) -C backend/python/coqui protogen
.PHONY: coqui-protogen-clean
coqui-protogen-clean:
$(MAKE) -C backend/python/coqui protogen-clean
.PHONY: diffusers-protogen
diffusers-protogen:
$(MAKE) -C backend/python/diffusers protogen
.PHONY: chatterbox-protogen
chatterbox-protogen:
$(MAKE) -C backend/python/chatterbox protogen
.PHONY: diffusers-protogen-clean
diffusers-protogen-clean:
$(MAKE) -C backend/python/diffusers protogen-clean
.PHONY: chatterbox-protogen-clean
chatterbox-protogen-clean:
$(MAKE) -C backend/python/chatterbox protogen-clean
.PHONY: faster-whisper-protogen
faster-whisper-protogen:
$(MAKE) -C backend/python/faster-whisper protogen
.PHONY: faster-whisper-protogen-clean
faster-whisper-protogen-clean:
$(MAKE) -C backend/python/faster-whisper protogen-clean
.PHONY: exllama2-protogen
exllama2-protogen:
$(MAKE) -C backend/python/exllama2 protogen
.PHONY: exllama2-protogen-clean
exllama2-protogen-clean:
$(MAKE) -C backend/python/exllama2 protogen-clean
.PHONY: rerankers-protogen
rerankers-protogen:
$(MAKE) -C backend/python/rerankers protogen
.PHONY: rerankers-protogen-clean
rerankers-protogen-clean:
$(MAKE) -C backend/python/rerankers protogen-clean
.PHONY: transformers-protogen
transformers-protogen:
$(MAKE) -C backend/python/transformers protogen
.PHONY: transformers-protogen-clean
transformers-protogen-clean:
$(MAKE) -C backend/python/transformers protogen-clean
.PHONY: kokoro-protogen
kokoro-protogen:
$(MAKE) -C backend/python/kokoro protogen
.PHONY: kokoro-protogen-clean
kokoro-protogen-clean:
$(MAKE) -C backend/python/kokoro protogen-clean
.PHONY: vllm-protogen
vllm-protogen:
$(MAKE) -C backend/python/vllm protogen
.PHONY: vllm-protogen-clean
vllm-protogen-clean:
$(MAKE) -C backend/python/vllm protogen-clean
prepare-test-extra: protogen-python prepare-test-extra: protogen-python
$(MAKE) -C backend/python/transformers $(MAKE) -C backend/python/transformers
$(MAKE) -C backend/python/diffusers $(MAKE) -C backend/python/diffusers
@@ -310,7 +412,7 @@ docker-cuda11:
--build-arg GO_TAGS="$(GO_TAGS)" \ --build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \ --build-arg BUILD_TYPE=$(BUILD_TYPE) \
-t $(DOCKER_IMAGE)-cuda-11 . -t $(DOCKER_IMAGE)-cuda11 .
docker-aio: docker-aio:
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)" @echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
@@ -325,130 +427,41 @@ docker-aio-all:
docker-image-intel: docker-image-intel:
docker build \ docker build \
--build-arg BASE_IMAGE=quay.io/go-skynet/intel-oneapi-base:latest \ --build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu24.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \ --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \ --build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=intel -t $(DOCKER_IMAGE) . --build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
docker-image-intel-xpu:
docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu22.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
######################################################## ########################################################
## Backends ## Backends
######################################################## ########################################################
backends/diffusers: docker-build-diffusers docker-save-diffusers build
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
backends/kokoro: docker-build-kokoro docker-save-kokoro build
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
backends/chatterbox: docker-build-chatterbox docker-save-chatterbox build
./local-ai backends install "ocifile://$(abspath ./backend-images/chatterbox.tar)"
backends/llama-cpp-darwin: build
bash ./scripts/build/llama-cpp-darwin.sh
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/neutts: docker-build-neutts docker-save-neutts build
./local-ai backends install "ocifile://$(abspath ./backend-images/neutts.tar)"
build-darwin-python-backend: build
bash ./scripts/build/python-darwin.sh
build-darwin-go-backend: build
bash ./scripts/build/golang-darwin.sh
backends/mlx:
BACKEND=mlx $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx.tar)"
backends/diffuser-darwin:
BACKEND=diffusers $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/mlx-vlm:
BACKEND=mlx-vlm $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-vlm.tar)"
backends/mlx-audio:
BACKEND=mlx-audio $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-audio.tar)"
backends/stablediffusion-ggml-darwin:
BACKEND=stablediffusion-ggml BUILD_TYPE=metal $(MAKE) build-darwin-go-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backend-images: backend-images:
mkdir -p backend-images mkdir -p backend-images
docker-build-llama-cpp: docker-build-llama-cpp:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp . docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg IMAGE_BASE=$(IMAGE_BASE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
docker-build-bark-cpp: docker-build-bark-cpp:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark-cpp -f backend/Dockerfile.golang --build-arg BACKEND=bark-cpp . docker build -t local-ai-backend:bark-cpp -f backend/Dockerfile.go --build-arg BACKEND=bark-cpp .
docker-build-piper: docker-build-piper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:piper -f backend/Dockerfile.golang --build-arg BACKEND=piper . docker build -t local-ai-backend:piper -f backend/Dockerfile.go --build-arg BACKEND=piper .
docker-build-local-store: docker-build-local-store:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:local-store -f backend/Dockerfile.golang --build-arg BACKEND=local-store . docker build -t local-ai-backend:local-store -f backend/Dockerfile.go --build-arg BACKEND=local-store .
docker-build-huggingface: docker-build-huggingface:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:huggingface -f backend/Dockerfile.golang --build-arg BACKEND=huggingface . docker build -t local-ai-backend:huggingface -f backend/Dockerfile.go --build-arg BACKEND=huggingface .
docker-build-rfdetr:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rfdetr -f backend/Dockerfile.python --build-arg BACKEND=rfdetr ./backend
docker-build-kitten-tts:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kitten-tts -f backend/Dockerfile.python --build-arg BACKEND=kitten-tts ./backend
docker-save-kitten-tts: backend-images
docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar
docker-save-chatterbox: backend-images
docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar
docker-build-neutts:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:neutts -f backend/Dockerfile.python --build-arg BACKEND=neutts ./backend
docker-save-neutts: backend-images
docker save local-ai-backend:neutts -o backend-images/neutts.tar
docker-build-kokoro:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
docker-save-kokoro: backend-images
docker save local-ai-backend:kokoro -o backend-images/kokoro.tar
docker-save-rfdetr: backend-images
docker save local-ai-backend:rfdetr -o backend-images/rfdetr.tar
docker-save-huggingface: backend-images docker-save-huggingface: backend-images
docker save local-ai-backend:huggingface -o backend-images/huggingface.tar docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
@@ -457,7 +470,7 @@ docker-save-local-store: backend-images
docker save local-ai-backend:local-store -o backend-images/local-store.tar docker save local-ai-backend:local-store -o backend-images/local-store.tar
docker-build-silero-vad: docker-build-silero-vad:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:silero-vad -f backend/Dockerfile.golang --build-arg BACKEND=silero-vad . docker build -t local-ai-backend:silero-vad -f backend/Dockerfile.go --build-arg BACKEND=silero-vad .
docker-save-silero-vad: backend-images docker-save-silero-vad: backend-images
docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar
@@ -472,46 +485,46 @@ docker-save-bark-cpp: backend-images
docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar
docker-build-stablediffusion-ggml: docker-build-stablediffusion-ggml:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.golang --build-arg BACKEND=stablediffusion-ggml . docker build -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.go --build-arg BACKEND=stablediffusion-ggml .
docker-save-stablediffusion-ggml: backend-images docker-save-stablediffusion-ggml: backend-images
docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar
docker-build-rerankers: docker-build-rerankers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers . docker build -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
docker-build-vllm: docker-build-vllm:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm . docker build -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm .
docker-build-transformers: docker-build-transformers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers . docker build -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
docker-build-diffusers: docker-build-diffusers:
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers ./backend docker build -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers .
docker-save-diffusers: backend-images docker-build-kokoro:
docker save local-ai-backend:diffusers -o backend-images/diffusers.tar docker build -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro .
docker-build-whisper: docker-build-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper . docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.go --build-arg BACKEND=whisper .
docker-save-whisper: backend-images docker-save-whisper: backend-images
docker save local-ai-backend:whisper -o backend-images/whisper.tar docker save local-ai-backend:whisper -o backend-images/whisper.tar
docker-build-faster-whisper: docker-build-faster-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper . docker build -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
docker-build-coqui: docker-build-coqui:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui . docker build -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
docker-build-bark: docker-build-bark:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark . docker build -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
docker-build-chatterbox: docker-build-chatterbox:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox ./backend docker build -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox .
docker-build-exllama2: docker-build-exllama2:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 . docker build -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2 docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2
@@ -544,19 +557,3 @@ docs-clean:
.PHONY: docs .PHONY: docs
docs: docs/static/gallery.html docs: docs/static/gallery.html
cd docs && hugo serve cd docs && hugo serve
########################################################
## Platform-specific builds
########################################################
## fyne cross-platform build
build-launcher-darwin: build-launcher
go run github.com/tiagomelo/macos-dmg-creator/cmd/createdmg@latest \
--appName "LocalAI" \
--appBinaryPath "$(LAUNCHER_BINARY_NAME)" \
--bundleIdentifier "com.localai.launcher" \
--iconPath "core/http/static/logo.png" \
--outputDir "dist/"
build-launcher-linux:
cd cmd/launcher && go run fyne.io/tools/cmd/fyne@latest package -os linux -icon ../../core/http/static/logo.png --executable $(LAUNCHER_BINARY_NAME)-linux && mv launcher.tar.xz ../../$(LAUNCHER_BINARY_NAME)-linux.tar.xz

View File

@@ -43,7 +43,7 @@
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/) > :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
> >
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on > [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
[![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/localaiofficial_bot) [![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/localaiofficial_bot)
[![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai) [![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai)
@@ -110,21 +110,8 @@ curl https://localai.io/install.sh | sh
For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/). For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/).
### macOS Download:
<a href="https://github.com/mudler/LocalAI/releases/latest/download/LocalAI.dmg">
<img src="https://img.shields.io/badge/Download-macOS-blue?style=for-the-badge&logo=apple&logoColor=white" alt="Download LocalAI for macOS"/>
</a>
Or run with docker: Or run with docker:
> **💡 Docker Run vs Docker Start**
>
> - `docker run` creates and starts a new container. If a container with the same name already exists, this command will fail.
> - `docker start` starts an existing container that was previously created with `docker run`.
>
> If you've already run LocalAI before and want to start it again, use: `docker start -i local-ai`
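In practice the two commands from the tip above look like this; the image tag is only an example, pick whichever tag from the sections below matches your hardware:

```bash
# First run: creates a container named "local-ai" (fails if that name already exists)
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
# Later runs: restart the existing container instead of creating a new one
docker start -i local-ai
```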
### CPU only image: ### CPU only image:
```bash ```bash
@@ -153,7 +140,11 @@ docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri
### Intel GPU Images (oneAPI): ### Intel GPU Images (oneAPI):
```bash ```bash
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel # Intel GPU with FP16 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f16
# Intel GPU with FP32 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f32
``` ```
### Vulkan GPU Images: ### Vulkan GPU Images:
@@ -175,7 +166,7 @@ docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-ai
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11 docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
# Intel GPU version # Intel GPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel-f16
# AMD GPU version # AMD GPU version
docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas
@@ -198,17 +189,10 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
local-ai run oci://localai/phi-2:latest local-ai run oci://localai/phi-2:latest
``` ```
> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection).
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html) For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
## 📰 Latest project news ## 📰 Latest project news
- October 2025: 🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) support added for agentic capabilities with external tools
- September 2025: New Launcher application for MacOS and Linux, extended support to many backends for Mac and Nvidia L4T devices. Models: Added MLX-Audio, WAN 2.2. WebUI improvements and Python-based backends now ships portable python environments.
- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607). - June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
- May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery). - May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery).
- May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0) - May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0)
@@ -241,68 +225,12 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/) - ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/) - 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
- 🥽 [Vision API](https://localai.io/features/gpt-vision/) - 🥽 [Vision API](https://localai.io/features/gpt-vision/)
- 🔍 [Object Detection](https://localai.io/features/object-detection/)
- 📈 [Reranker API](https://localai.io/features/reranker/) - 📈 [Reranker API](https://localai.io/features/reranker/)
- 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/) - 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
- 🆕🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) - Agentic capabilities with external tools and [LocalAGI's Agentic capabilities](https://github.com/mudler/LocalAGI) - [Agentic capabilities](https://github.com/mudler/LocalAGI)
- 🔊 Voice activity detection (Silero-VAD support) - 🔊 Voice activity detection (Silero-VAD support)
- 🌍 Integrated WebUI! - 🌍 Integrated WebUI!
## 🧩 Supported Backends & Acceleration
LocalAI supports a comprehensive range of AI backends with multiple acceleration options:
### Text Generation & Language Models
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 11/12, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
### Audio & Speech Processing
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 11/12, CPU |
| **piper** | Fast neural TTS system | CPU |
| **kitten-tts** | Kitten TTS models | CPU |
| **silero-vad** | Voice Activity Detection | CPU |
| **neutts** | Text-to-speech with voice cloning | CUDA 12, ROCm, CPU |
### Image & Video Generation
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12, Intel SYCL, Vulkan, CPU |
| **diffusers** | HuggingFace diffusion models | CUDA 11/12, ROCm, Intel, Metal, CPU |
### Specialized AI Tasks
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **rfdetr** | Real-time object detection | CUDA 12, Intel, CPU |
| **rerankers** | Document reranking API | CUDA 11/12, ROCm, Intel, CPU |
| **local-store** | Vector database | CPU |
| **huggingface** | HuggingFace API integration | API-based |
### Hardware Acceleration Matrix
| Acceleration Type | Supported Backends | Hardware Support |
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI |
| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
### 🔗 Community and integrations ### 🔗 Community and integrations
@@ -314,18 +242,9 @@ WebUIs:
- https://github.com/go-skynet/LocalAI-frontend - https://github.com/go-skynet/LocalAI-frontend
- QA-Pilot (an interactive chat project that leverages LocalAI LLMs for rapid understanding and navigation of GitHub code repositories) https://github.com/reid41/QA-Pilot - QA-Pilot (an interactive chat project that leverages LocalAI LLMs for rapid understanding and navigation of GitHub code repositories) https://github.com/reid41/QA-Pilot
Agentic Libraries:
- https://github.com/mudler/cogito
MCPs:
- https://github.com/mudler/MCPs
Model galleries Model galleries
- https://github.com/go-skynet/model-gallery - https://github.com/go-skynet/model-gallery
Voice:
- https://github.com/richiejp/VoxInput
Other: Other:
- Helm chart https://github.com/go-skynet/helm-charts - Helm chart https://github.com/go-skynet/helm-charts
- VSCode extension https://github.com/badgooooor/localai-vscode-plugin - VSCode extension https://github.com/badgooooor/localai-vscode-plugin

View File

@@ -2,10 +2,10 @@ context_size: 4096
f16: true f16: true
backend: llama-cpp backend: llama-cpp
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -42,9 +42,9 @@ template:
<|im_start|>assistant <|im_start|>assistant
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd

View File

@@ -2,10 +2,10 @@ context_size: 4096
backend: llama-cpp backend: llama-cpp
f16: true f16: true
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -42,9 +42,9 @@ template:
<|im_start|>assistant <|im_start|>assistant
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd

View File

@@ -2,10 +2,10 @@ context_size: 4096
backend: llama-cpp backend: llama-cpp
f16: true f16: true
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -43,9 +43,9 @@ template:
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd

View File

@@ -96,6 +96,17 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
ldconfig \ ldconfig \
; fi ; fi
# Intel oneAPI requirements
RUN <<EOT bash
if [[ "${BUILD_TYPE}" == sycl* ]] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
intel-oneapi-runtime-libs && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# Install Go # Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin

View File

@@ -11,6 +11,7 @@ ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0 ARG GRPC_VERSION=v1.65.0
ARG CMAKE_FROM_SOURCE=false ARG CMAKE_FROM_SOURCE=false
ARG CMAKE_VERSION=3.26.4 ARG CMAKE_VERSION=3.26.4
ARG PROTOBUF_VERSION=v21.12
ENV MAKEFLAGS=${GRPC_MAKEFLAGS} ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
@@ -49,6 +50,14 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
make install && \ make install && \
rm -rf /build rm -rf /build
RUN git clone --recurse-submodules --branch ${PROTOBUF_VERSION} https://github.com/protocolbuffers/protobuf.git && \
mkdir -p /build/protobuf/build && \
cd /build/protobuf/build && \
cmake -Dprotobuf_BUILD_SHARED_LIBS=ON -Dprotobuf_BUILD_TESTS=OFF .. && \
make && \
make install && \
rm -rf /build
FROM ${BASE_IMAGE} AS builder FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers ARG BACKEND=rerankers
ARG BUILD_TYPE ARG BUILD_TYPE
@@ -180,24 +189,12 @@ COPY --from=grpc /opt/grpc /usr/local
COPY . /LocalAI COPY . /LocalAI
## Otherwise just run the normal build RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp
RUN <<EOT bash RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp-grpc
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \ RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp-rpc-server
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-fallback && \
make llama-cpp-grpc && make llama-cpp-rpc-server; \
else \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-avx && \
make llama-cpp-avx2 && \
make llama-cpp-avx512 && \
make llama-cpp-fallback && \
make llama-cpp-grpc && \
make llama-cpp-rpc-server; \
fi
EOT
# Copy libraries using a script to handle architecture differences # Copy libraries using a script to handle architecture differences
RUN make -BC /LocalAI/backend/cpp/llama-cpp package RUN make -C /LocalAI/backend/cpp/llama-cpp package
FROM scratch FROM scratch

View File

@@ -23,12 +23,12 @@ RUN apt-get update && \
libssl-dev \ libssl-dev \
git \ git \
git-lfs \ git-lfs \
unzip clang \ unzip \
upx-ucl \ upx-ucl \
curl python3-pip \ curl python3-pip \
python-is-python3 \ python-is-python3 \
python3-dev llvm \ python3-dev llvm \
python3-venv make cmake && \ python3-venv make && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* && \ rm -rf /var/lib/apt/lists/* && \
pip install --upgrade pip pip install --upgrade pip
@@ -116,7 +116,7 @@ COPY python/${BACKEND} /${BACKEND}
COPY backend.proto /${BACKEND}/backend.proto COPY backend.proto /${BACKEND}/backend.proto
COPY python/common/ /${BACKEND}/common COPY python/common/ /${BACKEND}/common
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make RUN cd /${BACKEND} && make
FROM scratch FROM scratch
ARG BACKEND=rerankers ARG BACKEND=rerankers

View File

@@ -1,213 +0,0 @@
# LocalAI Backend Architecture
This directory contains the core backend infrastructure for LocalAI, including the gRPC protocol definition, multi-language Dockerfiles, and language-specific backend implementations.
## Overview
LocalAI uses a unified gRPC-based architecture that allows different programming languages to implement AI backends while maintaining consistent interfaces and capabilities. The backend system supports multiple hardware acceleration targets and provides a standardized way to integrate various AI models and frameworks.
## Architecture Components
### 1. Protocol Definition (`backend.proto`)
The `backend.proto` file defines the gRPC service interface that all backends must implement. This ensures consistency across different language implementations and provides a contract for communication between LocalAI core and backend services.
#### Core Services
- **Text Generation**: `Predict`, `PredictStream` for LLM inference
- **Embeddings**: `Embedding` for text vectorization
- **Image Generation**: `GenerateImage` for stable diffusion and image models
- **Audio Processing**: `AudioTranscription`, `TTS`, `SoundGeneration`
- **Video Generation**: `GenerateVideo` for video synthesis
- **Object Detection**: `Detect` for computer vision tasks
- **Vector Storage**: `StoresSet`, `StoresGet`, `StoresFind` for RAG operations
- **Reranking**: `Rerank` for document relevance scoring
- **Voice Activity Detection**: `VAD` for audio segmentation
#### Key Message Types
- **`PredictOptions`**: Comprehensive configuration for text generation
- **`ModelOptions`**: Model loading and configuration parameters
- **`Result`**: Standardized response format
- **`StatusResponse`**: Backend health and memory usage information
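For illustration, a client of this contract could load a model and request a completion using the services and message types above. The following Go sketch assumes protoc-generated stubs; the import path and message field names are assumptions for illustration, not the canonical ones:
```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the protoc-generated Go stubs of backend.proto.
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Load a model, then run a single prediction (field names are illustrative).
	if _, err := client.LoadModel(ctx, &pb.ModelOptions{Model: "model.gguf"}); err != nil {
		log.Fatal(err)
	}
	reply, err := client.Predict(ctx, &pb.PredictOptions{Prompt: "Hello"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", reply.GetMessage())
}
```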
### 2. Multi-Language Dockerfiles
The backend system provides language-specific Dockerfiles that handle the build environment and dependencies for different programming languages:
- `Dockerfile.python`
- `Dockerfile.golang`
- `Dockerfile.llama-cpp`
### 3. Language-Specific Implementations
#### Python Backends (`python/`)
- **transformers**: Hugging Face Transformers framework
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro
#### Go Backends (`go/`)
- **whisper**: OpenAI Whisper speech recognition in Go with a GGML C++ backend (whisper.cpp)
- **stablediffusion-ggml**: Stable Diffusion in Go with a GGML C++ backend
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Go with C bindings using rhasspy/piper
- **bark-cpp**: Bark TTS models in Go with C++ bindings
- **local-store**: Vector storage backend
#### C++ Backends (`cpp/`)
- **llama-cpp**: Llama.cpp integration
- **grpc**: GRPC utilities and helpers
## Hardware Acceleration Support
### CUDA (NVIDIA)
- **Versions**: CUDA 11.x, 12.x
- **Features**: cuBLAS, cuDNN, TensorRT optimization
- **Targets**: x86_64, ARM64 (Jetson)
### ROCm (AMD)
- **Features**: HIP, rocBLAS, MIOpen
- **Targets**: AMD GPUs with ROCm support
### Intel
- **Features**: oneAPI, Intel Extension for PyTorch
- **Targets**: Intel GPUs, XPUs, CPUs
### Vulkan
- **Features**: Cross-platform GPU acceleration
- **Targets**: Windows, Linux, Android, macOS
### Apple Silicon
- **Features**: MLX framework, Metal Performance Shaders
- **Targets**: M1/M2/M3 Macs
## Backend Registry (`index.yaml`)
The `index.yaml` file serves as a central registry for all available backends, providing:
- **Metadata**: Name, description, license, icons
- **Capabilities**: Hardware targets and optimization profiles
- **Tags**: Categorization for discovery
- **URLs**: Source code and documentation links
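A registry entry carrying the metadata above could be modeled roughly as follows in Go; the field and tag names here are illustrative, not the exact schema LocalAI uses:
```go
package gallery

// BackendEntry is an illustrative model of one index.yaml record; the real
// schema in LocalAI may differ.
type BackendEntry struct {
	Name        string   `yaml:"name"`
	Description string   `yaml:"description"`
	License     string   `yaml:"license"`
	Icon        string   `yaml:"icon"`
	Tags        []string `yaml:"tags"`
	URLs        []string `yaml:"urls"`
	// Capabilities maps an optimization profile (e.g. "cuda12", "metal")
	// to the container image that implements it.
	Capabilities map[string]string `yaml:"capabilities"`
}

// Loading the registry could then be as simple as:
//   var entries []BackendEntry
//   err := yaml.Unmarshal(data, &entries) // gopkg.in/yaml.v3
```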
## Building Backends
### Prerequisites
- Docker with multi-architecture support
- Appropriate hardware drivers (CUDA, ROCm, etc.)
- Build tools (make, cmake, compilers)
### Build Commands
Example build commands with Docker:
```bash
# Build Python backend
docker build -f backend/Dockerfile.python \
--build-arg BACKEND=transformers \
--build-arg BUILD_TYPE=cublas12 \
--build-arg CUDA_MAJOR_VERSION=12 \
--build-arg CUDA_MINOR_VERSION=0 \
-t localai-backend-transformers .
# Build Go backend
docker build -f backend/Dockerfile.golang \
--build-arg BACKEND=whisper \
--build-arg BUILD_TYPE=cpu \
-t localai-backend-whisper .
# Build C++ backend
docker build -f backend/Dockerfile.llama-cpp \
--build-arg BACKEND=llama-cpp \
--build-arg BUILD_TYPE=cublas12 \
-t localai-backend-llama-cpp .
```
For ARM64/macOS builds, Docker cannot be used; build with the Makefile in the respective backend instead.
### Build Types
- **`cpu`**: CPU-only optimization
- **`cublas11`**: CUDA 11.x with cuBLAS
- **`cublas12`**: CUDA 12.x with cuBLAS
- **`hipblas`**: ROCm with rocBLAS
- **`intel`**: Intel oneAPI optimization
- **`vulkan`**: Vulkan-based acceleration
- **`metal`**: Apple Metal optimization
## Backend Development
### Creating a New Backend
1. **Choose Language**: Select Python, Go, or C++ based on requirements
2. **Implement Interface**: Implement the gRPC service defined in `backend.proto`
3. **Add Dependencies**: Create appropriate requirements files
4. **Configure Build**: Set up Dockerfile and build scripts
5. **Register Backend**: Add entry to `index.yaml`
6. **Test Integration**: Verify gRPC communication and functionality
### Backend Structure
```
backend-name/
├── backend.py/go/cpp # Main implementation
├── requirements.txt # Dependencies
├── Dockerfile # Build configuration
├── install.sh # Installation script
├── run.sh # Execution script
├── test.sh # Test script
└── README.md # Backend documentation
```
### Required gRPC Methods
At minimum, backends must implement:
- `Health()` - Service health check
- `LoadModel()` - Model loading and initialization
- `Predict()` - Main inference endpoint
- `Status()` - Backend status and metrics
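A minimal Go backend skeleton covering just these four methods might look like the sketch below. The generated-stub import path, method signatures, and message field names are assumptions based on the protocol description above; a real backend would load and run an actual model instead of echoing input:
```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	// Assumed import path for the protoc-generated stubs of backend.proto.
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

type backend struct {
	pb.UnimplementedBackendServer
	loaded bool
}

func (b *backend) Health(ctx context.Context, _ *pb.HealthMessage) (*pb.Reply, error) {
	return &pb.Reply{Message: []byte("OK")}, nil
}

func (b *backend) LoadModel(ctx context.Context, in *pb.ModelOptions) (*pb.Result, error) {
	// A real implementation would load in.Model here.
	b.loaded = true
	return &pb.Result{Success: true, Message: "Loading succeeded"}, nil
}

func (b *backend) Predict(ctx context.Context, in *pb.PredictOptions) (*pb.Reply, error) {
	// Echo the prompt back; a real backend would run inference.
	return &pb.Reply{Message: []byte(in.Prompt)}, nil
}

func (b *backend) Status(ctx context.Context, _ *pb.HealthMessage) (*pb.StatusResponse, error) {
	return &pb.StatusResponse{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterBackendServer(s, &backend{})
	log.Fatal(s.Serve(lis))
}
```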
## Integration with LocalAI Core
Backends communicate with LocalAI core through gRPC:
1. **Service Discovery**: Core discovers available backends
2. **Model Loading**: Core requests model loading via `LoadModel`
3. **Inference**: Core sends requests via `Predict` or specialized endpoints
4. **Streaming**: Core handles streaming responses for real-time generation
5. **Monitoring**: Core tracks backend health and performance
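For step 4 of this flow, streaming uses the same interface. A hedged Go sketch of a client consuming `PredictStream` (stub import path and field names assumed as in the earlier examples):
```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the protoc-generated stubs of backend.proto.
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := pb.NewBackendClient(conn)

	stream, err := client.PredictStream(context.Background(),
		&pb.PredictOptions{Prompt: "Tell me a story"})
	if err != nil {
		log.Fatal(err)
	}
	// Tokens arrive as a stream of Reply messages until EOF.
	for {
		reply, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print(string(reply.GetMessage()))
	}
}
```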
## Performance Optimization
### Memory Management
- **Model Caching**: Efficient model loading and caching
- **Batch Processing**: Optimize for multiple concurrent requests
- **Memory Pinning**: GPU memory optimization for CUDA/ROCm
### Hardware Utilization
- **Multi-GPU**: Support for tensor parallelism
- **Mixed Precision**: FP16/BF16 for memory efficiency
- **Kernel Fusion**: Optimized CUDA/ROCm kernels
## Troubleshooting
### Common Issues
1. **gRPC Connection**: Verify the backend service is running and accessible
2. **Model Loading**: Check model paths and dependencies
3. **Hardware Detection**: Ensure appropriate drivers and libraries
4. **Memory Issues**: Monitor GPU memory usage and model sizes
## Contributing
When contributing to the backend system:
1. **Follow Protocol**: Implement the exact gRPC interface
2. **Add Tests**: Include comprehensive test coverage
3. **Document**: Provide clear usage examples
4. **Optimize**: Consider performance and resource usage
5. **Validate**: Test across different hardware targets

View File

@@ -20,7 +20,6 @@ service Backend {
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {} rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {} rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {} rpc Status(HealthMessage) returns (StatusResponse) {}
rpc Detect(DetectOptions) returns (DetectResponse) {}
rpc StoresSet(StoresSetOptions) returns (Result) {} rpc StoresSet(StoresSetOptions) returns (Result) {}
rpc StoresDelete(StoresDeleteOptions) returns (Result) {} rpc StoresDelete(StoresDeleteOptions) returns (Result) {}
@@ -242,7 +241,7 @@ message ModelOptions {
string Type = 49; string Type = 49;
string FlashAttention = 56; bool FlashAttention = 56;
bool NoKVOffload = 57; bool NoKVOffload = 57;
string ModelPath = 59; string ModelPath = 59;
@@ -276,7 +275,6 @@ message TranscriptRequest {
string language = 3; string language = 3;
uint32 threads = 4; uint32 threads = 4;
bool translate = 5; bool translate = 5;
bool diarize = 6;
} }
message TranscriptResult { message TranscriptResult {
@@ -306,24 +304,19 @@ message GenerateImageRequest {
// Diffusers // Diffusers
string EnableParameters = 10; string EnableParameters = 10;
int32 CLIPSkip = 11; int32 CLIPSkip = 11;
// Reference images for models that support them (e.g., Flux Kontext)
repeated string ref_images = 12;
} }
message GenerateVideoRequest { message GenerateVideoRequest {
string prompt = 1; string prompt = 1;
string negative_prompt = 2; // Negative prompt for video generation string start_image = 2; // Path or base64 encoded image for the start frame
string start_image = 3; // Path or base64 encoded image for the start frame string end_image = 3; // Path or base64 encoded image for the end frame
string end_image = 4; // Path or base64 encoded image for the end frame int32 width = 4;
int32 width = 5; int32 height = 5;
int32 height = 6; int32 num_frames = 6; // Number of frames to generate
int32 num_frames = 7; // Number of frames to generate int32 fps = 7; // Frames per second
int32 fps = 8; // Frames per second int32 seed = 8;
int32 seed = 9; float cfg_scale = 9; // Classifier-free guidance scale
float cfg_scale = 10; // Classifier-free guidance scale string dst = 10; // Output path for the generated video
int32 step = 11; // Number of inference steps
string dst = 12; // Output path for the generated video
} }
message TTSRequest { message TTSRequest {
@@ -383,20 +376,3 @@ message Message {
string role = 1; string role = 1;
string content = 2; string content = 2;
} }
message DetectOptions {
string src = 1;
}
message Detection {
float x = 1;
float y = 2;
float width = 3;
float height = 4;
float confidence = 5;
string class_name = 6;
}
message DetectResponse {
repeated Detection Detections = 1;
}

View File

@@ -17,6 +17,8 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
include_directories("${HOMEBREW_DEFAULT_PREFIX}/include") include_directories("${HOMEBREW_DEFAULT_PREFIX}/include")
endif() endif()
set(Protobuf_USE_STATIC_LIBS OFF)
set(gRPC_USE_STATIC_LIBS OFF)
find_package(absl CONFIG REQUIRED) find_package(absl CONFIG REQUIRED)
find_package(Protobuf CONFIG REQUIRED) find_package(Protobuf CONFIG REQUIRED)
find_package(gRPC CONFIG REQUIRED) find_package(gRPC CONFIG REQUIRED)

View File

@@ -1,5 +1,5 @@
LLAMA_VERSION?=31c511a968348281e11d590446bb815048a1e912 LLAMA_VERSION?=acd6cb1c41676f6bbb25c2a76fa5abeb1719301e
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?= CMAKE_ARGS?=
@@ -7,14 +7,13 @@ BUILD_TYPE?=
NATIVE?=false NATIVE?=false
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server TARGET?=--target grpc-server
JOBS?=$(shell nproc)
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF CMAKE_ARGS+=-DBUILD_SHARED_LIBS=ON -DLLAMA_CURL=OFF -DGGML_CPU_ALL_VARIANTS=ON -DGGML_BACKEND_DL=ON
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false) ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF -DLLAMA_OPENSSL=OFF CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas) ifeq ($(BUILD_TYPE),cublas)
@@ -26,14 +25,16 @@ else ifeq ($(BUILD_TYPE),openblas)
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path # If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas) else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
ROCM_HOME ?= /opt/rocm ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm ROCM_PATH ?= /opt/rocm
export CXX=$(ROCM_HOME)/llvm/bin/clang++ export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang export CC=$(ROCM_HOME)/llvm/bin/clang
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201 # GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS) # AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=1 CMAKE_ARGS+=-DGGML_VULKAN=1
else ifeq ($(OS),Darwin) else ifeq ($(OS),Darwin)
@@ -88,33 +89,12 @@ else
LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
endif endif
llama-cpp-avx2: llama.cpp llama-cpp: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build purge $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build purge
$(info ${GREEN}I llama-cpp build info:avx2${RESET}) $(info ${GREEN}I llama-cpp build info:${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx2-build" build-llama-cpp-grpc-server CMAKE_ARGS="$(CMAKE_ARGS)" $(MAKE) VARIANT="llama-cpp-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build/grpc-server llama-cpp-avx2 cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build/grpc-server llama-cpp
llama-cpp-avx512: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build purge
$(info ${GREEN}I llama-cpp build info:avx512${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx512-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build/grpc-server llama-cpp-avx512
llama-cpp-avx: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
$(info ${GREEN}I llama-cpp build info:avx${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
llama-cpp-fallback: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
llama-cpp-grpc: llama.cpp llama-cpp-grpc: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
@@ -159,8 +139,8 @@ grpc-server: llama.cpp llama.cpp/tools/grpc-server
@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)" @echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE))) ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \ +bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)" cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)"
else else
+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET) +cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
endif endif
cp llama.cpp/build/bin/grpc-server . cp llama.cpp/build/bin/grpc-server .

View File

@@ -53,9 +53,9 @@ static void start_llama_server(server_context& ctx_server) {
LOG_INF("%s: model loaded\n", __func__); LOG_INF("%s: model loaded\n", __func__);
// print sample chat example to make it clear which template is used // print sample chat example to make it clear which template is used
// LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__, LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__,
// common_chat_templates_source(ctx_server.chat_templates.get()), common_chat_templates_source(ctx_server.chat_templates.get()),
// common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str(), ctx_server.params_base.default_template_kwargs); common_chat_format_example(ctx_server.chat_templates.get(), ctx_server.params_base.use_jinja).c_str());
// Reset the chat templates // Reset the chat templates
// TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM // TODO: We should make this configurable by respecting the option that is already present in LocalAI for vLLM
@@ -92,7 +92,7 @@ static void start_llama_server(server_context& ctx_server) {
ctx_server.queue_tasks.start_loop(); ctx_server.queue_tasks.start_loop();
} }
json parse_options(bool streaming, const backend::PredictOptions* predict, const server_context& ctx_server) json parse_options(bool streaming, const backend::PredictOptions* predict)
{ {
// Create now a json data from the prediction options instead // Create now a json data from the prediction options instead
@@ -147,28 +147,6 @@ json parse_options(bool streaming, const backend::PredictOptions* predict, const
// data["n_probs"] = predict->nprobs(); // data["n_probs"] = predict->nprobs();
//TODO: images, //TODO: images,
// Serialize grammar triggers from server context to JSON array
if (!ctx_server.params_base.sampling.grammar_triggers.empty()) {
json grammar_triggers = json::array();
for (const auto& trigger : ctx_server.params_base.sampling.grammar_triggers) {
json trigger_json;
trigger_json["value"] = trigger.value;
// Always serialize as WORD type since upstream converts WORD to TOKEN internally
trigger_json["type"] = static_cast<int>(COMMON_GRAMMAR_TRIGGER_TYPE_WORD);
grammar_triggers.push_back(trigger_json);
}
data["grammar_triggers"] = grammar_triggers;
}
// Serialize preserved tokens from server context to JSON array
if (!ctx_server.params_base.sampling.preserved_tokens.empty()) {
json preserved_tokens = json::array();
for (const auto& token : ctx_server.params_base.sampling.preserved_tokens) {
preserved_tokens.push_back(common_token_to_piece(ctx_server.ctx, token));
}
data["preserved_tokens"] = preserved_tokens;
}
return data; return data;
} }
@@ -229,7 +207,7 @@ static void add_rpc_devices(std::string servers) {
} }
} }
static void params_parse(server_context& ctx_server, const backend::ModelOptions* request, static void params_parse(const backend::ModelOptions* request,
common_params & params) { common_params & params) {
// this is comparable to: https://github.com/ggerganov/llama.cpp/blob/d9b33fe95bd257b36c84ee5769cc048230067d6f/examples/server/server.cpp#L1809 // this is comparable to: https://github.com/ggerganov/llama.cpp/blob/d9b33fe95bd257b36c84ee5769cc048230067d6f/examples/server/server.cpp#L1809
@@ -253,7 +231,6 @@ static void params_parse(server_context& ctx_server, const backend::ModelOptions
params.cpuparams.n_threads = request->threads(); params.cpuparams.n_threads = request->threads();
params.n_gpu_layers = request->ngpulayers(); params.n_gpu_layers = request->ngpulayers();
params.n_batch = request->nbatch(); params.n_batch = request->nbatch();
params.n_ubatch = request->nbatch(); // fixes issue with reranking models being limited to 512 tokens (the default n_ubatch size); allows for setting the maximum input amount of tokens thereby avoiding this error "input is too large to process. increase the physical batch size"
// Set params.n_parallel by environment variable (LLAMA_PARALLEL), defaults to 1 // Set params.n_parallel by environment variable (LLAMA_PARALLEL), defaults to 1
//params.n_parallel = 1; //params.n_parallel = 1;
const char *env_parallel = std::getenv("LLAMACPP_PARALLEL"); const char *env_parallel = std::getenv("LLAMACPP_PARALLEL");
@@ -291,11 +268,6 @@ static void params_parse(server_context& ctx_server, const backend::ModelOptions
} }
} }
if (!params.kv_overrides.empty()) {
params.kv_overrides.emplace_back();
params.kv_overrides.back().key[0] = 0;
}
// TODO: Add yarn // TODO: Add yarn
if (!request->tensorsplit().empty()) { if (!request->tensorsplit().empty()) {
@@ -332,15 +304,7 @@ static void params_parse(server_context& ctx_server, const backend::ModelOptions
} }
params.use_mlock = request->mlock(); params.use_mlock = request->mlock();
params.use_mmap = request->mmap(); params.use_mmap = request->mmap();
params.flash_attn = request->flashattention();
if (request->flashattention() == "on" || request->flashattention() == "enabled") {
params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_ENABLED;
} else if (request->flashattention() == "off" || request->flashattention() == "disabled") {
params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED;
} else if (request->flashattention() == "auto") {
params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_AUTO;
}
params.no_kv_offload = request->nokvoffload(); params.no_kv_offload = request->nokvoffload();
params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops) params.ctx_shift = false; // We control context-shifting in any case (and we disable it as it could just lead to infinite loops)
@@ -349,11 +313,9 @@ static void params_parse(server_context& ctx_server, const backend::ModelOptions
params.pooling_type = LLAMA_POOLING_TYPE_RANK; params.pooling_type = LLAMA_POOLING_TYPE_RANK;
} }
if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; } if (request->ropescaling() == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
else if (request->ropescaling() == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; } else if (request->ropescaling() == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
else if (request->ropescaling() == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; } else { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
if ( request->yarnextfactor() != 0.0f ) { if ( request->yarnextfactor() != 0.0f ) {
params.yarn_ext_factor = request->yarnextfactor(); params.yarn_ext_factor = request->yarnextfactor();
} }
@@ -374,14 +336,14 @@ static void params_parse(server_context& ctx_server, const backend::ModelOptions
} }
if (request->grammartriggers_size() > 0) { if (request->grammartriggers_size() > 0) {
//params.sampling.grammar_lazy = true; params.sampling.grammar_lazy = true;
// Store grammar trigger words for processing after model is loaded
for (int i = 0; i < request->grammartriggers_size(); i++) { for (int i = 0; i < request->grammartriggers_size(); i++) {
const auto & word = request->grammartriggers(i).word();
common_grammar_trigger trigger; common_grammar_trigger trigger;
trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_WORD; trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_WORD;
trigger.value = word; trigger.value = request->grammartriggers(i).word();
params.sampling.grammar_triggers.push_back(std::move(trigger)); // trigger.at_start = request->grammartriggers(i).at_start();
params.sampling.grammar_triggers.push_back(trigger);
} }
} }
} }
@@ -404,7 +366,7 @@ public:
grpc::Status LoadModel(ServerContext* context, const backend::ModelOptions* request, backend::Result* result) { grpc::Status LoadModel(ServerContext* context, const backend::ModelOptions* request, backend::Result* result) {
// Implement LoadModel RPC // Implement LoadModel RPC
common_params params; common_params params;
params_parse(ctx_server, request, params); params_parse(request, params);
common_init(); common_init();
@@ -423,39 +385,6 @@ public:
return Status::CANCELLED; return Status::CANCELLED;
} }
// Process grammar triggers now that vocab is available
if (!params.sampling.grammar_triggers.empty()) {
std::vector<common_grammar_trigger> processed_triggers;
for (const auto& trigger : params.sampling.grammar_triggers) {
if (trigger.type == COMMON_GRAMMAR_TRIGGER_TYPE_WORD) {
auto ids = common_tokenize(ctx_server.vocab, trigger.value, /* add_special= */ false, /* parse_special= */ true);
if (ids.size() == 1) {
auto token = ids[0];
// Add the token to preserved_tokens if not already present
if (params.sampling.preserved_tokens.find(token) == params.sampling.preserved_tokens.end()) {
params.sampling.preserved_tokens.insert(token);
LOG_INF("Added grammar trigger token to preserved tokens: %d (`%s`)\n", token, trigger.value.c_str());
}
LOG_INF("Grammar trigger token: %d (`%s`)\n", token, trigger.value.c_str());
common_grammar_trigger processed_trigger;
processed_trigger.type = COMMON_GRAMMAR_TRIGGER_TYPE_TOKEN;
processed_trigger.value = trigger.value;
processed_trigger.token = token;
processed_triggers.push_back(std::move(processed_trigger));
} else {
LOG_INF("Grammar trigger word: `%s`\n", trigger.value.c_str());
processed_triggers.push_back(trigger);
}
} else {
processed_triggers.push_back(trigger);
}
}
// Update the grammar triggers in params_base
ctx_server.params_base.sampling.grammar_triggers = std::move(processed_triggers);
// Also update preserved_tokens in params_base
ctx_server.params_base.sampling.preserved_tokens = params.sampling.preserved_tokens;
}
//ctx_server.init(); //ctx_server.init();
result->set_message("Loading succeeded"); result->set_message("Loading succeeded");
result->set_success(true); result->set_success(true);
@@ -466,7 +395,7 @@ public:
} }
grpc::Status PredictStream(grpc::ServerContext* context, const backend::PredictOptions* request, grpc::ServerWriter<backend::Reply>* writer) override { grpc::Status PredictStream(grpc::ServerContext* context, const backend::PredictOptions* request, grpc::ServerWriter<backend::Reply>* writer) override {
json data = parse_options(true, request, ctx_server); json data = parse_options(true, request);
//Raise error if embeddings is set to true //Raise error if embeddings is set to true
@@ -506,7 +435,24 @@ public:
} }
} }
// process files
mtmd::bitmaps bitmaps;
const bool has_mtmd = ctx_server.mctx != nullptr; const bool has_mtmd = ctx_server.mctx != nullptr;
{
if (!has_mtmd && !files.empty()) {
throw std::runtime_error("This server does not support multimodal");
}
for (auto & file : files) {
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
if (!bmp.ptr) {
throw std::runtime_error("Failed to load image/audio");
}
// calculate bitmap hash (for KV caching)
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
bmp.set_id(hash.c_str());
bitmaps.entries.push_back(std::move(bmp));
}
}
// process prompt // process prompt
std::vector<server_tokens> inputs; std::vector<server_tokens> inputs;
@@ -516,10 +462,32 @@ public:
if (has_mtmd) { if (has_mtmd) {
// multimodal // multimodal
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files)); std::string prompt_str = prompt.get<std::string>();
mtmd_input_text inp_txt = {
prompt_str.c_str(),
/* add_special */ true,
/* parse_special */ true,
};
mtmd::input_chunks chunks(mtmd_input_chunks_init());
auto bitmaps_c_ptr = bitmaps.c_ptr();
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
chunks.ptr.get(),
&inp_txt,
bitmaps_c_ptr.data(),
bitmaps_c_ptr.size());
if (tokenized != 0) {
throw std::runtime_error("Failed to tokenize prompt");
}
server_tokens tmp(chunks, true);
inputs.push_back(std::move(tmp));
} else { } else {
// Everything else, including multimodal completions. // non-multimodal version
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (auto & p : tokenized_prompts) {
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
inputs.push_back(std::move(tmp));
}
} }
tasks.reserve(inputs.size()); tasks.reserve(inputs.size());
@@ -529,12 +497,12 @@ public:
task.id = ctx_server.queue_tasks.get_new_id(); task.id = ctx_server.queue_tasks.get_new_id();
task.index = i; task.index = i;
task.tokens = std::move(inputs[i]); task.prompt_tokens = std::move(inputs[i]);
task.params = server_task::params_from_json_cmpl( task.params = server_task::params_from_json_cmpl(
ctx_server.ctx, ctx_server.ctx,
ctx_server.params_base, ctx_server.params_base,
data); data);
task.id_slot = json_value(data, "id_slot", -1); task.id_selected_slot = json_value(data, "id_slot", -1);
// OAI-compat // OAI-compat
task.params.oaicompat = OAICOMPAT_TYPE_NONE; task.params.oaicompat = OAICOMPAT_TYPE_NONE;
@@ -616,7 +584,7 @@ public:
} }
grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) { grpc::Status Predict(ServerContext* context, const backend::PredictOptions* request, backend::Reply* reply) {
json data = parse_options(true, request, ctx_server); json data = parse_options(true, request);
data["stream"] = false; data["stream"] = false;
//Raise error if embeddings is set to true //Raise error if embeddings is set to true
@@ -660,7 +628,23 @@ public:
} }
// process files // process files
mtmd::bitmaps bitmaps;
const bool has_mtmd = ctx_server.mctx != nullptr; const bool has_mtmd = ctx_server.mctx != nullptr;
{
if (!has_mtmd && !files.empty()) {
throw std::runtime_error("This server does not support multimodal");
}
for (auto & file : files) {
mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(ctx_server.mctx, file.data(), file.size()));
if (!bmp.ptr) {
throw std::runtime_error("Failed to load image/audio");
}
// calculate bitmap hash (for KV caching)
std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
bmp.set_id(hash.c_str());
bitmaps.entries.push_back(std::move(bmp));
}
}
// process prompt // process prompt
std::vector<server_tokens> inputs; std::vector<server_tokens> inputs;
@@ -671,10 +655,33 @@ public:
if (has_mtmd) { if (has_mtmd) {
// multimodal // multimodal
inputs.push_back(process_mtmd_prompt(ctx_server.mctx, prompt.get<std::string>(), files)); std::string prompt_str = prompt.get<std::string>();
mtmd_input_text inp_txt = {
prompt_str.c_str(),
/* add_special */ true,
/* parse_special */ true,
};
mtmd::input_chunks chunks(mtmd_input_chunks_init());
auto bitmaps_c_ptr = bitmaps.c_ptr();
int32_t tokenized = mtmd_tokenize(ctx_server.mctx,
chunks.ptr.get(),
&inp_txt,
bitmaps_c_ptr.data(),
bitmaps_c_ptr.size());
if (tokenized != 0) {
std::cout << "[PREDICT] Failed to tokenize prompt" << std::endl;
throw std::runtime_error("Failed to tokenize prompt");
}
server_tokens tmp(chunks, true);
inputs.push_back(std::move(tmp));
} else { } else {
// Everything else, including multimodal completions. // non-multimodal version
inputs = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (auto & p : tokenized_prompts) {
auto tmp = server_tokens(p, ctx_server.mctx != nullptr);
inputs.push_back(std::move(tmp));
}
} }
tasks.reserve(inputs.size()); tasks.reserve(inputs.size());
@@ -684,12 +691,12 @@ public:
task.id = ctx_server.queue_tasks.get_new_id(); task.id = ctx_server.queue_tasks.get_new_id();
task.index = i; task.index = i;
task.tokens = std::move(inputs[i]); task.prompt_tokens = std::move(inputs[i]);
task.params = server_task::params_from_json_cmpl( task.params = server_task::params_from_json_cmpl(
ctx_server.ctx, ctx_server.ctx,
ctx_server.params_base, ctx_server.params_base,
data); data);
task.id_slot = json_value(data, "id_slot", -1); task.id_selected_slot = json_value(data, "id_slot", -1);
// OAI-compat // OAI-compat
task.params.oaicompat = OAICOMPAT_TYPE_NONE; task.params.oaicompat = OAICOMPAT_TYPE_NONE;
@@ -751,7 +758,7 @@ public:
grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) { grpc::Status Embedding(ServerContext* context, const backend::PredictOptions* request, backend::EmbeddingResult* embeddingResult) {
json body = parse_options(false, request, ctx_server); json body = parse_options(false, request);
body["stream"] = false; body["stream"] = false;
@@ -762,10 +769,10 @@ public:
*/ */
// for the shape of input/content, see tokenize_input_prompts() // for the shape of input/content, see tokenize_input_prompts()
json prompt = body.at("embeddings"); json prompt = body.at("prompt");
auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, ctx_server.mctx, prompt, true, true); auto tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true);
for (const auto & tokens : tokenized_prompts) { for (const auto & tokens : tokenized_prompts) {
// this check is necessary for models that do not add BOS token to the input // this check is necessary for models that do not add BOS token to the input
if (tokens.empty()) { if (tokens.empty()) {
@@ -773,7 +780,6 @@ public:
} }
} }
int embd_normalize = 2; // default to Euclidean/L2 norm
// create and queue the task // create and queue the task
json responses = json::array(); json responses = json::array();
bool error = false; bool error = false;
@@ -785,10 +791,11 @@ public:
task.id = ctx_server.queue_tasks.get_new_id(); task.id = ctx_server.queue_tasks.get_new_id();
task.index = i; task.index = i;
task.tokens = std::move(tokenized_prompts[i]); task.prompt_tokens = server_tokens(tokenized_prompts[i], ctx_server.mctx != nullptr);
// OAI-compat
task.params.oaicompat = OAICOMPAT_TYPE_EMBEDDING;
task.params.oaicompat = OAICOMPAT_TYPE_NONE;
task.params.embd_normalize = embd_normalize;
tasks.push_back(std::move(task)); tasks.push_back(std::move(task));
} }
@@ -804,8 +811,9 @@ public:
responses.push_back(res->to_json()); responses.push_back(res->to_json());
} }
}, [&](const json & error_data) { }, [&](const json & error_data) {
error = true; return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, error_data.value("content", ""));
}, [&]() { }, [&]() {
// NOTE: we should try to check when the writer is closed here
return false; return false;
}); });
@@ -815,36 +823,12 @@ public:
return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results"); return grpc::Status(grpc::StatusCode::INTERNAL, "Error in receiving results");
} }
std::cout << "[DEBUG] Responses size: " << responses.size() << std::endl; std::vector<float> embeddings = responses[0].value("embedding", std::vector<float>());
// loop the vector and set the embeddings results
// Process the responses and extract embeddings for (int i = 0; i < embeddings.size(); i++) {
for (const auto & response_elem : responses) { embeddingResult->add_embeddings(embeddings[i]);
// Check if the response has an "embedding" field
if (response_elem.contains("embedding")) {
json embedding_data = json_value(response_elem, "embedding", json::array());
if (embedding_data.is_array() && !embedding_data.empty()) {
for (const auto & embedding_vector : embedding_data) {
if (embedding_vector.is_array()) {
for (const auto & embedding_value : embedding_vector) {
embeddingResult->add_embeddings(embedding_value.get<float>());
}
}
}
}
} else {
// Check if the response itself contains the embedding data directly
if (response_elem.is_array()) {
for (const auto & embedding_value : response_elem) {
embeddingResult->add_embeddings(embedding_value.get<float>());
}
}
}
} }
return grpc::Status::OK; return grpc::Status::OK;
} }
@@ -862,6 +846,9 @@ public:
return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"documents\" must be a non-empty string array"); return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "\"documents\" must be a non-empty string array");
} }
// Tokenize the query
llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.vocab, request->query(), /* add_special */ false, true)[0];
// Create and queue the task // Create and queue the task
json responses = json::array(); json responses = json::array();
bool error = false; bool error = false;
@@ -873,13 +860,14 @@ public:
documents.push_back(request->documents(i)); documents.push_back(request->documents(i));
} }
tasks.reserve(documents.size()); auto tokenized_docs = tokenize_input_prompts(ctx_server.vocab, documents, /* add_special */ false, true);
for (size_t i = 0; i < documents.size(); i++) { tasks.reserve(tokenized_docs.size());
auto tmp = format_rerank(ctx_server.model, ctx_server.vocab, ctx_server.mctx, request->query(), documents[i]); for (size_t i = 0; i < tokenized_docs.size(); i++) {
auto tmp = format_rerank(ctx_server.vocab, tokenized_query, tokenized_docs[i]);
server_task task = server_task(SERVER_TASK_TYPE_RERANK); server_task task = server_task(SERVER_TASK_TYPE_RERANK);
task.id = ctx_server.queue_tasks.get_new_id(); task.id = ctx_server.queue_tasks.get_new_id();
task.index = i; task.index = i;
task.tokens = std::move(tmp); task.prompt_tokens = server_tokens(tmp, ctx_server.mctx != nullptr);
tasks.push_back(std::move(task)); tasks.push_back(std::move(task));
} }
@@ -932,7 +920,7 @@ public:
} }
grpc::Status TokenizeString(ServerContext* context, const backend::PredictOptions* request, backend::TokenizationResponse* response) { grpc::Status TokenizeString(ServerContext* context, const backend::PredictOptions* request, backend::TokenizationResponse* response) {
json body = parse_options(false, request, ctx_server); json body = parse_options(false, request);
body["stream"] = false; body["stream"] = false;
json tokens_response = json::array(); json tokens_response = json::array();

View File

@@ -6,34 +6,9 @@ CURDIR=$(dirname "$(realpath $0)")
cd / cd /
echo "CPU info:" BINARY=llama-cpp
grep -e "model\sname" /proc/cpuinfo | head -1
grep -e "flags" /proc/cpuinfo | head -1
BINARY=llama-cpp-fallback
if grep -q -e "\savx\s" /proc/cpuinfo ; then
echo "CPU: AVX found OK"
if [ -e $CURDIR/llama-cpp-avx ]; then
BINARY=llama-cpp-avx
fi
fi
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
echo "CPU: AVX2 found OK"
if [ -e $CURDIR/llama-cpp-avx2 ]; then
BINARY=llama-cpp-avx2
fi
fi
# Check avx 512
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
echo "CPU: AVX512F found OK"
if [ -e $CURDIR/llama-cpp-avx512 ]; then
BINARY=llama-cpp-avx512
fi
fi
## P2P/GRPC mode
if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then
if [ -e $CURDIR/llama-cpp-grpc ]; then if [ -e $CURDIR/llama-cpp-grpc ]; then
BINARY=llama-cpp-grpc BINARY=llama-cpp-grpc
@@ -42,8 +17,7 @@ fi
# Extend ld library path with the dir where this script is located/lib # Extend ld library path with the dir where this script is located/lib
if [ "$(uname)" == "Darwin" ]; then if [ "$(uname)" == "Darwin" ]; then
export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
#export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
else else
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
fi fi
@@ -57,6 +31,3 @@ fi
echo "Using binary: $BINARY" echo "Using binary: $BINARY"
exec $CURDIR/$BINARY "$@" exec $CURDIR/$BINARY "$@"
# We should never reach this point, however just in case we do, run fallback
exec $CURDIR/llama-cpp-fallback "$@"

View File

@@ -1,6 +0,0 @@
package/
sources/
.cache/
build/
libgosd.so
stablediffusion-ggml

View File

@@ -1,20 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(gosd LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
add_subdirectory(./sources/stablediffusion-ggml.cpp)
add_library(gosd MODULE gosd.cpp)
target_link_libraries(gosd PRIVATE stable-diffusion ggml)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
target_link_libraries(gosd PRIVATE stdc++fs)
endif()
target_include_directories(gosd PUBLIC
stable-diffusion.cpp
stable-diffusion.cpp/thirdparty
)
set_property(TARGET gosd PROPERTY CXX_STANDARD 17)
set_target_properties(gosd PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -1,16 +1,28 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
CMAKE_ARGS?= CMAKE_ARGS?=
BUILD_TYPE?= BUILD_TYPE?=
NATIVE?=false NATIVE?=false
CUDA_LIBPATH?=/usr/local/cuda/lib64/
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC
GOCMD?=go GOCMD?=go
CGO_LDFLAGS?=
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
CGO_LDFLAGS_SYCL=
GO_TAGS?= GO_TAGS?=
JOBS?=$(shell nproc --ignore=1) LD_FLAGS?=
# stablediffusion.cpp (ggml) # stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp STABLEDIFFUSION_GGML_REPO?=https://github.com/richiejp/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=0ebe6fe118f125665939b27c89f34ed38716bff8 STABLEDIFFUSION_GGML_VERSION?=53e3b17eb3d0b5760ced06a1f98320b68b34aaae
CMAKE_ARGS+=-DGGML_MAX_NAME=128 # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
@@ -19,6 +31,7 @@ endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
@@ -29,14 +42,18 @@ else ifeq ($(BUILD_TYPE),clblas)
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON
# If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
# But if it's OSX without metal, disable it here
else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
CGO_LDFLAGS+=-lvulkan
else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF
else
CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
TARGET+=--target ggml-metal
endif
endif
@@ -46,6 +63,12 @@ ifeq ($(BUILD_TYPE),sycl_f16)
-DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON \
-DGGML_SYCL_F16=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif
ifeq ($(BUILD_TYPE),sycl_f32)
@@ -53,29 +76,83 @@ ifeq ($(BUILD_TYPE),sycl_f32)
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif
# warnings
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
# Find all .a archives in ARCHIVE_DIR
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
GGML_ARCHIVE_DIR := build/ggml/src/
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
# Name of the single merged library
COMBINED_LIB := libggmlall.a
# Rule to merge all the .a files into one
$(COMBINED_LIB): $(ALL_ARCHIVES)
@echo "Merging all .a into $(COMBINED_LIB)"
rm -f $@
mkdir -p merge-tmp
for a in $(ALL_ARCHIVES); do \
( cd merge-tmp && ar x ../$$a ); \
done
( cd merge-tmp && ar rcs ../$@ *.o )
# Ensure we have a proper index
ranlib $@
# Clean up
rm -rf merge-tmp
build/libstable-diffusion.a:
@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release"
else
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release
endif
$(MAKE) $(COMBINED_LIB)
gosd.o:
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
else
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
endif
## stablediffusion (ggml)
sources/stablediffusion-ggml.cpp:
git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
cd sources/stablediffusion-ggml.cpp && \
git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
libgosd.so: sources/stablediffusion-ggml.cpp CMakeLists.txt gosd.cpp gosd.h libsd.a: sources/stablediffusion-ggml.cpp build/libstable-diffusion.a gosd.o
mkdir -p build && \ cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
cd build && \ $(AR) rcs libsd.a gosd.o
cmake .. $(CMAKE_ARGS) && \
cmake --build . --config Release -j$(JOBS) && \
cd .. && \
mv build/libgosd.so ./
stablediffusion-ggml: main.go gosd.go libgosd.so stablediffusion-ggml: libsd.a
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o stablediffusion-ggml ./ CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
package: stablediffusion-ggml package:
bash package.sh bash package.sh
build: package build: stablediffusion-ggml package
clean: clean:
rm -rf libgosd.so build stablediffusion-ggml package sources rm -rf gosd.o libsd.a build $(COMBINED_LIB)

View File

@@ -1,14 +1,16 @@
#include <cstdint>
#define GGML_MAX_NAME 128
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <iostream>
#include <random>
#include <string>
#include <vector>
#include <filesystem>
#include "gosd.h" #include "gosd.h"
// #include "preprocessing.hpp"
#include "flux.hpp"
#include "stable-diffusion.h"
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_STATIC
#include "stb_image.h"
@@ -23,7 +25,7 @@
// Names of the sampler method, same order as enum sample_method in stable-diffusion.h
const char* sample_method_str[] = {
"default", "euler_a",
"euler", "euler",
"heun", "heun",
"dpm2", "dpm2",
@@ -35,89 +37,43 @@ const char* sample_method_str[] = {
"lcm", "lcm",
"ddim_trailing", "ddim_trailing",
"tcd", "tcd",
"euler_a",
}; };
static_assert(std::size(sample_method_str) == SAMPLE_METHOD_COUNT, "sample method mismatch");
// Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h
const char* schedulers[] = { const char* schedule_str[] = {
"default", "default",
"discrete", "discrete",
"karras", "karras",
"exponential", "exponential",
"ays", "ays",
"gits", "gits",
"smoothstep",
};
static_assert(std::size(schedulers) == SCHEDULE_COUNT, "schedulers mismatch");
sd_ctx_t* sd_c;
// Moved from the context (load time) to generation time params
scheduler_t scheduler = scheduler_t::DEFAULT;
sample_method_t sample_method;
// Copied from the upstream CLI int load_model(char *model, char* options[], int threads, int diff) {
static void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) { fprintf (stderr, "Loading model!\n");
//SDParams* params = (SDParams*)data;
const char* level_str;
if (!log /*|| (!params->verbose && level <= SD_LOG_DEBUG)*/) { char *stableDiffusionModel = "";
return;
}
switch (level) {
case SD_LOG_DEBUG:
level_str = "DEBUG";
break;
case SD_LOG_INFO:
level_str = "INFO";
break;
case SD_LOG_WARN:
level_str = "WARN";
break;
case SD_LOG_ERROR:
level_str = "ERROR";
break;
default: /* Potential future-proofing */
level_str = "?????";
break;
}
fprintf(stderr, "[%-5s] ", level_str);
fputs(log, stderr);
fflush(stderr);
}
int load_model(const char *model, char *model_path, char* options[], int threads, int diff) {
fprintf (stderr, "Loading model: %p=%s\n", model, model);
sd_set_log_callback(sd_log_cb, NULL);
const char *stableDiffusionModel = "";
if (diff == 1 ) {
stableDiffusionModel = model;
model = "";
}
// decode options. Options are in the form optname:optval, or just optname for booleans.
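// e.g. {"sampler:euler", "scheduler:karras", NULL}  (illustrative values; the option names match the checks below)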
const char *clip_l_path = ""; char *clip_l_path = "";
const char *clip_g_path = ""; char *clip_g_path = "";
const char *t5xxl_path = ""; char *t5xxl_path = "";
const char *vae_path = ""; char *vae_path = "";
const char *scheduler_str = ""; char *scheduler = "";
const char *sampler = ""; char *sampler = "";
char *lora_dir = model_path;
bool lora_dir_allocated = false;
fprintf(stderr, "parsing options: %p\n", options);
// If options is not NULL, parse options // If options is not NULL, parse options
for (int i = 0; options[i] != NULL; i++) { for (int i = 0; options[i] != NULL; i++) {
const char *optname = strtok(options[i], ":"); char *optname = strtok(options[i], ":");
const char *optval = strtok(NULL, ":"); char *optval = strtok(NULL, ":");
if (optval == NULL) {
optval = "true";
}
@@ -135,132 +91,77 @@ int load_model(const char *model, char *model_path, char* options[], int threads
vae_path = optval; vae_path = optval;
} }
if (!strcmp(optname, "scheduler")) { if (!strcmp(optname, "scheduler")) {
scheduler_str = optval; scheduler = optval;
} }
if (!strcmp(optname, "sampler")) { if (!strcmp(optname, "sampler")) {
sampler = optval; sampler = optval;
} }
if (!strcmp(optname, "lora_dir")) {
// Path join with model dir
if (model_path && strlen(model_path) > 0) {
std::filesystem::path model_path_str(model_path);
std::filesystem::path lora_path(optval);
std::filesystem::path full_lora_path = model_path_str / lora_path;
lora_dir = strdup(full_lora_path.string().c_str());
lora_dir_allocated = true;
fprintf(stderr, "Lora dir resolved to: %s\n", lora_dir);
} else {
lora_dir = strdup(optval);
lora_dir_allocated = true;
fprintf(stderr, "No model path provided, using lora dir as-is: %s\n", lora_dir);
}
}
} }
fprintf(stderr, "parsed options\n");
int sample_method_found = -1; int sample_method_found = -1;
for (int m = 0; m < SAMPLE_METHOD_COUNT; m++) { for (int m = 0; m < N_SAMPLE_METHODS; m++) {
if (!strcmp(sampler, sample_method_str[m])) { if (!strcmp(sampler, sample_method_str[m])) {
sample_method_found = m; sample_method_found = m;
fprintf(stderr, "Found sampler: %s\n", sampler);
} }
} }
if (sample_method_found == -1) { if (sample_method_found == -1) {
fprintf(stderr, "Invalid sample method, default to EULER_A!\n"); fprintf(stderr, "Invalid sample method, default to EULER_A!\n");
sample_method_found = sample_method_t::SAMPLE_METHOD_DEFAULT; sample_method_found = EULER_A;
} }
sample_method = (sample_method_t)sample_method_found; sample_method = (sample_method_t)sample_method_found;
for (int d = 0; d < SCHEDULE_COUNT; d++) { int schedule_found = -1;
if (!strcmp(scheduler_str, schedulers[d])) { for (int d = 0; d < N_SCHEDULES; d++) {
scheduler = (scheduler_t)d; if (!strcmp(scheduler, schedule_str[d])) {
fprintf (stderr, "Found scheduler: %s\n", scheduler_str); schedule_found = d;
fprintf (stderr, "Found scheduler: %s\n", scheduler);
} }
} }
if (schedule_found == -1) {
fprintf (stderr, "Invalid scheduler! using DEFAULT\n");
schedule_found = DEFAULT;
}
schedule_t schedule = (schedule_t)schedule_found;
fprintf (stderr, "Creating context\n"); fprintf (stderr, "Creating context\n");
sd_ctx_params_t ctx_params; sd_ctx_t* sd_ctx = new_sd_ctx(model,
sd_ctx_params_init(&ctx_params); clip_l_path,
ctx_params.model_path = model; clip_g_path,
ctx_params.clip_l_path = clip_l_path; t5xxl_path,
ctx_params.clip_g_path = clip_g_path; stableDiffusionModel,
ctx_params.t5xxl_path = t5xxl_path; vae_path,
ctx_params.diffusion_model_path = stableDiffusionModel; "",
ctx_params.vae_path = vae_path; "",
ctx_params.taesd_path = ""; "",
ctx_params.control_net_path = ""; "",
ctx_params.lora_model_dir = lora_dir; "",
ctx_params.embedding_dir = ""; false,
ctx_params.vae_decode_only = false; false,
ctx_params.free_params_immediately = false; false,
ctx_params.n_threads = threads; threads,
ctx_params.rng_type = STD_DEFAULT_RNG; SD_TYPE_COUNT,
sd_ctx_t* sd_ctx = new_sd_ctx(&ctx_params); STD_DEFAULT_RNG,
schedule,
false,
false,
false,
false);
if (sd_ctx == NULL) { if (sd_ctx == NULL) {
fprintf (stderr, "failed loading model (generic error)\n"); fprintf (stderr, "failed loading model (generic error)\n");
// Clean up allocated memory
if (lora_dir_allocated && lora_dir) {
free(lora_dir);
}
return 1; return 1;
} }
fprintf (stderr, "Created context: OK\n"); fprintf (stderr, "Created context: OK\n");
sd_c = sd_ctx; sd_c = sd_ctx;
// Clean up allocated memory
if (lora_dir_allocated && lora_dir) {
free(lora_dir);
}
return 0; return 0;
} }
void sd_tiling_params_set_enabled(sd_tiling_params_t *params, bool enabled) { int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed , char *dst, float cfg_scale) {
params->enabled = enabled;
}
void sd_tiling_params_set_tile_sizes(sd_tiling_params_t *params, int tile_size_x, int tile_size_y) {
params->tile_size_x = tile_size_x;
params->tile_size_y = tile_size_y;
}
void sd_tiling_params_set_rel_sizes(sd_tiling_params_t *params, float rel_size_x, float rel_size_y) {
params->rel_size_x = rel_size_x;
params->rel_size_y = rel_size_y;
}
void sd_tiling_params_set_target_overlap(sd_tiling_params_t *params, float target_overlap) {
params->target_overlap = target_overlap;
}
sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t *params) {
return &params->vae_tiling_params;
}
sd_img_gen_params_t* sd_img_gen_params_new(void) {
sd_img_gen_params_t *params = (sd_img_gen_params_t *)std::malloc(sizeof(sd_img_gen_params_t));
sd_img_gen_params_init(params);
return params;
}
void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt) {
params->prompt = prompt;
params->negative_prompt = negative_prompt;
}
void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height) {
params->width = width;
params->height = height;
}
void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed) {
params->seed = seed;
}
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count) {
sd_image_t* results; sd_image_t* results;
@@ -268,199 +169,37 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
fprintf (stderr, "Generating image\n"); fprintf (stderr, "Generating image\n");
p->sample_params.guidance.txt_cfg = cfg_scale; results = txt2img(sd_c,
p->sample_params.guidance.slg.layers = skip_layers.data(); text,
p->sample_params.guidance.slg.layer_count = skip_layers.size(); negativeText,
p->sample_params.sample_method = sample_method; -1, //clip_skip
p->sample_params.sample_steps = steps; cfg_scale, // sfg_scale
p->sample_params.scheduler = scheduler; 3.5f,
0, // eta
int width = p->width; width,
int height = p->height; height,
sample_method,
// Handle input image for img2img steps,
bool has_input_image = (src_image != NULL && strlen(src_image) > 0); seed,
bool has_mask_image = (mask_image != NULL && strlen(mask_image) > 0); 1,
NULL,
uint8_t* input_image_buffer = NULL; 0.9f,
uint8_t* mask_image_buffer = NULL; 20.f,
std::vector<uint8_t> default_mask_image_vec; false,
"",
if (has_input_image) { skip_layers.data(),
fprintf(stderr, "Loading input image: %s\n", src_image); skip_layers.size(),
0,
int c = 0; 0.01,
int img_width = 0; 0.2);
int img_height = 0;
input_image_buffer = stbi_load(src_image, &img_width, &img_height, &c, 3);
if (input_image_buffer == NULL) {
fprintf(stderr, "Failed to load input image from '%s'\n", src_image);
return 1;
}
if (c < 3) {
fprintf(stderr, "Input image must have at least 3 channels, got %d\n", c);
free(input_image_buffer);
return 1;
}
// Resize input image if dimensions don't match
if (img_width != width || img_height != height) {
fprintf(stderr, "Resizing input image from %dx%d to %dx%d\n", img_width, img_height, width, height);
uint8_t* resized_image_buffer = (uint8_t*)malloc(height * width * 3);
if (resized_image_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized image\n");
free(input_image_buffer);
return 1;
}
stbir_resize(input_image_buffer, img_width, img_height, 0,
resized_image_buffer, width, height, 0, STBIR_TYPE_UINT8,
3, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(input_image_buffer);
input_image_buffer = resized_image_buffer;
}
p->init_image = {(uint32_t)width, (uint32_t)height, 3, input_image_buffer};
p->strength = strength;
fprintf(stderr, "Using img2img with strength: %.2f\n", strength);
} else {
// No input image, use empty image for text-to-image
p->init_image = {(uint32_t)width, (uint32_t)height, 3, NULL};
p->strength = 0.0f;
}
// Handle mask image for inpainting
if (has_mask_image) {
fprintf(stderr, "Loading mask image: %s\n", mask_image);
int c = 0;
int mask_width = 0;
int mask_height = 0;
mask_image_buffer = stbi_load(mask_image, &mask_width, &mask_height, &c, 1);
if (mask_image_buffer == NULL) {
fprintf(stderr, "Failed to load mask image from '%s'\n", mask_image);
if (input_image_buffer) free(input_image_buffer);
return 1;
}
// Resize mask if dimensions don't match
if (mask_width != width || mask_height != height) {
fprintf(stderr, "Resizing mask image from %dx%d to %dx%d\n", mask_width, mask_height, width, height);
uint8_t* resized_mask_buffer = (uint8_t*)malloc(height * width);
if (resized_mask_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized mask\n");
free(mask_image_buffer);
if (input_image_buffer) free(input_image_buffer);
return 1;
}
stbir_resize(mask_image_buffer, mask_width, mask_height, 0,
resized_mask_buffer, width, height, 0, STBIR_TYPE_UINT8,
1, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(mask_image_buffer);
mask_image_buffer = resized_mask_buffer;
}
p->mask_image = {(uint32_t)width, (uint32_t)height, 1, mask_image_buffer};
fprintf(stderr, "Using inpainting with mask\n");
} else {
// No mask image, create default full mask
default_mask_image_vec.resize(width * height, 255);
p->mask_image = {(uint32_t)width, (uint32_t)height, 1, default_mask_image_vec.data()};
}
// Handle reference images
std::vector<sd_image_t> ref_images_vec;
std::vector<uint8_t*> ref_image_buffers;
if (ref_images_count > 0 && ref_images != NULL) {
fprintf(stderr, "Loading %d reference images\n", ref_images_count);
for (int i = 0; i < ref_images_count; i++) {
if (ref_images[i] == NULL || strlen(ref_images[i]) == 0) {
continue;
}
fprintf(stderr, "Loading reference image %d: %s\n", i + 1, ref_images[i]);
int c = 0;
int ref_width = 0;
int ref_height = 0;
uint8_t* ref_image_buffer = stbi_load(ref_images[i], &ref_width, &ref_height, &c, 3);
if (ref_image_buffer == NULL) {
fprintf(stderr, "Failed to load reference image from '%s'\n", ref_images[i]);
continue;
}
if (c < 3) {
fprintf(stderr, "Reference image must have at least 3 channels, got %d\n", c);
free(ref_image_buffer);
continue;
}
// Resize reference image if dimensions don't match
if (ref_width != width || ref_height != height) {
fprintf(stderr, "Resizing reference image from %dx%d to %dx%d\n", ref_width, ref_height, width, height);
uint8_t* resized_ref_buffer = (uint8_t*)malloc(height * width * 3);
if (resized_ref_buffer == NULL) {
fprintf(stderr, "Failed to allocate memory for resized reference image\n");
free(ref_image_buffer);
continue;
}
stbir_resize(ref_image_buffer, ref_width, ref_height, 0,
resized_ref_buffer, width, height, 0, STBIR_TYPE_UINT8,
3, STBIR_ALPHA_CHANNEL_NONE, 0,
STBIR_EDGE_CLAMP, STBIR_EDGE_CLAMP,
STBIR_FILTER_BOX, STBIR_FILTER_BOX,
STBIR_COLORSPACE_SRGB, nullptr);
free(ref_image_buffer);
ref_image_buffer = resized_ref_buffer;
}
ref_image_buffers.push_back(ref_image_buffer);
ref_images_vec.push_back({(uint32_t)width, (uint32_t)height, 3, ref_image_buffer});
}
if (!ref_images_vec.empty()) {
p->ref_images = ref_images_vec.data();
p->ref_images_count = ref_images_vec.size();
fprintf(stderr, "Using %zu reference images\n", ref_images_vec.size());
}
}
results = generate_image(sd_c, p);
std::free(p);
if (results == NULL) { if (results == NULL) {
fprintf (stderr, "NO results\n"); fprintf (stderr, "NO results\n");
if (input_image_buffer) free(input_image_buffer);
if (mask_image_buffer) free(mask_image_buffer);
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
return 1; return 1;
} }
if (results[0].data == NULL) { if (results[0].data == NULL) {
fprintf (stderr, "Results with no data\n"); fprintf (stderr, "Results with no data\n");
if (input_image_buffer) free(input_image_buffer);
if (mask_image_buffer) free(mask_image_buffer);
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
return 1; return 1;
} }
@@ -476,21 +215,17 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
results[0].data, 0, NULL); results[0].data, 0, NULL);
fprintf (stderr, "Saved resulting image to '%s'\n", dst); fprintf (stderr, "Saved resulting image to '%s'\n", dst);
// Clean up // TODO: free results. Why does it crash?
free(results[0].data); free(results[0].data);
results[0].data = NULL; results[0].data = NULL;
free(results); free(results);
if (input_image_buffer) free(input_image_buffer); fprintf (stderr, "gen_image is done", dst);
if (mask_image_buffer) free(mask_image_buffer);
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
fprintf (stderr, "gen_image is done: %s", dst);
return 0; return 0;
} }
int unload() { int unload() {
free_sd_ctx(sd_c); free_sd_ctx(sd_c);
return 0;
} }

View File

@@ -1,10 +1,15 @@
package main
// #cgo CXXFLAGS: -I${SRCDIR}/sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/sources/stablediffusion-ggml.cpp -I${SRCDIR}/sources/stablediffusion-ggml.cpp/ggml/include
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
// #include <gosd.h>
// #include <stdlib.h>
import "C"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"unsafe"
@@ -20,45 +25,20 @@ type SDGGML struct {
cfgScale float32 cfgScale float32
} }
var (
LoadModel func(model, model_apth string, options []uintptr, threads int32, diff int) int
GenImage func(params uintptr, steps int, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []string, refImagesCount int) int
TilingParamsSetEnabled func(params uintptr, enabled bool)
TilingParamsSetTileSizes func(params uintptr, tileSizeX int, tileSizeY int)
TilingParamsSetRelSizes func(params uintptr, relSizeX float32, relSizeY float32)
TilingParamsSetTargetOverlap func(params uintptr, targetOverlap float32)
ImgGenParamsNew func() uintptr
ImgGenParamsSetPrompts func(params uintptr, prompt string, negativePrompt string)
ImgGenParamsSetDimensions func(params uintptr, width int, height int)
ImgGenParamsSetSeed func(params uintptr, seed int64)
ImgGenParamsGetVaeTilingParams func(params uintptr) uintptr
)
// Copied from Purego internal/strings
// TODO: We should upstream sending []string
func hasSuffix(s, suffix string) bool {
return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
func CString(name string) *byte {
if hasSuffix(name, "\x00") {
return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
}
b := make([]byte, len(name)+1)
copy(b, name)
return &b[0]
}
func (sd *SDGGML) Load(opts *pb.ModelOptions) error { func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
sd.threads = int(opts.Threads) sd.threads = int(opts.Threads)
modelPath := opts.ModelPath modelFile := C.CString(opts.ModelFile)
defer C.free(unsafe.Pointer(modelFile))
modelFile := opts.ModelFile var options **C.char
modelPathC := modelPath // prepare the options array to pass to C
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
length := C.size_t(len(opts.Options))
options = (**C.char)(C.malloc(length * size))
view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0:len(opts.Options):len(opts.Options)]
var diffusionModel int var diffusionModel int
@@ -83,63 +63,31 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
fmt.Fprintf(os.Stderr, "Options: %+v\n", oo) fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
// At the time of writing Purego doesn't recurse into slices and convert Go strings to pointers so we need to do that for i, x := range oo {
var keepAlive []any view[i] = C.CString(x)
options := make([]uintptr, len(oo), len(oo)+1)
for i, op := range oo {
bytep := CString(op)
options[i] = uintptr(unsafe.Pointer(bytep))
keepAlive = append(keepAlive, bytep)
} }
sd.cfgScale = opts.CFGScale sd.cfgScale = opts.CFGScale
ret := LoadModel(modelFile, modelPathC, options, opts.Threads, diffusionModel) ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
if ret != 0 { if ret != 0 {
return fmt.Errorf("could not load model") return fmt.Errorf("could not load model")
} }
runtime.KeepAlive(keepAlive)
return nil return nil
} }
func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error { func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
t := opts.PositivePrompt t := C.CString(opts.PositivePrompt)
dst := opts.Dst defer C.free(unsafe.Pointer(t))
negative := opts.NegativePrompt
srcImage := opts.Src
var maskImage string dst := C.CString(opts.Dst)
if opts.EnableParameters != "" { defer C.free(unsafe.Pointer(dst))
if strings.Contains(opts.EnableParameters, "mask:") {
parts := strings.Split(opts.EnableParameters, "mask:")
if len(parts) > 1 {
maskPath := strings.TrimSpace(parts[1])
if maskPath != "" {
maskImage = maskPath
}
}
}
}
refImagesCount := len(opts.RefImages) negative := C.CString(opts.NegativePrompt)
refImages := make([]string, refImagesCount, refImagesCount+1) defer C.free(unsafe.Pointer(negative))
copy(refImages, opts.RefImages)
*(*uintptr)(unsafe.Add(unsafe.Pointer(&refImages), refImagesCount)) = 0
// Default strength for img2img (0.75 is a good default) ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
strength := float32(0.75)
// free'd by GenImage
p := ImgGenParamsNew()
ImgGenParamsSetPrompts(p, t, negative)
ImgGenParamsSetDimensions(p, int(opts.Width), int(opts.Height))
ImgGenParamsSetSeed(p, int64(opts.Seed))
vaep := ImgGenParamsGetVaeTilingParams(p)
TilingParamsSetEnabled(vaep, false)
ret := GenImage(p, int(opts.Step), dst, sd.cfgScale, srcImage, strength, maskImage, refImages, refImagesCount)
if ret != 0 {
return fmt.Errorf("inference failed")
}

View File

@@ -1,23 +1,8 @@
#include <cstdint>
#include "stable-diffusion.h"
#ifdef __cplusplus
extern "C" {
#endif
int load_model(char *model, char* options[], int threads, int diffusionModel);
void sd_tiling_params_set_enabled(sd_tiling_params_t *params, bool enabled); int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
void sd_tiling_params_set_tile_sizes(sd_tiling_params_t *params, int tile_size_x, int tile_size_y);
void sd_tiling_params_set_rel_sizes(sd_tiling_params_t *params, float rel_size_x, float rel_size_y);
void sd_tiling_params_set_target_overlap(sd_tiling_params_t *params, float target_overlap);
sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t *params);
sd_img_gen_params_t* sd_img_gen_params_new(void);
void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt);
void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height);
void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed);
int load_model(const char *model, char *model_path, char* options[], int threads, int diffusionModel);
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count);
#ifdef __cplusplus
}
#endif

View File

@@ -1,9 +1,9 @@
package main
// Note: this is started internally by LocalAI and a server is allocated for each model
import (
"flag"
"github.com/ebitengine/purego"
grpc "github.com/mudler/LocalAI/pkg/grpc"
)
@@ -11,36 +11,7 @@ var (
addr = flag.String("addr", "localhost:50051", "the address to connect to")
)
type LibFuncs struct {
FuncPtr any
Name string
}
func main() {
gosd, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
if err != nil {
panic(err)
}
libFuncs := []LibFuncs{
{&LoadModel, "load_model"},
{&GenImage, "gen_image"},
{&TilingParamsSetEnabled, "sd_tiling_params_set_enabled"},
{&TilingParamsSetTileSizes, "sd_tiling_params_set_tile_sizes"},
{&TilingParamsSetRelSizes, "sd_tiling_params_set_rel_sizes"},
{&TilingParamsSetTargetOverlap, "sd_tiling_params_set_target_overlap"},
{&ImgGenParamsNew, "sd_img_gen_params_new"},
{&ImgGenParamsSetPrompts, "sd_img_gen_params_set_prompts"},
{&ImgGenParamsSetDimensions, "sd_img_gen_params_set_dimensions"},
{&ImgGenParamsSetSeed, "sd_img_gen_params_set_seed"},
{&ImgGenParamsGetVaeTilingParams, "sd_img_gen_params_get_vae_tiling_params"},
}
for _, lf := range libFuncs {
purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
}
flag.Parse()
if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {

View File

@@ -10,9 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avf $CURDIR/libgosd.so $CURDIR/package/ cp -avrf $CURDIR/stablediffusion-ggml $CURDIR/package/
cp -avf $CURDIR/stablediffusion-ggml $CURDIR/package/ cp -rfv $CURDIR/run.sh $CURDIR/package/
cp -fv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -43,13 +42,11 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
elif [ $(uname -s) = "Darwin" ]; then
echo "Detected Darwin"
else
echo "Error: Could not detect architecture"
exit 1
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

View File

@@ -1,7 +0,0 @@
.cache/
sources/
build/
package/
whisper
libgowhisper.so

View File

@@ -1,16 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(gowhisper LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
add_subdirectory(./sources/whisper.cpp)
add_library(gowhisper MODULE gowhisper.cpp)
target_link_libraries(gowhisper PRIVATE whisper ggml)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
target_link_libraries(gosd PRIVATE stdc++fs)
endif()
set_property(TARGET gowhisper PROPERTY CXX_STANDARD 17)
set_target_properties(gowhisper PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})

View File

@@ -1,54 +1,110 @@
CMAKE_ARGS?= GOCMD=go
BUILD_TYPE?=
NATIVE?=false NATIVE?=false
GOCMD?=go BUILD_TYPE?=
GO_TAGS?= CMAKE_ARGS?=
JOBS?=$(shell nproc --ignore=1)
# whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=c62adfbd1ecdaea9e295c72d672992514a2d887c WHISPER_CPP_VERSION?=1f5cf0b2888402d57bb17b2029b2caa97e5f3baf
SO_TARGET?=libgowhisper.so
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF export WHISPER_CMAKE_ARGS?=-DBUILD_SHARED_LIBS=OFF
export WHISPER_DIR=$(abspath ./sources/whisper.cpp)
export WHISPER_INCLUDE_PATH=$(WHISPER_DIR)/include:$(WHISPER_DIR)/ggml/include
export WHISPER_LIBRARY_PATH=$(WHISPER_DIR)/build/src/:$(WHISPER_DIR)/build/ggml/src
CGO_LDFLAGS_WHISPER?=
CGO_LDFLAGS_WHISPER+=-lggml
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
CUDA_LIBPATH?=/usr/local/cuda/lib64/
ONEAPI_VERSION?=2025.2
# IF native is false, we add -DGGML_NATIVE=OFF to CMAKE_ARGS
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
WHISPER_CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
CMAKE_ARGS+=-DGGML_CUDA=ON
CGO_LDFLAGS_WHISPER+=-lcufft -lggml-cuda
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-cuda/
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DGGML_HIPBLAS=ON ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm
LD_LIBRARY_PATH ?= /opt/rocm/lib:/opt/rocm/llvm/lib
export STABLE_BUILD_TYPE=
export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang
# GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
# AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib -L$(CURRENT_MAKEFILE_DIR)/sources/whisper.cpp/build/ggml/src/ggml-hip/ -lggml-hip
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=ON CMAKE_ARGS+=-DGGML_VULKAN=1
CGO_LDFLAGS_WHISPER+=-lggml-vulkan -lvulkan
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-vulkan/
else ifeq ($(OS),Darwin)
ifeq ($(BUILD_TYPE),)
BUILD_TYPE=metal
endif
ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DGGML_METAL=OFF
CGO_LDFLAGS_WHISPER+=-lggml-blas
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-blas
else
CMAKE_ARGS+=-DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
CMAKE_ARGS+=-DGGML_METAL_USE_BF16=ON
CMAKE_ARGS+=-DGGML_OPENMP=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_EXAMPLES=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_TESTS=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_SERVER=OFF
CGO_LDFLAGS += -framework Accelerate
CGO_LDFLAGS_WHISPER+=-lggml-metal -lggml-blas
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-metal/:$(WHISPER_DIR)/build/ggml/src/ggml-blas
endif
TARGET+=--target ggml-metal
endif
ifeq ($(BUILD_TYPE),sycl_f16) ifneq (,$(findstring sycl,$(BUILD_TYPE)))
export CC=icx
export CXX=icpx
CGO_LDFLAGS_WHISPER += -fsycl -L${DNNLROOT}/lib -rpath ${ONEAPI_ROOT}/${ONEAPI_VERSION}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL -lggml-sycl
CGO_LDFLAGS_WHISPER += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS_WHISPER += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS_WHISPER += $(shell pkg-config --cflags mkl-static-lp64-gomp )
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-sycl/
CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \
-DGGML_SYCL_F16=ON -DCMAKE_CXX_FLAGS="-fsycl"
endif
ifeq ($(BUILD_TYPE),sycl_f32) ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DGGML_SYCL=ON \ CMAKE_ARGS+=-DGGML_SYCL_F16=ON
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx
endif
ifneq ($(OS),Darwin)
CGO_LDFLAGS_WHISPER+=-lgomp
endif
## whisper
sources/whisper.cpp:
mkdir -p sources/whisper.cpp
cd sources/whisper.cpp && \
@@ -58,65 +114,18 @@ sources/whisper.cpp:
git checkout $(WHISPER_CPP_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch
# Detect OS sources/whisper.cpp/build/src/libwhisper.a: sources/whisper.cpp
UNAME_S := $(shell uname -s) cd sources/whisper.cpp && cmake $(CMAKE_ARGS) $(WHISPER_CMAKE_ARGS) . -B ./build
cd sources/whisper.cpp/build && cmake --build . --config Release
# Only build CPU variants on Linux whisper: sources/whisper.cpp sources/whisper.cpp/build/src/libwhisper.a
ifeq ($(UNAME_S),Linux) $(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
VARIANT_TARGETS = libgowhisper-avx.so libgowhisper-avx2.so libgowhisper-avx512.so libgowhisper-fallback.so $(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
else CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH="${WHISPER_INCLUDE_PATH}" LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" LD_LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" \
# On non-Linux (e.g., Darwin), build only fallback variant CGO_CXXFLAGS="$(CGO_CXXFLAGS_WHISPER)" \
VARIANT_TARGETS = libgowhisper-fallback.so $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o whisper ./
endif
whisper: main.go gowhisper.go $(VARIANT_TARGETS) package:
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o whisper ./
package: whisper
bash package.sh
build: package build: whisper package
clean: purge
rm -rf libgowhisper*.so sources/whisper.cpp whisper
purge:
rm -rf build*
# Build all variants (Linux only)
ifeq ($(UNAME_S),Linux)
libgowhisper-avx.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:avx${RESET})
SO_TARGET=libgowhisper-avx.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
rm -rfv build*
libgowhisper-avx2.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:avx2${RESET})
SO_TARGET=libgowhisper-avx2.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
rm -rfv build*
libgowhisper-avx512.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:avx512${RESET})
SO_TARGET=libgowhisper-avx512.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
rm -rfv build*
endif
# Build fallback variant (all platforms)
libgowhisper-fallback.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:fallback${RESET})
SO_TARGET=libgowhisper-fallback.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
rm -rfv build*
libgowhisper-custom: CMakeLists.txt gowhisper.cpp gowhisper.h
mkdir -p build-$(SO_TARGET) && \
cd build-$(SO_TARGET) && \
cmake .. $(CMAKE_ARGS) && \
cmake --build . --config Release -j$(JOBS) && \
cd .. && \
mv build-$(SO_TARGET)/libgowhisper.so ./$(SO_TARGET)
all: whisper package

View File

@@ -1,154 +0,0 @@
#include "gowhisper.h"
#include "ggml-backend.h"
#include "whisper.h"
#include <vector>
static struct whisper_vad_context *vctx;
static struct whisper_context *ctx;
static std::vector<float> flat_segs;
static void ggml_log_cb(enum ggml_log_level level, const char *log,
void *data) {
const char *level_str;
if (!log) {
return;
}
switch (level) {
case GGML_LOG_LEVEL_DEBUG:
level_str = "DEBUG";
break;
case GGML_LOG_LEVEL_INFO:
level_str = "INFO";
break;
case GGML_LOG_LEVEL_WARN:
level_str = "WARN";
break;
case GGML_LOG_LEVEL_ERROR:
level_str = "ERROR";
break;
default: /* Potential future-proofing */
level_str = "?????";
break;
}
fprintf(stderr, "[%-5s] ", level_str);
fputs(log, stderr);
fflush(stderr);
}
int load_model(const char *const model_path) {
whisper_log_set(ggml_log_cb, nullptr);
ggml_backend_load_all();
struct whisper_context_params cparams = whisper_context_default_params();
ctx = whisper_init_from_file_with_params(model_path, cparams);
if (ctx == nullptr) {
fprintf(stderr, "error: Also failed to init model as transcriber\n");
return 1;
}
return 0;
}
int load_model_vad(const char *const model_path) {
whisper_log_set(ggml_log_cb, nullptr);
ggml_backend_load_all();
struct whisper_vad_context_params vcparams =
whisper_vad_default_context_params();
// XXX: Overridden to false in upstream due to performance?
// vcparams.use_gpu = true;
vctx = whisper_vad_init_from_file_with_params(model_path, vcparams);
if (vctx == nullptr) {
fprintf(stderr, "error: Failed to init model as VAD\n");
return 1;
}
return 0;
}
int vad(float pcmf32[], size_t pcmf32_len, float **segs_out,
size_t *segs_out_len) {
if (!whisper_vad_detect_speech(vctx, pcmf32, pcmf32_len)) {
fprintf(stderr, "error: failed to detect speech\n");
return 1;
}
struct whisper_vad_params params = whisper_vad_default_params();
struct whisper_vad_segments *segs =
whisper_vad_segments_from_probs(vctx, params);
size_t segn = whisper_vad_segments_n_segments(segs);
// fprintf(stderr, "Got segments %zd\n", segn);
flat_segs.clear();
for (int i = 0; i < segn; i++) {
flat_segs.push_back(whisper_vad_segments_get_segment_t0(segs, i));
flat_segs.push_back(whisper_vad_segments_get_segment_t1(segs, i));
}
// fprintf(stderr, "setting out variables: %p=%p -> %p, %p=%zx -> %zx\n",
// segs_out, *segs_out, flat_segs.data(), segs_out_len, *segs_out_len,
// flat_segs.size());
*segs_out = flat_segs.data();
*segs_out_len = flat_segs.size();
// fprintf(stderr, "freeing segs\n");
whisper_vad_free_segments(segs);
// fprintf(stderr, "returning\n");
return 0;
}
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len) {
whisper_full_params wparams =
whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
wparams.n_threads = threads;
if (*lang != '\0')
wparams.language = lang;
else {
wparams.language = nullptr;
}
wparams.translate = translate;
wparams.debug_mode = true;
wparams.print_progress = true;
wparams.tdrz_enable = tdrz;
fprintf(stderr, "info: Enable tdrz: %d\n", tdrz);
if (whisper_full(ctx, wparams, pcmf32, pcmf32_len)) {
fprintf(stderr, "error: transcription failed\n");
return 1;
}
*segs_out_len = whisper_full_n_segments(ctx);
return 0;
}
const char *get_segment_text(int i) {
return whisper_full_get_segment_text(ctx, i);
}
int64_t get_segment_t0(int i) { return whisper_full_get_segment_t0(ctx, i); }
int64_t get_segment_t1(int i) { return whisper_full_get_segment_t1(ctx, i); }
int n_tokens(int i) { return whisper_full_n_tokens(ctx, i); }
int32_t get_token_id(int i, int j) {
return whisper_full_get_token_id(ctx, i, j);
}
bool get_segment_speaker_turn_next(int i) {
return whisper_full_get_segment_speaker_turn_next(ctx, i);
}

View File

@@ -1,161 +0,0 @@
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"unsafe"
"github.com/go-audio/wav"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/utils"
)
var (
CppLoadModel func(modelPath string) int
CppLoadModelVAD func(modelPath string) int
CppVAD func(pcmf32 []float32, pcmf32Size uintptr, segsOut unsafe.Pointer, segsOutLen unsafe.Pointer) int
CppTranscribe func(threads uint32, lang string, translate bool, diarize bool, pcmf32 []float32, pcmf32Len uintptr, segsOutLen unsafe.Pointer) int
CppGetSegmentText func(i int) string
CppGetSegmentStart func(i int) int64
CppGetSegmentEnd func(i int) int64
CppNTokens func(i int) int
CppGetTokenID func(i int, j int) int
CppGetSegmentSpeakerTurnNext func(i int) bool
)
type Whisper struct {
base.SingleThread
}
func (w *Whisper) Load(opts *pb.ModelOptions) error {
vadOnly := false
for _, oo := range opts.Options {
if oo == "vad_only" {
vadOnly = true
} else {
fmt.Fprintf(os.Stderr, "Unrecognized option: %v\n", oo)
}
}
if vadOnly {
if ret := CppLoadModelVAD(opts.ModelFile); ret != 0 {
return fmt.Errorf("Failed to load Whisper VAD model")
}
return nil
}
if ret := CppLoadModel(opts.ModelFile); ret != 0 {
return fmt.Errorf("Failed to load Whisper transcription model")
}
return nil
}
func (w *Whisper) VAD(req *pb.VADRequest) (pb.VADResponse, error) {
audio := req.Audio
// We expect 0xdeadbeef to be overwritten and if we see it in a stack trace we know it wasn't
segsPtr, segsLen := uintptr(0xdeadbeef), uintptr(0xdeadbeef)
segsPtrPtr, segsLenPtr := unsafe.Pointer(&segsPtr), unsafe.Pointer(&segsLen)
if ret := CppVAD(audio, uintptr(len(audio)), segsPtrPtr, segsLenPtr); ret != 0 {
return pb.VADResponse{}, fmt.Errorf("Failed VAD")
}
// Happens when CPP vector has not had any elements pushed to it
if segsPtr == 0 {
return pb.VADResponse{
Segments: []*pb.VADSegment{},
}, nil
}
// unsafeptr warning is caused by segsPtr being on the stack and therefore being subject to stack copying AFAICT
// however the stack shouldn't have grown between setting segsPtr and now, also the memory pointed to is allocated by C++
segs := unsafe.Slice((*float32)(unsafe.Pointer(segsPtr)), segsLen)
vadSegments := []*pb.VADSegment{}
for i := range len(segs) >> 1 {
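// t0/t1 come back from whisper.cpp in centiseconds (10 ms units), so dividing by 100 yields seconds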
s := segs[2*i] / 100
t := segs[2*i+1] / 100
vadSegments = append(vadSegments, &pb.VADSegment{
Start: s,
End: t,
})
}
return pb.VADResponse{
Segments: vadSegments,
}, nil
}
func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
dir, err := os.MkdirTemp("", "whisper")
if err != nil {
return pb.TranscriptResult{}, err
}
defer os.RemoveAll(dir)
convertedPath := filepath.Join(dir, "converted.wav")
if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
return pb.TranscriptResult{}, err
}
// Open samples
fh, err := os.Open(convertedPath)
if err != nil {
return pb.TranscriptResult{}, err
}
defer fh.Close()
// Read samples
d := wav.NewDecoder(fh)
buf, err := d.FullPCMBuffer()
if err != nil {
return pb.TranscriptResult{}, err
}
data := buf.AsFloat32Buffer().Data
segsLen := uintptr(0xdeadbeef)
segsLenPtr := unsafe.Pointer(&segsLen)
if ret := CppTranscribe(opts.Threads, opts.Language, opts.Translate, opts.Diarize, data, uintptr(len(data)), segsLenPtr); ret != 0 {
return pb.TranscriptResult{}, fmt.Errorf("Failed Transcribe")
}
segments := []*pb.TranscriptSegment{}
text := ""
for i := range int(segsLen) {
s := CppGetSegmentStart(i)
t := CppGetSegmentEnd(i)
txt := strings.Clone(CppGetSegmentText(i))
tokens := make([]int32, CppNTokens(i))
if opts.Diarize && CppGetSegmentSpeakerTurnNext(i) {
txt += " [SPEAKER_TURN]"
}
for j := range tokens {
tokens[j] = int32(CppGetTokenID(i, j))
}
segment := &pb.TranscriptSegment{
Id: int32(i),
Text: txt,
Start: s, End: t,
Tokens: tokens,
}
segments = append(segments, segment)
text += " " + strings.TrimSpace(txt)
}
return pb.TranscriptResult{
Segments: segments,
Text: strings.TrimSpace(text),
}, nil
}

View File

@@ -1,17 +0,0 @@
#include <cstddef>
#include <cstdint>
extern "C" {
int load_model(const char *const model_path);
int load_model_vad(const char *const model_path);
int vad(float pcmf32[], size_t pcmf32_size, float **segs_out,
size_t *segs_out_len);
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len);
const char *get_segment_text(int i);
int64_t get_segment_t0(int i);
int64_t get_segment_t1(int i);
int n_tokens(int i);
int32_t get_token_id(int i, int j);
bool get_segment_speaker_turn_next(int i);
}

View File

@@ -1,11 +1,10 @@
package main
// Note: this is started internally by LocalAI and a server is allocated for each model
import (
"flag"
"os"
"github.com/ebitengine/purego"
grpc "github.com/mudler/LocalAI/pkg/grpc"
)
@@ -13,40 +12,7 @@ var (
addr = flag.String("addr", "localhost:50051", "the address to connect to")
)
type LibFuncs struct {
FuncPtr any
Name string
}
func main() {
// Get library name from environment variable, default to fallback
libName := os.Getenv("WHISPER_LIBRARY")
if libName == "" {
libName = "./libgowhisper-fallback.so"
}
gosd, err := purego.Dlopen(libName, purego.RTLD_NOW|purego.RTLD_GLOBAL)
if err != nil {
panic(err)
}
libFuncs := []LibFuncs{
{&CppLoadModel, "load_model"},
{&CppLoadModelVAD, "load_model_vad"},
{&CppVAD, "vad"},
{&CppTranscribe, "transcribe"},
{&CppGetSegmentText, "get_segment_text"},
{&CppGetSegmentStart, "get_segment_t0"},
{&CppGetSegmentEnd, "get_segment_t1"},
{&CppNTokens, "n_tokens"},
{&CppGetTokenID, "get_token_id"},
{&CppGetSegmentSpeakerTurnNext, "get_segment_speaker_turn_next"},
}
for _, lf := range libFuncs {
purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
}
flag.Parse()
if err := grpc.StartServer(*addr, &Whisper{}); err != nil {

View File

@@ -10,9 +10,8 @@ CURDIR=$(dirname "$(realpath $0)")
# Create lib directory
mkdir -p $CURDIR/package/lib
cp -avf $CURDIR/whisper $CURDIR/package/ cp -avrf $CURDIR/whisper $CURDIR/package/
cp -fv $CURDIR/libgowhisper-*.so $CURDIR/package/ cp -rfv $CURDIR/run.sh $CURDIR/package/
cp -fv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -43,13 +42,11 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
elif [ $(uname -s) = "Darwin" ]; then
echo "Detected Darwin"
else
echo "Error: Could not detect architecture"
exit 1
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

View File

@@ -1,52 +1,14 @@
#!/bin/bash
set -ex
# Get the absolute current dir where the script is located
CURDIR=$(dirname "$(realpath $0)")
cd /
echo "CPU info:"
if [ "$(uname)" != "Darwin" ]; then
grep -e "model\sname" /proc/cpuinfo | head -1
grep -e "flags" /proc/cpuinfo | head -1
fi
LIBRARY="$CURDIR/libgowhisper-fallback.so"
if [ "$(uname)" != "Darwin" ]; then
if grep -q -e "\savx\s" /proc/cpuinfo ; then
echo "CPU: AVX found OK"
if [ -e $CURDIR/libgowhisper-avx.so ]; then
LIBRARY="$CURDIR/libgowhisper-avx.so"
fi
fi
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
echo "CPU: AVX2 found OK"
if [ -e $CURDIR/libgowhisper-avx2.so ]; then
LIBRARY="$CURDIR/libgowhisper-avx2.so"
fi
fi
# Check avx 512
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
echo "CPU: AVX512F found OK"
if [ -e $CURDIR/libgowhisper-avx512.so ]; then
LIBRARY="$CURDIR/libgowhisper-avx512.so"
fi
fi
fi
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
export WHISPER_LIBRARY=$LIBRARY
# If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then
echo "Using lib/ld.so"
echo "Using library: $LIBRARY"
exec $CURDIR/lib/ld.so $CURDIR/whisper "$@"
fi
echo "Using library: $LIBRARY"
exec $CURDIR/whisper "$@"

View File

@@ -0,0 +1,105 @@
package main
// This is a wrapper to satisfy the GRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"os"
"path/filepath"
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
"github.com/go-audio/wav"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/utils"
)
type Whisper struct {
base.SingleThread
whisper whisper.Model
}
func (sd *Whisper) Load(opts *pb.ModelOptions) error {
// Note: the Model here is a path to a directory containing the model files
w, err := whisper.New(opts.ModelFile)
sd.whisper = w
return err
}
func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
dir, err := os.MkdirTemp("", "whisper")
if err != nil {
return pb.TranscriptResult{}, err
}
defer os.RemoveAll(dir)
convertedPath := filepath.Join(dir, "converted.wav")
if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
return pb.TranscriptResult{}, err
}
// Open samples
fh, err := os.Open(convertedPath)
if err != nil {
return pb.TranscriptResult{}, err
}
defer fh.Close()
// Read samples
d := wav.NewDecoder(fh)
buf, err := d.FullPCMBuffer()
if err != nil {
return pb.TranscriptResult{}, err
}
data := buf.AsFloat32Buffer().Data
// Process samples
context, err := sd.whisper.NewContext()
if err != nil {
return pb.TranscriptResult{}, err
}
context.SetThreads(uint(opts.Threads))
if opts.Language != "" {
context.SetLanguage(opts.Language)
} else {
context.SetLanguage("auto")
}
if opts.Translate {
context.SetTranslate(true)
}
if err := context.Process(data, nil, nil, nil); err != nil {
return pb.TranscriptResult{}, err
}
segments := []*pb.TranscriptSegment{}
text := ""
for {
s, err := context.NextSegment()
if err != nil {
break
}
var tokens []int32
for _, t := range s.Tokens {
tokens = append(tokens, int32(t.Id))
}
segment := &pb.TranscriptSegment{Id: int32(s.Num), Text: s.Text, Start: int64(s.Start), End: int64(s.End), Tokens: tokens}
segments = append(segments, segment)
text += s.Text
}
return pb.TranscriptResult{
Segments: segments,
Text: text,
}, nil
}

View File

File diff suppressed because it is too large

View File

@@ -1,190 +1,38 @@
# Common commands about conda environment

## Create a new empty conda environment

```
conda create --name <env-name> python=<your version> -y
conda create --name autogptq python=3.11 -y
```

## To activate the environment

As of conda 4.4

```
conda activate autogptq
```

The conda version older than 4.4

```
source activate autogptq
```

## Install the packages to your environment

Sometimes you need to install the packages from the conda-forge channel

By using `conda`

```
conda install <your-package-name>
conda install -c conda-forge <your package-name>
```

Or by using `pip`

```
pip install <your-package-name>
```

# Python Backends for LocalAI

This directory contains Python-based AI backends for LocalAI, providing support for various AI models and hardware acceleration targets.

## Overview

The Python backends use a unified build system based on `libbackend.sh` that provides:
- **Automatic virtual environment management** with support for both `uv` and `pip`
- **Hardware-specific dependency installation** (CPU, CUDA, Intel, MLX, etc.)
- **Portable Python support** for standalone deployments
- **Consistent backend execution** across different environments

## Available Backends

### Core AI Models
- **transformers** - Hugging Face Transformers framework (PyTorch-based)
- **vllm** - High-performance LLM inference engine
- **mlx** - Apple Silicon optimized ML framework
- **exllama2** - ExLlama2 quantized models

### Audio & Speech
- **bark** - Text-to-speech synthesis
- **coqui** - Coqui TTS models
- **faster-whisper** - Fast Whisper speech recognition
- **kitten-tts** - Lightweight TTS
- **mlx-audio** - Apple Silicon audio processing
- **chatterbox** - TTS model
- **kokoro** - TTS models

### Computer Vision
- **diffusers** - Stable Diffusion and image generation
- **mlx-vlm** - Vision-language models for Apple Silicon
- **rfdetr** - Object detection models

### Specialized
- **rerankers** - Text reranking models

## Quick Start

### Prerequisites
- Python 3.10+ (default: 3.10.18)
- `uv` package manager (recommended) or `pip`
- Appropriate hardware drivers for your target (CUDA, Intel, etc.)

### Installation

Each backend can be installed individually:

```bash
# Navigate to a specific backend
cd backend/python/transformers

# Install dependencies
make transformers
# or
bash install.sh

# Run the backend
make run
# or
bash run.sh
```

### Using the Unified Build System

The `libbackend.sh` script provides consistent commands across all backends:

```bash
# Source the library in your backend script
source $(dirname $0)/../common/libbackend.sh

# Install requirements (automatically handles hardware detection)
installRequirements

# Start the backend server
startBackend $@

# Run tests
runUnittests
```

## Hardware Targets

The build system automatically detects and configures for different hardware:

- **CPU** - Standard CPU-only builds
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 11/12)
- **Intel** - Intel XPU/GPU optimization
- **MLX** - Apple Silicon (M1/M2/M3) optimization
- **HIP** - AMD GPU acceleration

### Target-Specific Requirements

Backends can specify hardware-specific dependencies:
- `requirements.txt` - Base requirements
- `requirements-cpu.txt` - CPU-specific packages
- `requirements-cublas11.txt` - CUDA 11 packages
- `requirements-cublas12.txt` - CUDA 12 packages
- `requirements-intel.txt` - Intel-optimized packages
- `requirements-mps.txt` - Apple Silicon packages

## Configuration Options

### Environment Variables
- `PYTHON_VERSION` - Python version (default: 3.10)
- `PYTHON_PATCH` - Python patch version (default: 18)
- `BUILD_TYPE` - Force specific build target
- `USE_PIP` - Use pip instead of uv (default: false)
- `PORTABLE_PYTHON` - Enable portable Python builds
- `LIMIT_TARGETS` - Restrict backend to specific targets

### Example: CUDA 12 Only Backend
```bash
# In your backend script
LIMIT_TARGETS="cublas12"
source $(dirname $0)/../common/libbackend.sh
```

### Example: Intel-Optimized Backend
```bash
# In your backend script
LIMIT_TARGETS="intel"
source $(dirname $0)/../common/libbackend.sh
```

## Development

### Adding a New Backend

1. Create a new directory in `backend/python/`
2. Copy the template structure from `common/template/`
3. Implement your `backend.py` with the required gRPC interface
4. Add appropriate requirements files for your target hardware
5. Use `libbackend.sh` for consistent build and execution

### Testing
```bash
# Run backend tests
make test
# or
bash test.sh
```

### Building
```bash
# Install dependencies
make <backend-name>
# Clean build artifacts
make clean
```
## Architecture
Each backend follows a consistent structure:
```
backend-name/
├── backend.py # Main backend implementation
├── requirements.txt # Base dependencies
├── requirements-*.txt # Hardware-specific dependencies
├── install.sh # Installation script
├── run.sh # Execution script
├── test.sh # Test script
├── Makefile # Build targets
└── test.py # Unit tests
```
## Troubleshooting
### Common Issues
1. **Missing dependencies**: Ensure all requirements files are properly configured
2. **Hardware detection**: Check that `BUILD_TYPE` matches your system
3. **Python version**: Verify Python 3.10+ is available
4. **Virtual environment**: Use `ensureVenv` to create/activate environments
## Contributing
When adding new backends or modifying existing ones:
1. Follow the established directory structure
2. Use `libbackend.sh` for consistent behavior
3. Include appropriate requirements files for all target hardware
4. Add comprehensive tests
5. Update this README if adding new backend types
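
The "Adding a New Backend" steps above ask for a `backend.py` that implements the gRPC interface. A minimal sketch, assuming the `backend_pb2` / `backend_pb2_grpc` modules produced by the protogen step and mirroring the servicers used by the existing backends, could look like this (the `add_BackendServicer_to_server` helper name and the `ADDRESS` fallback are assumptions):

```python
#!/usr/bin/env python3
"""
Minimal sketch of a new LocalAI Python backend (illustrative only).
"""
from concurrent import futures
import os
import time

import grpc
import backend_pb2
import backend_pb2_grpc

_ONE_DAY_IN_SECONDS = 60 * 60 * 24
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))


class BackendServicer(backend_pb2_grpc.BackendServicer):
    def LoadModel(self, request, context):
        try:
            # Load your model here; request.Model / request.ModelFile carry the
            # model identifier handed over by LocalAI.
            self.model = None
        except Exception as err:
            return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
        return backend_pb2.Result(message="Model loaded successfully", success=True)


def serve(address):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS))
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == "__main__":
    serve(os.environ.get("ADDRESS", "localhost:50051"))
```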

View File

@@ -1,23 +1,29 @@
.PHONY: ttsbark .PHONY: ttsbark
ttsbark: ttsbark: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: ttsbark run: protogen
@echo "Running bark..." @echo "Running bark..."
bash run.sh bash run.sh
@echo "bark run." @echo "bark run."
.PHONY: test .PHONY: test
test: ttsbark test: protogen
@echo "Testing bark..." @echo "Testing bark..."
bash test.sh bash test.sh
@echo "bark tested." @echo "bark tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__
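
The protogen rule above shells out to `python3 -m grpc_tools.protoc`; the same generation can also be driven from Python, for example in a test harness. A small sketch mirroring the Makefile flags:

```python
# Sketch: generate backend_pb2.py / backend_pb2_grpc.py from backend.proto,
# mirroring the Makefile's protogen rule. Run from the backend directory.
from grpc_tools import protoc

args = [
    "grpc_tools.protoc",
    "-I../..",          # repository root, where backend.proto lives
    "-I./",
    "--python_out=.",
    "--grpc_python_out=.",
    "backend.proto",
]

if protoc.main(args) != 0:
    raise RuntimeError("protoc failed; check that backend.proto is reachable")
```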

View File

@@ -1,5 +1,5 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi torchaudio==2.3.1+cxx11.abi
oneccl_bind_pt==2.3.100+xpu oneccl_bind_pt==2.3.100+xpu

View File

@@ -1,4 +1,4 @@
bark==0.1.5 bark==0.1.5
grpcio==1.76.0 grpcio==1.71.0
protobuf protobuf
certifi certifi

View File

@@ -1,23 +1,29 @@
.PHONY: chatterbox .PHONY: coqui
chatterbox: coqui: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: chatterbox run: protogen
@echo "Running coqui..." @echo "Running coqui..."
bash run.sh bash run.sh
@echo "coqui run." @echo "coqui run."
.PHONY: test .PHONY: test
test: chatterbox test: protogen
@echo "Testing coqui..." @echo "Testing coqui..."
bash test.sh bash test.sh
@echo "coqui tested." @echo "coqui tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
""" """
This is an extra gRPC server of LocalAI for Chatterbox TTS This is an extra gRPC server of LocalAI for Bark TTS
""" """
from concurrent import futures from concurrent import futures
import time import time
@@ -14,98 +14,15 @@ import backend_pb2_grpc
import torch import torch
import torchaudio as ta import torchaudio as ta
from chatterbox.tts import ChatterboxTTS from chatterbox.tts import ChatterboxTTS
from chatterbox.mtl_tts import ChatterboxMultilingualTTS
import grpc import grpc
import tempfile
def is_float(s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False
def split_text_at_word_boundary(text, max_length=250):
"""
Split text at word boundaries without truncating words.
Returns a list of text chunks.
"""
if not text or len(text) <= max_length:
return [text]
chunks = []
words = text.split()
current_chunk = ""
for word in words:
# Check if adding this word would exceed the limit
if len(current_chunk) + len(word) + 1 <= max_length:
if current_chunk:
current_chunk += " " + word
else:
current_chunk = word
else:
# If current chunk is not empty, add it to chunks
if current_chunk:
chunks.append(current_chunk)
current_chunk = word
else:
# If a single word is longer than max_length, we have to include it anyway
chunks.append(word)
current_chunk = ""
# Add the last chunk if it's not empty
if current_chunk:
chunks.append(current_chunk)
return chunks
def merge_audio_files(audio_files, output_path, sample_rate):
"""
Merge multiple audio files into a single audio file.
"""
if not audio_files:
return
if len(audio_files) == 1:
# If only one file, just copy it
import shutil
shutil.copy2(audio_files[0], output_path)
return
# Load all audio files
waveforms = []
for audio_file in audio_files:
waveform, sr = ta.load(audio_file)
if sr != sample_rate:
# Resample if necessary
resampler = ta.transforms.Resample(sr, sample_rate)
waveform = resampler(waveform)
waveforms.append(waveform)
# Concatenate all waveforms
merged_waveform = torch.cat(waveforms, dim=1)
# Save the merged audio
ta.save(output_path, merged_waveform, sample_rate)
# Clean up temporary files
for audio_file in audio_files:
if os.path.exists(audio_file):
os.remove(audio_file)
_ONE_DAY_IN_SECONDS = 60 * 60 * 24 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS are specified in the environment use it, otherwise default to 1 # If MAX_WORKERS are specified in the environment use it, otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1')) MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
COQUI_LANGUAGE = os.environ.get('COQUI_LANGUAGE', None)
# Implement the BackendServicer class with the service methods # Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer): class BackendServicer(backend_pb2_grpc.BackendServicer):
@@ -124,34 +41,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
else: else:
print("CUDA is not available", file=sys.stderr) print("CUDA is not available", file=sys.stderr)
device = "cpu" device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA: if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available") return backend_pb2.Result(success=False, message="CUDA is not available")
options = request.Options
# empty dict
self.options = {}
# The options are a list of strings in this form optname:optvalue
# We are storing all the options in a dict so we can use it later when
# generating the images
for opt in options:
if ":" not in opt:
continue
key, value = opt.split(":")
# if value is a number, convert it to the appropriate type
if is_float(value):
value = float(value)
elif is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value
self.AudioPath = None self.AudioPath = None
if os.path.isabs(request.AudioPath): if os.path.isabs(request.AudioPath):
@@ -161,14 +54,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
modelFileBase = os.path.dirname(request.ModelFile) modelFileBase = os.path.dirname(request.ModelFile)
# modify LoraAdapter to be relative to modelFileBase # modify LoraAdapter to be relative to modelFileBase
self.AudioPath = os.path.join(modelFileBase, request.AudioPath) self.AudioPath = os.path.join(modelFileBase, request.AudioPath)
try: try:
print("Preparing models, please wait", file=sys.stderr) print("Preparing models, please wait", file=sys.stderr)
if "multilingual" in self.options: self.model = ChatterboxTTS.from_pretrained(device=device)
# remove key from options
del self.options["multilingual"]
self.model = ChatterboxMultilingualTTS.from_pretrained(device=device)
else:
self.model = ChatterboxTTS.from_pretrained(device=device)
except Exception as err: except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
# Implement your logic here for the LoadModel service # Implement your logic here for the LoadModel service
@@ -177,43 +66,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
def TTS(self, request, context): def TTS(self, request, context):
try: try:
kwargs = {} # Generate audio using ChatterboxTTS
if "language" in self.options:
kwargs["language_id"] = self.options["language"]
if self.AudioPath is not None: if self.AudioPath is not None:
kwargs["audio_prompt_path"] = self.AudioPath wav = self.model.generate(request.text, audio_prompt_path=self.AudioPath)
# add options to kwargs
kwargs.update(self.options)
# Check if text exceeds 250 characters
# (chatterbox does not support long text)
# https://github.com/resemble-ai/chatterbox/issues/60
# https://github.com/resemble-ai/chatterbox/issues/110
if len(request.text) > 250:
# Split text at word boundaries
text_chunks = split_text_at_word_boundary(request.text, max_length=250)
print(f"Splitting text into chunks of 250 characters: {len(text_chunks)}", file=sys.stderr)
# Generate audio for each chunk
temp_audio_files = []
for i, chunk in enumerate(text_chunks):
# Generate audio for this chunk
wav = self.model.generate(chunk, **kwargs)
# Create temporary file for this chunk
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')
temp_file.close()
ta.save(temp_file.name, wav, self.model.sr)
temp_audio_files.append(temp_file.name)
# Merge all audio files
merge_audio_files(temp_audio_files, request.dst, self.model.sr)
else: else:
# Generate audio using ChatterboxTTS for short text wav = self.model.generate(request.text)
wav = self.model.generate(request.text, **kwargs)
# Save the generated audio # Save the generated audio
ta.save(request.dst, wav, self.model.sr) ta.save(request.dst, wav, self.model.sr)
except Exception as err: except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}") return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
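
The chunking helpers in this backend keep each chatterbox request under 250 characters without cutting words in half. A quick usage sketch of `split_text_at_word_boundary` (the import path is an assumption; the helper lives in this backend's `backend.py`):

```python
# Illustration of the word-boundary chunking used by the TTS handler above.
# The import path is an assumption for the sake of the example.
from backend import split_text_at_word_boundary

long_text = ("word " * 120).strip()  # ~600 characters, well over the 250-character limit
chunks = split_text_at_word_boundary(long_text, max_length=250)

# No chunk exceeds the limit, and no words are dropped or truncated.
assert all(len(chunk) <= 250 for chunk in chunks)
assert " ".join(chunks) == long_text
print(f"{len(chunks)} chunks, longest {max(map(len, chunks))} chars")
```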

View File

@@ -15,6 +15,5 @@ fi
if [ "x${BUILD_PROFILE}" == "xintel" ]; then if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match" EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi fi
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
installRequirements installRequirements

View File

@@ -1,8 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cpu
accelerate accelerate
torch torch==2.6.0
torchaudio torchaudio==2.6.0
transformers transformers==4.46.3
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289 chatterbox-tts
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
#chatterbox-tts==0.1.4

View File

@@ -2,6 +2,5 @@
torch==2.6.0+cu118 torch==2.6.0+cu118
torchaudio==2.6.0+cu118 torchaudio==2.6.0+cu118
transformers==4.46.3 transformers==4.46.3
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289 chatterbox-tts
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate accelerate

View File

@@ -1,6 +1,5 @@
torch torch==2.6.0
torchaudio torchaudio==2.6.0
transformers transformers==4.46.3
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289 chatterbox-tts
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate accelerate

View File

@@ -1,7 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.0 --extra-index-url https://download.pytorch.org/whl/rocm6.0
torch==2.6.0+rocm6.1 torch==2.6.0+rocm6.1
torchaudio==2.6.0+rocm6.1 torchaudio==2.6.0+rocm6.1
transformers transformers==4.46.3
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289 chatterbox-tts
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate accelerate

View File

@@ -2,10 +2,10 @@
intel-extension-for-pytorch==2.3.110+xpu intel-extension-for-pytorch==2.3.110+xpu
torch==2.3.1+cxx11.abi torch==2.3.1+cxx11.abi
torchaudio==2.3.1+cxx11.abi torchaudio==2.3.1+cxx11.abi
transformers transformers==4.46.3
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289 chatterbox-tts
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate accelerate
oneccl_bind_pt==2.3.100+xpu oneccl_bind_pt==2.3.100+xpu
optimum[openvino] optimum[openvino]
setuptools setuptools
accelerate

View File

@@ -1,6 +0,0 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
torch
torchaudio
transformers
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate

View File

@@ -2,5 +2,4 @@ grpcio==1.71.0
protobuf protobuf
certifi certifi
packaging packaging
setuptools setuptools
poetry

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
set -euo pipefail
# init handles the setup of the library
# #
# use the library by adding the following line to a script: # use the library by adding the following line to a script:
# source $(dirname $0)/../common/libbackend.sh # source $(dirname $0)/../common/libbackend.sh
@@ -17,182 +17,29 @@ set -euo pipefail
# LIMIT_TARGETS="cublas12" # LIMIT_TARGETS="cublas12"
# source $(dirname $0)/../common/libbackend.sh # source $(dirname $0)/../common/libbackend.sh
# #
# You can switch between uv (conda-like) and pip installation methods by setting USE_PIP:
# USE_PIP=true source $(dirname $0)/../common/libbackend.sh
#
# ===================== user-configurable defaults =====================
PYTHON_VERSION="${PYTHON_VERSION:-3.10}" # e.g. 3.10 / 3.11 / 3.12 / 3.13
PYTHON_PATCH="${PYTHON_PATCH:-18}" # e.g. 18 -> 3.10.18 ; 13 -> 3.11.13
PY_STANDALONE_TAG="${PY_STANDALONE_TAG:-20250818}" # release tag date
# Enable/disable bundling of a portable Python build
PORTABLE_PYTHON="${PORTABLE_PYTHON:-false}"
# If you want to fully pin the filename (including tuned CPU targets), set: PYTHON_VERSION="3.10"
# PORTABLE_PY_FILENAME="cpython-3.10.18+20250818-x86_64_v3-unknown-linux-gnu-install_only.tar.gz"
: "${PORTABLE_PY_FILENAME:=}"
: "${PORTABLE_PY_SHA256:=}" # optional; if set we verify the download
# =====================================================================
# Default to uv if USE_PIP is not set
if [ "x${USE_PIP:-}" == "x" ]; then
USE_PIP=false
fi
# ----------------------- helpers -----------------------
function _is_musl() {
# detect musl (Alpine, etc)
if command -v ldd >/dev/null 2>&1; then
ldd --version 2>&1 | grep -qi musl && return 0
fi
# busybox-ish fallback
if command -v getconf >/dev/null 2>&1; then
getconf GNU_LIBC_VERSION >/dev/null 2>&1 || return 0
fi
return 1
}
function _triple() {
local os="" arch="" libc="gnu"
case "$(uname -s)" in
Linux*) os="unknown-linux" ;;
Darwin*) os="apple-darwin" ;;
MINGW*|MSYS*|CYGWIN*) os="pc-windows-msvc" ;; # best-effort for Git Bash
*) echo "Unsupported OS $(uname -s)"; exit 1;;
esac
case "$(uname -m)" in
x86_64) arch="x86_64" ;;
aarch64|arm64) arch="aarch64" ;;
armv7l) arch="armv7" ;;
i686|i386) arch="i686" ;;
ppc64le) arch="ppc64le" ;;
s390x) arch="s390x" ;;
riscv64) arch="riscv64" ;;
*) echo "Unsupported arch $(uname -m)"; exit 1;;
esac
if [[ "$os" == "unknown-linux" ]]; then
if _is_musl; then
libc="musl"
else
libc="gnu"
fi
echo "${arch}-${os}-${libc}"
else
echo "${arch}-${os}"
fi
}
function _portable_dir() {
echo "${EDIR}/python"
}
function _portable_bin() {
# python-build-standalone puts python in ./bin
echo "$(_portable_dir)/bin"
}
function _portable_python() {
if [ -x "$(_portable_bin)/python3" ]; then
echo "$(_portable_bin)/python3"
else
echo "$(_portable_bin)/python"
fi
}
# macOS loader env for the portable CPython
_macosPortableEnv() {
if [ "$(uname -s)" = "Darwin" ]; then
export DYLD_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}"
export DYLD_FALLBACK_LIBRARY_PATH="$(_portable_dir)/lib${DYLD_FALLBACK_LIBRARY_PATH:+:${DYLD_FALLBACK_LIBRARY_PATH}}"
fi
}
# Good hygiene on macOS for downloaded/extracted trees
_unquarantinePortablePython() {
if [ "$(uname -s)" = "Darwin" ]; then
command -v xattr >/dev/null 2>&1 && xattr -dr com.apple.quarantine "$(_portable_dir)" || true
fi
}
# ------------------ ### PORTABLE PYTHON ------------------
function ensurePortablePython() {
local pdir="$(_portable_dir)"
local pbin="$(_portable_bin)"
local pyexe
if [ -x "${pbin}/python3" ] || [ -x "${pbin}/python" ]; then
_macosPortableEnv
return 0
fi
mkdir -p "${pdir}"
local triple="$(_triple)"
local full_ver="${PYTHON_VERSION}.${PYTHON_PATCH}"
local fn=""
if [ -n "${PORTABLE_PY_FILENAME}" ]; then
fn="${PORTABLE_PY_FILENAME}"
else
# generic asset name: cpython-<full_ver>+<tag>-<triple>-install_only.tar.gz
fn="cpython-${full_ver}+${PY_STANDALONE_TAG}-${triple}-install_only.tar.gz"
fi
local url="https://github.com/astral-sh/python-build-standalone/releases/download/${PY_STANDALONE_TAG}/${fn}"
local tmp="${pdir}/${fn}"
echo "Downloading portable Python: ${fn}"
# curl with retries; fall back to wget if needed
if command -v curl >/dev/null 2>&1; then
curl -L --fail --retry 3 --retry-delay 1 -o "${tmp}" "${url}"
else
wget -O "${tmp}" "${url}"
fi
if [ -n "${PORTABLE_PY_SHA256}" ]; then
echo "${PORTABLE_PY_SHA256} ${tmp}" | sha256sum -c -
fi
echo "Extracting ${fn} -> ${pdir}"
# always a .tar.gz (we purposely choose install_only)
tar -xzf "${tmp}" -C "${pdir}"
rm -f "${tmp}"
# Some archives nest a directory; if so, flatten to ${pdir}
# Find the first dir with a 'bin/python*'
local inner
inner="$(find "${pdir}" -type f -path "*/bin/python*" -maxdepth 3 2>/dev/null | head -n1 || true)"
if [ -n "${inner}" ]; then
local inner_root
inner_root="$(dirname "$(dirname "${inner}")")" # .../bin -> root
if [ "${inner_root}" != "${pdir}" ]; then
# move contents up one level
shopt -s dotglob
mv "${inner_root}/"* "${pdir}/"
rm -rf "${inner_root}"
shopt -u dotglob
fi
fi
_unquarantinePortablePython
_macosPortableEnv
# Make sure it's runnable
pyexe="$(_portable_python)"
"${pyexe}" -V
}
# init handles the setup of the library
function init() { function init() {
# Name of the backend (directory name)
BACKEND_NAME=${PWD##*/} BACKEND_NAME=${PWD##*/}
MY_DIR=$(realpath "$(dirname "$0")")
# Path where all backends files are
MY_DIR=$(realpath `dirname $0`)
# Build type
BUILD_PROFILE=$(getBuildProfile) BUILD_PROFILE=$(getBuildProfile)
# Environment directory
EDIR=${MY_DIR} EDIR=${MY_DIR}
if [ "x${ENV_DIR:-}" != "x" ]; then
# Allow to specify a custom env dir for shared environments
if [ "x${ENV_DIR}" != "x" ]; then
EDIR=${ENV_DIR} EDIR=${ENV_DIR}
fi fi
if [ ! -z "${LIMIT_TARGETS:-}" ]; then # If a backend has defined a list of valid build profiles...
if [ ! -z "${LIMIT_TARGETS}" ]; then
isValidTarget=$(checkTargets ${LIMIT_TARGETS}) isValidTarget=$(checkTargets ${LIMIT_TARGETS})
if [ ${isValidTarget} != true ]; then if [ ${isValidTarget} != true ]; then
echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}" echo "${BACKEND_NAME} can only be used on the following targets: ${LIMIT_TARGETS}"
@@ -203,7 +50,6 @@ function init() {
echo "Initializing libbackend for ${BACKEND_NAME}" echo "Initializing libbackend for ${BACKEND_NAME}"
} }
# getBuildProfile will inspect the system to determine which build profile is appropriate: # getBuildProfile will inspect the system to determine which build profile is appropriate:
# returns one of the following: # returns one of the following:
# - cublas11 # - cublas11
@@ -211,140 +57,53 @@ function init() {
# - hipblas # - hipblas
# - intel # - intel
function getBuildProfile() { function getBuildProfile() {
if [ x"${BUILD_TYPE:-}" == "xcublas" ]; then # First check if we are a cublas build, and if so report the correct build profile
if [ ! -z "${CUDA_MAJOR_VERSION:-}" ]; then if [ x"${BUILD_TYPE}" == "xcublas" ]; then
if [ ! -z ${CUDA_MAJOR_VERSION} ]; then
# If we have been given a CUDA version, we trust it
echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION} echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION}
else else
# We don't know what version of cuda we are, so we report ourselves as a generic cublas
echo ${BUILD_TYPE} echo ${BUILD_TYPE}
fi fi
return 0 return 0
fi fi
# If /opt/intel exists, then we are doing an intel/ARC build
if [ -d "/opt/intel" ]; then if [ -d "/opt/intel" ]; then
echo "intel" echo "intel"
return 0 return 0
fi fi
if [ -n "${BUILD_TYPE:-}" ]; then # If for any other values of BUILD_TYPE, we don't need any special handling/discovery
if [ ! -z ${BUILD_TYPE} ]; then
echo ${BUILD_TYPE} echo ${BUILD_TYPE}
return 0 return 0
fi fi
# If there is no BUILD_TYPE set at all, set a build-profile value of CPU, we aren't building for any GPU targets
echo "cpu" echo "cpu"
} }
# Make the venv relocatable:
# - rewrite venv/bin/python{,3} to relative symlinks into $(_portable_dir)
# - normalize entrypoint shebangs to /usr/bin/env python3
_makeVenvPortable() {
local venv_dir="${EDIR}/venv"
local vbin="${venv_dir}/bin"
[ -d "${vbin}" ] || return 0
# 1) Replace python symlinks with relative ones to ../../python/bin/python3
# (venv/bin -> venv -> EDIR -> python/bin)
local rel_py='../../python/bin/python3'
for name in python3 python; do
if [ -e "${vbin}/${name}" ] || [ -L "${vbin}/${name}" ]; then
rm -f "${vbin}/${name}"
fi
done
ln -s "${rel_py}" "${vbin}/python3"
ln -s "python3" "${vbin}/python"
# 2) Rewrite shebangs of entry points to use env, so the venv is relocatable
# Only touch text files that start with #! and reference the current venv.
local ve_abs="${vbin}/python"
local sed_i=(sed -i)
# macOS/BSD sed needs a backup suffix; GNU sed doesn't. Make it portable:
if sed --version >/dev/null 2>&1; then
sed_i=(sed -i)
else
sed_i=(sed -i '')
fi
for f in "${vbin}"/*; do
[ -f "$f" ] || continue
# Fast path: check first two bytes (#!)
head -c2 "$f" 2>/dev/null | grep -q '^#!' || continue
# Only rewrite if the shebang mentions the (absolute) venv python
if head -n1 "$f" | grep -Fq "${ve_abs}"; then
"${sed_i[@]}" '1s|^#!.*$|#!/usr/bin/env python3|' "$f"
chmod +x "$f" 2>/dev/null || true
fi
done
}
# ensureVenv makes sure that the venv for the backend both exists, and is activated. # ensureVenv makes sure that the venv for the backend both exists, and is activated.
# #
# This function is idempotent, so you can call it as many times as you want and it will # This function is idempotent, so you can call it as many times as you want and it will
# always result in an activated virtual environment # always result in an activated virtual environment
function ensureVenv() { function ensureVenv() {
local interpreter=""
if [ "x${PORTABLE_PYTHON}" == "xtrue" ] || [ -e "$(_portable_python)" ]; then
echo "Using portable Python"
ensurePortablePython
interpreter="$(_portable_python)"
else
# Prefer system python${PYTHON_VERSION}, else python3, else fall back to bundled
if command -v python${PYTHON_VERSION} >/dev/null 2>&1; then
interpreter="python${PYTHON_VERSION}"
elif command -v python3 >/dev/null 2>&1; then
interpreter="python3"
else
echo "No suitable system Python found, bootstrapping portable build..."
ensurePortablePython
interpreter="$(_portable_python)"
fi
fi
if [ ! -d "${EDIR}/venv" ]; then if [ ! -d "${EDIR}/venv" ]; then
if [ "x${USE_PIP}" == "xtrue" ]; then uv venv --python ${PYTHON_VERSION} ${EDIR}/venv
"${interpreter}" -m venv --copies "${EDIR}/venv" echo "virtualenv created"
source "${EDIR}/venv/bin/activate"
"${interpreter}" -m pip install --upgrade pip
else
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
uv venv --python "${interpreter}" "${EDIR}/venv"
else
uv venv --python "${PYTHON_VERSION}" "${EDIR}/venv"
fi
fi
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
_makeVenvPortable
fi
fi fi
# We call it here to make sure that when we source a venv we can still use python as expected # Source if we are not already in a Virtual env
if [ -x "$(_portable_python)" ]; then if [ "x${VIRTUAL_ENV}" != "x${EDIR}/venv" ]; then
_macosPortableEnv source ${EDIR}/venv/bin/activate
echo "virtualenv activated"
fi fi
if [ "x${VIRTUAL_ENV:-}" != "x${EDIR}/venv" ]; then echo "activated virtualenv has been ensured"
source "${EDIR}/venv/bin/activate"
fi
} }
function runProtogen() {
ensureVenv
if [ "x${USE_PIP}" == "xtrue" ]; then
pip install grpcio-tools
else
uv pip install grpcio-tools
fi
pushd "${EDIR}" >/dev/null
# use the venv python (ensures correct interpreter & sys.path)
python -m grpc_tools.protoc -I../../ -I./ --python_out=. --grpc_python_out=. backend.proto
popd >/dev/null
}
# installRequirements looks for several requirements files and if they exist runs the install for them in order # installRequirements looks for several requirements files and if they exist runs the install for them in order
# #
# - requirements-install.txt # - requirements-install.txt
@@ -352,7 +111,7 @@ function runProtogen() {
# - requirements-${BUILD_TYPE}.txt # - requirements-${BUILD_TYPE}.txt
# - requirements-${BUILD_PROFILE}.txt # - requirements-${BUILD_PROFILE}.txt
# #
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12 # BUILD_PROFILE is a pore specific version of BUILD_TYPE, ex: cuda11 or cuda12
# it can also include some options that we do not have BUILD_TYPES for, ex: intel # it can also include some options that we do not have BUILD_TYPES for, ex: intel
# #
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index. # NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
@@ -368,41 +127,36 @@ function runProtogen() {
# installRequirements # installRequirements
function installRequirements() { function installRequirements() {
ensureVenv ensureVenv
# These are the requirements files we will attempt to install, in order
declare -a requirementFiles=( declare -a requirementFiles=(
"${EDIR}/requirements-install.txt" "${EDIR}/requirements-install.txt"
"${EDIR}/requirements.txt" "${EDIR}/requirements.txt"
"${EDIR}/requirements-${BUILD_TYPE:-}.txt" "${EDIR}/requirements-${BUILD_TYPE}.txt"
) )
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt") requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}.txt")
fi fi
if [ "x${BUILD_TYPE:-}" == "x" ]; then
# if BUILD_TYPE is empty, we are a CPU build, so we should try to install the CPU requirements
if [ "x${BUILD_TYPE}" == "x" ]; then
requirementFiles+=("${EDIR}/requirements-cpu.txt") requirementFiles+=("${EDIR}/requirements-cpu.txt")
fi fi
requirementFiles+=("${EDIR}/requirements-after.txt") requirementFiles+=("${EDIR}/requirements-after.txt")
if [ "x${BUILD_TYPE:-}" != "x${BUILD_PROFILE}" ]; then
if [ "x${BUILD_TYPE}" != "x${BUILD_PROFILE}" ]; then
requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt") requirementFiles+=("${EDIR}/requirements-${BUILD_PROFILE}-after.txt")
fi fi
# This is needed to build wheels that e.g. depends on Python.h
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
export C_INCLUDE_PATH="${C_INCLUDE_PATH:-}:$(_portable_dir)/include/python${PYTHON_VERSION}"
fi
for reqFile in ${requirementFiles[@]}; do for reqFile in ${requirementFiles[@]}; do
if [ -f "${reqFile}" ]; then if [ -f ${reqFile} ]; then
echo "starting requirements install for ${reqFile}" echo "starting requirements install for ${reqFile}"
if [ "x${USE_PIP}" == "xtrue" ]; then uv pip install ${EXTRA_PIP_INSTALL_FLAGS} --requirement ${reqFile}
pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
else
uv pip install ${EXTRA_PIP_INSTALL_FLAGS:-} --requirement "${reqFile}"
fi
echo "finished requirements install for ${reqFile}" echo "finished requirements install for ${reqFile}"
fi fi
done done
runProtogen
} }
# startBackend discovers and runs the backend GRPC server # startBackend discovers and runs the backend GRPC server
@@ -420,18 +174,18 @@ function installRequirements() {
# - ${BACKEND_NAME}.py # - ${BACKEND_NAME}.py
function startBackend() { function startBackend() {
ensureVenv ensureVenv
if [ ! -z "${BACKEND_FILE:-}" ]; then
exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@" if [ ! -z ${BACKEND_FILE} ]; then
exec ${EDIR}/venv/bin/python ${BACKEND_FILE} $@
elif [ -e "${MY_DIR}/server.py" ]; then elif [ -e "${MY_DIR}/server.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/server.py" "$@" exec ${EDIR}/venv/bin/python ${MY_DIR}/server.py $@
elif [ -e "${MY_DIR}/backend.py" ]; then elif [ -e "${MY_DIR}/backend.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/backend.py" "$@" exec ${EDIR}/venv/bin/python ${MY_DIR}/backend.py $@
elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then elif [ -e "${MY_DIR}/${BACKEND_NAME}.py" ]; then
exec "${EDIR}/venv/bin/python" "${MY_DIR}/${BACKEND_NAME}.py" "$@" exec ${EDIR}/venv/bin/python ${MY_DIR}/${BACKEND_NAME}.py $@
fi fi
} }
# runUnittests discovers and runs python unittests # runUnittests discovers and runs python unittests
# #
# You can specify a specific test file to use by setting TEST_FILE before calling runUnittests. # You can specify a specific test file to use by setting TEST_FILE before calling runUnittests.
@@ -444,36 +198,41 @@ function startBackend() {
# by default a file named test.py in the backends directory will be used # by default a file named test.py in the backends directory will be used
function runUnittests() { function runUnittests() {
ensureVenv ensureVenv
if [ ! -z "${TEST_FILE:-}" ]; then
testDir=$(dirname "$(realpath "${TEST_FILE}")") if [ ! -z ${TEST_FILE} ]; then
testFile=$(basename "${TEST_FILE}") testDir=$(dirname `realpath ${TEST_FILE}`)
pushd "${testDir}" >/dev/null testFile=$(basename ${TEST_FILE})
python -m unittest "${testFile}" pushd ${testDir}
popd >/dev/null python -m unittest ${testFile}
popd
elif [ -f "${MY_DIR}/test.py" ]; then elif [ -f "${MY_DIR}/test.py" ]; then
pushd "${MY_DIR}" >/dev/null pushd ${MY_DIR}
python -m unittest test.py python -m unittest test.py
popd >/dev/null popd
else else
echo "no tests defined for ${BACKEND_NAME}" echo "no tests defined for ${BACKEND_NAME}"
fi fi
} }
################################################################################## ##################################################################################
# Below here are helper functions not intended to be used outside of the library # # Below here are helper functions not intended to be used outside of the library #
################################################################################## ##################################################################################
# checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets # checkTargets determines if the current BUILD_TYPE or BUILD_PROFILE is in a list of valid targets
function checkTargets() { function checkTargets() {
# Collect all provided targets into a variable and...
targets=$@ targets=$@
# ...convert it into an array
declare -a targets=($targets) declare -a targets=($targets)
for target in ${targets[@]}; do for target in ${targets[@]}; do
if [ "x${BUILD_TYPE:-}" == "x${target}" ]; then if [ "x${BUILD_TYPE}" == "x${target}" ]; then
echo true; return 0 echo true
return 0
fi fi
if [ "x${BUILD_PROFILE}" == "x${target}" ]; then if [ "x${BUILD_PROFILE}" == "x${target}" ]; then
echo true; return 0 echo true
return 0
fi fi
done done
echo false echo false
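
installRequirements above probes a fixed sequence of requirements files and installs whichever exist. The resolution order is easier to see spelled out; here is an illustrative Python sketch of the same logic (libbackend.sh remains the source of truth):

```python
# Sketch of installRequirements' file resolution order from libbackend.sh.
# Only files that actually exist are installed.
import os


def requirement_files(edir: str, build_type: str, build_profile: str) -> list[str]:
    candidates = [
        f"{edir}/requirements-install.txt",
        f"{edir}/requirements.txt",
        f"{edir}/requirements-{build_type}.txt",
    ]
    if build_type != build_profile:
        candidates.append(f"{edir}/requirements-{build_profile}.txt")
    if build_type == "":
        # No BUILD_TYPE means a CPU build.
        candidates.append(f"{edir}/requirements-cpu.txt")
    candidates.append(f"{edir}/requirements-after.txt")
    if build_type != build_profile:
        candidates.append(f"{edir}/requirements-{build_profile}-after.txt")
    return [path for path in candidates if os.path.isfile(path)]


# Example: a CUDA 12 build of the transformers backend.
print(requirement_files("backend/python/transformers", "cublas", "cublas12"))
```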

View File

@@ -3,11 +3,18 @@
.PHONY: install .PHONY: install
install: install:
bash install.sh bash install.sh
$(MAKE) protogen
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
bash protogen.sh
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -8,4 +8,4 @@ else
source $backend_dir/../common/libbackend.sh source $backend_dir/../common/libbackend.sh
fi fi
runProtogen python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto

View File

@@ -1,5 +1,5 @@
--extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/ --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
intel-extension-for-pytorch==2.8.10+xpu intel-extension-for-pytorch==2.3.110+xpu
torch==2.8.0 torch==2.3.1+cxx11.abi
oneccl_bind_pt==2.8.0+xpu oneccl_bind_pt==2.3.100+xpu
optimum[openvino] optimum[openvino]

View File

@@ -1,3 +1,3 @@
grpcio==1.76.0 grpcio==1.71.0
protobuf protobuf
grpcio-tools grpcio-tools

View File

@@ -1,23 +1,29 @@
.PHONY: coqui .PHONY: coqui
coqui: coqui: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: coqui run: protogen
@echo "Running coqui..." @echo "Running coqui..."
bash run.sh bash run.sh
@echo "coqui run." @echo "coqui run."
.PHONY: test .PHONY: test
test: coqui test: protogen
@echo "Testing coqui..." @echo "Testing coqui..."
bash test.sh bash test.sh
@echo "coqui tested." @echo "coqui tested."
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -40,9 +40,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
else: else:
print("CUDA is not available", file=sys.stderr) print("CUDA is not available", file=sys.stderr)
device = "cpu" device = "cpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
if not torch.cuda.is_available() and request.CUDA: if not torch.cuda.is_available() and request.CUDA:
return backend_pb2.Result(success=False, message="CUDA is not available") return backend_pb2.Result(success=False, message="CUDA is not available")
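
The device-selection block touched here follows the same pattern across the Python backends: prefer CUDA, fall back to Apple's MPS where present, otherwise CPU. As a standalone sketch (not the backends' actual helper):

```python
# Sketch of the device-selection pattern used by the Python backends:
# prefer CUDA, fall back to Apple's MPS where present, otherwise CPU.
import torch


def pick_device(request_cuda: bool) -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
    if mps_available:
        device = "mps"
    if request_cuda and not torch.cuda.is_available():
        raise RuntimeError("CUDA is not available")
    return device
```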

View File

@@ -1,4 +1,4 @@
grpcio==1.76.0 grpcio==1.71.0
protobuf protobuf
certifi certifi
packaging==24.1 packaging==24.1

View File

@@ -12,22 +12,28 @@ export SKIP_CONDA=1
endif endif
.PHONY: diffusers .PHONY: diffusers
diffusers: diffusers: protogen
bash install.sh bash install.sh
.PHONY: run .PHONY: run
run: diffusers run: protogen
@echo "Running diffusers..." @echo "Running diffusers..."
bash run.sh bash run.sh
@echo "Diffusers run." @echo "Diffusers run."
test: diffusers test: protogen
bash test.sh bash test.sh
.PHONY: protogen
protogen: backend_pb2_grpc.py backend_pb2.py
.PHONY: protogen-clean .PHONY: protogen-clean
protogen-clean: protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py $(RM) backend_pb2_grpc.py backend_pb2.py
backend_pb2_grpc.py backend_pb2.py:
python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
.PHONY: clean .PHONY: clean
clean: protogen-clean clean: protogen-clean
rm -rf venv __pycache__ rm -rf venv __pycache__

View File

@@ -18,7 +18,7 @@ import backend_pb2_grpc
import grpc import grpc
from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \ from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, QwenImageEditPipeline, AutoencoderKLWan, WanPipeline, WanImageToVideoPipeline EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel
from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
from diffusers.pipelines.stable_diffusion import safety_checker from diffusers.pipelines.stable_diffusion import safety_checker
from diffusers.utils import load_image, export_to_video from diffusers.utils import load_image, export_to_video
@@ -65,21 +65,6 @@ from diffusers.schedulers import (
UniPCMultistepScheduler, UniPCMultistepScheduler,
) )
def is_float(s):
"""Check if a string can be converted to float."""
try:
float(s)
return True
except ValueError:
return False
def is_int(s):
"""Check if a string can be converted to int."""
try:
int(s)
return True
except ValueError:
return False
# The scheduler list mapping was taken from here: https://github.com/neggles/animatediff-cli/blob/6f336f5f4b5e38e85d7f06f1744ef42d0a45f2a7/src/animatediff/schedulers.py#L39 # The scheduler list mapping was taken from here: https://github.com/neggles/animatediff-cli/blob/6f336f5f4b5e38e85d7f06f1744ef42d0a45f2a7/src/animatediff/schedulers.py#L39
# Credits to https://github.com/neggles # Credits to https://github.com/neggles
@@ -184,26 +169,8 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if ":" not in opt: if ":" not in opt:
continue continue
key, value = opt.split(":") key, value = opt.split(":")
# if value is a number, convert it to the appropriate type
if is_float(value):
value = float(value)
elif is_int(value):
value = int(value)
elif value.lower() in ["true", "false"]:
value = value.lower() == "true"
self.options[key] = value self.options[key] = value
# From options, extract if present "torch_dtype" and set it to the appropriate type
if "torch_dtype" in self.options:
if self.options["torch_dtype"] == "fp16":
torchType = torch.float16
elif self.options["torch_dtype"] == "bf16":
torchType = torch.bfloat16
elif self.options["torch_dtype"] == "fp32":
torchType = torch.float32
# remove it from options
del self.options["torch_dtype"]
print(f"Options: {self.options}", file=sys.stderr) print(f"Options: {self.options}", file=sys.stderr)
local = False local = False
@@ -267,9 +234,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
elif request.PipelineType == "DiffusionPipeline": elif request.PipelineType == "DiffusionPipeline":
self.pipe = DiffusionPipeline.from_pretrained(request.Model, self.pipe = DiffusionPipeline.from_pretrained(request.Model,
torch_dtype=torchType) torch_dtype=torchType)
elif request.PipelineType == "QwenImageEditPipeline":
self.pipe = QwenImageEditPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "VideoDiffusionPipeline": elif request.PipelineType == "VideoDiffusionPipeline":
self.txt2vid = True self.txt2vid = True
self.pipe = DiffusionPipeline.from_pretrained(request.Model, self.pipe = DiffusionPipeline.from_pretrained(request.Model,
@@ -338,32 +302,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
torch_dtype=torch.bfloat16) torch_dtype=torch.bfloat16)
self.pipe.vae.to(torch.bfloat16) self.pipe.vae.to(torch.bfloat16)
self.pipe.text_encoder.to(torch.bfloat16) self.pipe.text_encoder.to(torch.bfloat16)
elif request.PipelineType == "WanPipeline":
# WAN2.2 pipeline requires special VAE handling
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
self.pipe = WanPipeline.from_pretrained(
request.Model,
vae=vae,
torch_dtype=torchType
)
self.txt2vid = True # WAN2.2 is a text-to-video pipeline
elif request.PipelineType == "WanImageToVideoPipeline":
# WAN2.2 image-to-video pipeline
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
self.pipe = WanImageToVideoPipeline.from_pretrained(
request.Model,
vae=vae,
torch_dtype=torchType
)
self.img2vid = True # WAN2.2 image-to-video pipeline
if CLIPSKIP and request.CLIPSkip != 0: if CLIPSKIP and request.CLIPSkip != 0:
self.clip_skip = request.CLIPSkip self.clip_skip = request.CLIPSkip
@@ -398,9 +336,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
device = "cpu" if not request.CUDA else "cuda" device = "cpu" if not request.CUDA else "cuda"
if XPU: if XPU:
device = "xpu" device = "xpu"
mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
if mps_available:
device = "mps"
self.device = device self.device = device
if request.LoraAdapter: if request.LoraAdapter:
# Check if its a local file and not a directory ( we load lora differently for a safetensor file ) # Check if its a local file and not a directory ( we load lora differently for a safetensor file )
@@ -505,24 +440,11 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
"num_inference_steps": steps, "num_inference_steps": steps,
} }
# Handle image source: prioritize RefImages over request.src if request.src != "" and not self.controlnet and not self.img2vid:
image_src = None image = Image.open(request.src)
if hasattr(request, 'ref_images') and request.ref_images and len(request.ref_images) > 0:
# Use the first reference image if available
image_src = request.ref_images[0]
print(f"Using reference image: {image_src}", file=sys.stderr)
elif request.src != "":
# Fall back to request.src if no ref_images
image_src = request.src
print(f"Using source image: {image_src}", file=sys.stderr)
else:
print("No image source provided", file=sys.stderr)
if image_src and not self.controlnet and not self.img2vid:
image = Image.open(image_src)
options["image"] = image options["image"] = image
elif self.controlnet and image_src: elif self.controlnet and request.src:
pose_image = load_image(image_src) pose_image = load_image(request.src)
options["image"] = pose_image options["image"] = pose_image
if CLIPSKIP and self.clip_skip != 0: if CLIPSKIP and self.clip_skip != 0:
@@ -564,11 +486,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if self.img2vid: if self.img2vid:
# Load the conditioning image # Load the conditioning image
if image_src: image = load_image(request.src)
image = load_image(image_src)
else:
# Fallback to request.src for img2vid if no ref_images
image = load_image(request.src)
image = image.resize((1024, 576)) image = image.resize((1024, 576))
generator = torch.manual_seed(request.seed) generator = torch.manual_seed(request.seed)
@@ -605,96 +523,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
return backend_pb2.Result(message="Media generated", success=True) return backend_pb2.Result(message="Media generated", success=True)
def GenerateVideo(self, request, context):
try:
prompt = request.prompt
if not prompt:
return backend_pb2.Result(success=False, message="No prompt provided for video generation")
# Set default values from request or use defaults
num_frames = request.num_frames if request.num_frames > 0 else 81
fps = request.fps if request.fps > 0 else 16
cfg_scale = request.cfg_scale if request.cfg_scale > 0 else 4.0
num_inference_steps = request.step if request.step > 0 else 40
# Prepare generation parameters
kwargs = {
"prompt": prompt,
"negative_prompt": request.negative_prompt if request.negative_prompt else "",
"height": request.height if request.height > 0 else 720,
"width": request.width if request.width > 0 else 1280,
"num_frames": num_frames,
"guidance_scale": cfg_scale,
"num_inference_steps": num_inference_steps,
}
# Add custom options from self.options (including guidance_scale_2 if specified)
kwargs.update(self.options)
# Set seed if provided
if request.seed > 0:
kwargs["generator"] = torch.Generator(device=self.device).manual_seed(request.seed)
# Handle start and end images for video generation
if request.start_image:
kwargs["start_image"] = load_image(request.start_image)
if request.end_image:
kwargs["end_image"] = load_image(request.end_image)
print(f"Generating video with {kwargs=}", file=sys.stderr)
# Generate video frames based on pipeline type
if self.PipelineType == "WanPipeline":
# WAN2.2 text-to-video generation
output = self.pipe(**kwargs)
frames = output.frames[0] # WAN2.2 returns frames in this format
elif self.PipelineType == "WanImageToVideoPipeline":
# WAN2.2 image-to-video generation
if request.start_image:
# Load and resize the input image according to WAN2.2 requirements
image = load_image(request.start_image)
# Use request dimensions or defaults, but respect WAN2.2 constraints
request_height = request.height if request.height > 0 else 480
request_width = request.width if request.width > 0 else 832
max_area = request_height * request_width
aspect_ratio = image.height / image.width
mod_value = self.pipe.vae_scale_factor_spatial * self.pipe.transformer.config.patch_size[1]
height = round((max_area * aspect_ratio) ** 0.5 / mod_value) * mod_value
width = round((max_area / aspect_ratio) ** 0.5 / mod_value) * mod_value
image = image.resize((width, height))
kwargs["image"] = image
kwargs["height"] = height
kwargs["width"] = width
output = self.pipe(**kwargs)
frames = output.frames[0]
elif self.img2vid:
# Generic image-to-video generation
if request.start_image:
image = load_image(request.start_image)
image = image.resize((request.width if request.width > 0 else 1024,
request.height if request.height > 0 else 576))
kwargs["image"] = image
output = self.pipe(**kwargs)
frames = output.frames[0]
elif self.txt2vid:
# Generic text-to-video generation
output = self.pipe(**kwargs)
frames = output.frames[0]
else:
return backend_pb2.Result(success=False, message=f"Pipeline {self.PipelineType} does not support video generation")
# Export video
export_to_video(frames, request.dst, fps=fps)
return backend_pb2.Result(message="Video generated successfully", success=True)
except Exception as err:
print(f"Error generating video: {err}", file=sys.stderr)
traceback.print_exc()
return backend_pb2.Result(success=False, message=f"Error generating video: {err}")
def serve(address): def serve(address):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS), server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
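
The diffusers backend parses free-form `optname:optvalue` strings into typed options and maps a textual torch_dtype onto a real torch dtype. The same logic in isolation, as a hedged sketch:

```python
# Sketch of the option parsing used by the diffusers backend: "key:value"
# strings are coerced to float/int/bool where possible, and a textual
# torch_dtype option is translated into an actual torch dtype.
import torch


def _is_float(s: str) -> bool:
    try:
        float(s)
        return True
    except ValueError:
        return False


def _is_int(s: str) -> bool:
    try:
        int(s)
        return True
    except ValueError:
        return False


def parse_options(raw_options: list[str]) -> tuple[dict, torch.dtype | None]:
    options = {}
    for opt in raw_options:
        if ":" not in opt:
            continue
        key, value = opt.split(":")
        if _is_float(value):
            value = float(value)
        elif _is_int(value):
            value = int(value)
        elif value.lower() in ["true", "false"]:
            value = value.lower() == "true"
        options[key] = value

    dtype = None
    mapping = {"fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32}
    if "torch_dtype" in options:
        dtype = mapping.get(options.pop("torch_dtype"))
    return options, dtype


# Example: parse_options(["guidance_scale:4.0", "torch_dtype:bf16", "use_safetensors:true"])
```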

View File

@@ -1,12 +1,9 @@
--extra-index-url https://download.pytorch.org/whl/cpu diffusers
git+https://github.com/huggingface/diffusers
opencv-python opencv-python
transformers transformers
torchvision==0.22.1
accelerate accelerate
compel compel
peft peft
sentencepiece sentencepiece
torch==2.7.1 torch==2.4.1
optimum-quanto optimum-quanto
ftfy

View File

@@ -1,12 +1,10 @@
--extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu118
git+https://github.com/huggingface/diffusers torch==2.4.1+cu118
diffusers
opencv-python opencv-python
transformers transformers
torchvision==0.22.1
accelerate accelerate
compel compel
peft peft
sentencepiece sentencepiece
torch==2.7.1 optimum-quanto
optimum-quanto
ftfy

View File

@@ -1,12 +1,9 @@
--extra-index-url https://download.pytorch.org/whl/cu121 torch==2.4.1
git+https://github.com/huggingface/diffusers diffusers
opencv-python opencv-python
transformers transformers
torchvision
accelerate accelerate
compel compel
peft peft
sentencepiece sentencepiece
torch optimum-quanto
ftfy
optimum-quanto

Some files were not shown because too many files have changed in this diff