Compare commits


1 Commit

Author: Ettore Di Giacinto
SHA1: 3e8a54f4b6
Message: chore(docs): improve
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Date: 2025-11-17 19:34:25 +01:00
416 changed files with 19674 additions and 40632 deletions

.env
View File

@@ -32,6 +32,15 @@
# Forces shutdown of the backends if busy (only if LOCALAI_SINGLE_ACTIVE_BACKEND is set)
# LOCALAI_FORCE_BACKEND_SHUTDOWN=true
## Specify a build type. Available: cublas, openblas, clblas.
## cuBLAS: This is a GPU-accelerated version of the complete standard BLAS (Basic Linear Algebra Subprograms) library. It's provided by Nvidia and is part of their CUDA toolkit.
## OpenBLAS: This is an open-source implementation of the BLAS library that aims to provide highly optimized code for various platforms. It includes support for multi-threading and can be compiled to use hardware-specific features for additional performance. OpenBLAS can run on many kinds of hardware, including CPUs from Intel, AMD, and ARM.
## clBLAS: This is an open-source implementation of the BLAS library that uses OpenCL, a framework for writing programs that execute across heterogeneous platforms consisting of CPUs, GPUs, and other processors. clBLAS is designed to take advantage of the parallel computing power of GPUs but can also run on any hardware that supports OpenCL. This includes hardware from different vendors like Nvidia, AMD, and Intel.
# BUILD_TYPE=openblas
## Uncomment and set to true to enable rebuilding from source
# REBUILD=true
## Path where to store generated images
# LOCALAI_IMAGE_PATH=/tmp/generated/images
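As a sketch of how these settings combine once uncommented (values are illustrative, not part of the diff), a CUDA build that rebuilds from source and stores generated images under /tmp might use:

BUILD_TYPE=cublas
REBUILD=true
LOCALAI_IMAGE_PATH=/tmp/generated/images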

View File

@@ -2,16 +2,11 @@ package main
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"regexp"
"slices"
"strings"
"github.com/ghodss/yaml"
hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
cogito "github.com/mudler/cogito"
@@ -50,12 +45,7 @@ func cleanTextContent(text string) string {
}
// Remove trailing empty lines from the result
result := strings.Join(cleanedLines, "\n")
return stripThinkingTags(strings.TrimRight(result, "\n"))
}
type galleryModel struct {
Name string `yaml:"name"`
Urls []string `yaml:"urls"`
return strings.TrimRight(result, "\n")
}
// isModelExisting checks if a specific model ID exists in the gallery using text search
@@ -66,20 +56,9 @@ func isModelExisting(modelID string) (bool, error) {
return false, fmt.Errorf("failed to read %s: %w", indexPath, err)
}
var galleryModels []galleryModel
err = yaml.Unmarshal(content, &galleryModels)
if err != nil {
return false, fmt.Errorf("failed to unmarshal %s: %w", indexPath, err)
}
for _, galleryModel := range galleryModels {
if slices.Contains(galleryModel.Urls, modelID) {
return true, nil
}
}
return false, nil
contentStr := string(content)
// Simple text search - if the model ID appears anywhere in the file, it exists
return strings.Contains(contentStr, modelID), nil
}
// filterExistingModels removes models that already exist in the gallery
@@ -113,16 +92,6 @@ func getGalleryIndexPath() string {
return "gallery/index.yaml"
}
func stripThinkingTags(content string) string {
// Remove content between <thinking> and </thinking> (including multi-line)
content = regexp.MustCompile(`(?s)<thinking>.*?</thinking>`).ReplaceAllString(content, "")
// Remove content between <think> and </think> (including multi-line)
content = regexp.MustCompile(`(?s)<think>.*?</think>`).ReplaceAllString(content, "")
// Clean up any extra whitespace
content = strings.TrimSpace(content)
return content
}
func getRealReadme(ctx context.Context, repository string) (string, error) {
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
@@ -151,11 +120,6 @@ func getRealReadme(ctx context.Context, repository string) (string, error) {
}
func selectMostInterestingModels(ctx context.Context, searchResult *SearchResult) ([]ProcessedModel, error) {
if len(searchResult.Models) == 1 {
return searchResult.Models, nil
}
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
@@ -254,192 +218,71 @@ Return your analysis and selection reasoning.`)
return filteredModels, nil
}
// ModelMetadata represents extracted metadata from a model
type ModelMetadata struct {
Tags []string `json:"tags"`
License string `json:"license"`
// ModelFamily represents a YAML anchor/family
type ModelFamily struct {
Anchor string `json:"anchor"`
Name string `json:"name"`
}
// extractModelMetadata extracts tags and license from model README and documentation
func extractModelMetadata(ctx context.Context, model ProcessedModel) ([]string, string, error) {
// selectModelFamily selects the appropriate model family/anchor for a given model
func selectModelFamily(ctx context.Context, model ProcessedModel, availableFamilies []ModelFamily) (string, error) {
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
`Your task is to extract metadata from an AI model's README and documentation. You will be provided with:
1. Model information (ID, author, description)
2. README content
`Your task is to select the most appropriate model family/anchor for a given AI model. You will be provided with:
1. Information about the model (name, description, etc.)
2. A list of available model families/anchors
You need to extract:
1. **Tags**: An array of relevant tags that describe the model. Use common tags from the gallery such as:
- llm, gguf, gpu, cpu, multimodal, image-to-text, text-to-text, text-to-speech, tts
- thinking, reasoning, chat, instruction-tuned, code, vision
- Model family names (e.g., llama, qwen, mistral, gemma) if applicable
- Any other relevant descriptive tags
Select 3-8 most relevant tags.
You need to select the family that best matches the model's architecture, capabilities, or characteristics. Consider:
- Model architecture (e.g., Llama, Qwen, Mistral, etc.)
- Model capabilities (e.g., vision, coding, chat, etc.)
- Model size/type (e.g., small, medium, large)
- Model purpose (e.g., general purpose, specialized, etc.)
2. **License**: The license identifier (e.g., "apache-2.0", "mit", "llama2", "gpl-3.0", "bsd", "cc-by-4.0").
If no license is found, return an empty string.
Return the extracted metadata in a structured format.`)
Return the anchor name that best fits the model.`)
// Add model information
modelInfo := "Model Information:\n"
modelInfo += fmt.Sprintf(" ID: %s\n", model.ModelID)
modelInfo += fmt.Sprintf(" Author: %s\n", model.Author)
modelInfo += fmt.Sprintf(" Downloads: %d\n", model.Downloads)
if model.ReadmeContent != "" {
modelInfo += fmt.Sprintf(" README Content:\n%s\n", model.ReadmeContent)
} else if model.ReadmeContentPreview != "" {
modelInfo += fmt.Sprintf(" README Preview: %s\n", model.ReadmeContentPreview)
}
modelInfo += fmt.Sprintf(" Description: %s\n", model.ReadmeContentPreview)
fragment = fragment.AddMessage("user", modelInfo)
fragment = fragment.AddMessage("user", "Extract the tags and license from the model information. Return the metadata as a JSON object with 'tags' (array of strings) and 'license' (string).")
// Add available families
familiesInfo := "Available Model Families:\n"
for _, family := range availableFamilies {
familiesInfo += fmt.Sprintf(" - %s (%s)\n", family.Anchor, family.Name)
}
fragment = fragment.AddMessage("user", familiesInfo)
fragment = fragment.AddMessage("user", "Select the most appropriate family anchor for this model. Return just the anchor name.")
// Get a response
newFragment, err := llm.Ask(ctx, fragment)
if err != nil {
return nil, "", err
return "", err
}
// Extract structured metadata
metadata := ModelMetadata{}
// Extract the selected family
selectedFamily := strings.TrimSpace(newFragment.LastMessage().Content)
s := structures.Structure{
Schema: jsonschema.Definition{
Type: jsonschema.Object,
AdditionalProperties: false,
Properties: map[string]jsonschema.Definition{
"tags": {
Type: jsonschema.Array,
Items: &jsonschema.Definition{Type: jsonschema.String},
Description: "Array of relevant tags describing the model",
},
"license": {
Type: jsonschema.String,
Description: "License identifier (e.g., apache-2.0, mit, llama2). Empty string if not found.",
},
},
Required: []string{"tags", "license"},
},
Object: &metadata,
}
err = newFragment.ExtractStructure(ctx, llm, s)
if err != nil {
return nil, "", err
}
return metadata.Tags, metadata.License, nil
}
// extractIconFromReadme scans the README content for image URLs and returns the first suitable icon URL found
func extractIconFromReadme(readmeContent string) string {
if readmeContent == "" {
return ""
}
// Regular expressions to match image URLs in various formats (case-insensitive)
// Match markdown image syntax: ![alt](url) - case insensitive extensions
markdownImageRegex := regexp.MustCompile(`(?i)!\[[^\]]*\]\(([^)]+\.(png|jpg|jpeg|svg|webp|gif))\)`)
// Match HTML img tags: <img src="url">
htmlImageRegex := regexp.MustCompile(`(?i)<img[^>]+src=["']([^"']+\.(png|jpg|jpeg|svg|webp|gif))["']`)
// Match plain URLs ending with image extensions
plainImageRegex := regexp.MustCompile(`(?i)https?://[^\s<>"']+\.(png|jpg|jpeg|svg|webp|gif)`)
// Try markdown format first
matches := markdownImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 1 && matches[1] != "" {
url := strings.TrimSpace(matches[1])
// Prefer HuggingFace CDN URLs or absolute URLs
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
// Validate that the selected family exists in our list
for _, family := range availableFamilies {
if family.Anchor == selectedFamily {
return selectedFamily, nil
}
}
// Try HTML img tags
matches = htmlImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 1 && matches[1] != "" {
url := strings.TrimSpace(matches[1])
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
// If no exact match, try to find a close match
for _, family := range availableFamilies {
if strings.Contains(strings.ToLower(family.Anchor), strings.ToLower(selectedFamily)) ||
strings.Contains(strings.ToLower(selectedFamily), strings.ToLower(family.Anchor)) {
return family.Anchor, nil
}
}
// Try plain URLs
matches = plainImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 0 {
url := strings.TrimSpace(matches[0])
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
}
}
return ""
}
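// For illustration (hypothetical README snippet): a line such as
//   ![logo](https://huggingface.co/example/model/resolve/main/logo.png)
// matches the markdown regex above; since the captured URL is absolute
// (starts with "http"), it is returned as the icon.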
// getHuggingFaceAvatarURL attempts to get the HuggingFace avatar URL for a user
func getHuggingFaceAvatarURL(author string) string {
if author == "" {
return ""
}
// Try to fetch user info from HuggingFace API
// HuggingFace API endpoint: https://huggingface.co/api/users/{username}
baseURL := "https://huggingface.co"
userURL := fmt.Sprintf("%s/api/users/%s", baseURL, author)
req, err := http.NewRequest("GET", userURL, nil)
if err != nil {
return ""
}
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return ""
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return ""
}
// Parse the response to get avatar URL
var userInfo map[string]interface{}
body, err := io.ReadAll(resp.Body)
if err != nil {
return ""
}
if err := json.Unmarshal(body, &userInfo); err != nil {
return ""
}
// Try to extract avatar URL from response
if avatar, ok := userInfo["avatarUrl"].(string); ok && avatar != "" {
return avatar
}
if avatar, ok := userInfo["avatar"].(string); ok && avatar != "" {
return avatar
}
return ""
}
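// Hypothetical response shape from https://huggingface.co/api/users/{username}:
//   {"avatarUrl": "https://cdn-avatars.huggingface.co/v1/production/uploads/..."}
// Either an "avatarUrl" or an "avatar" key is accepted above.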
// extractModelIcon extracts icon URL from README or falls back to HuggingFace avatar
func extractModelIcon(model ProcessedModel) string {
// First, try to extract icon from README
if icon := extractIconFromReadme(model.ReadmeContent); icon != "" {
return icon
}
// Fallback: Try to get HuggingFace user avatar
if model.Author != "" {
if avatar := getHuggingFaceAvatarURL(model.Author); avatar != "" {
return avatar
}
}
return ""
// Default fallback
return "llama3", nil
}

View File

@@ -2,61 +2,13 @@ package main
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ghodss/yaml"
"github.com/mudler/LocalAI/core/gallery/importers"
)
func formatTextContent(text string) string {
return formatTextContentWithIndent(text, 4, 6)
}
// formatTextContentWithIndent formats text content with specified base and list item indentation
func formatTextContentWithIndent(text string, baseIndent int, listItemIndent int) string {
var formattedLines []string
lines := strings.Split(text, "\n")
for _, line := range lines {
trimmed := strings.TrimRight(line, " \t\r")
if trimmed == "" {
// Keep empty lines as empty (no indentation)
formattedLines = append(formattedLines, "")
} else {
// Preserve relative indentation from yaml.Marshal output
// Count existing leading spaces to preserve relative structure
leadingSpaces := len(trimmed) - len(strings.TrimLeft(trimmed, " \t"))
trimmedStripped := strings.TrimLeft(trimmed, " \t")
var totalIndent int
if strings.HasPrefix(trimmedStripped, "-") {
// List items: use listItemIndent (ignore existing leading spaces)
totalIndent = listItemIndent
} else {
// Regular lines: use baseIndent + preserve relative indentation
// This handles both top-level keys (leadingSpaces=0) and nested properties (leadingSpaces>0)
totalIndent = baseIndent + leadingSpaces
}
indentStr := strings.Repeat(" ", totalIndent)
formattedLines = append(formattedLines, indentStr+trimmedStripped)
}
}
formattedText := strings.Join(formattedLines, "\n")
// Remove any trailing spaces from the formatted description
formattedText = strings.TrimRight(formattedText, " \t")
return formattedText
}
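// Example (hypothetical input, baseIndent=4, listItemIndent=6):
//   "name: foo\n- bar"  becomes  "    name: foo\n      - bar"
// Plain lines gain the base indent (plus any existing relative indent),
// while list items are pinned to the list-item indent.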
// generateYAMLEntry generates a YAML entry for a model using the specified anchor
func generateYAMLEntry(model ProcessedModel, quantization string) string {
modelConfig, err := importers.DiscoverModelConfig("https://huggingface.co/"+model.ModelID, json.RawMessage(`{ "quantization": "`+quantization+`"}`))
if err != nil {
panic(err)
}
func generateYAMLEntry(model ProcessedModel, familyAnchor string) string {
// Extract model name from ModelID
parts := strings.Split(model.ModelID, "/")
modelName := model.ModelID
@@ -70,6 +22,18 @@ func generateYAMLEntry(model ProcessedModel, quantization string) string {
modelName = strings.ReplaceAll(modelName, "-q3_k_m", "")
modelName = strings.ReplaceAll(modelName, "-q2_k", "")
fileName := ""
checksum := ""
if model.PreferredModelFile != nil {
fileParts := strings.Split(model.PreferredModelFile.Path, "/")
if len(fileParts) > 0 {
fileName = fileParts[len(fileParts)-1]
}
checksum = model.PreferredModelFile.SHA256
} else {
fileName = model.ModelID
}
description := model.ReadmeContent
if description == "" {
description = fmt.Sprintf("AI model: %s", modelName)
@@ -77,88 +41,142 @@ func generateYAMLEntry(model ProcessedModel, quantization string) string {
// Clean up description to prevent YAML linting issues
description = cleanTextContent(description)
formattedDescription := formatTextContent(description)
configFile := formatTextContent(modelConfig.ConfigFile)
filesYAML, _ := yaml.Marshal(modelConfig.Files)
// Files section: list items need 4 spaces (not 6), since files: is at 2 spaces
files := formatTextContentWithIndent(string(filesYAML), 4, 4)
// Build metadata sections
var metadataSections []string
// Add license if present
if model.License != "" {
metadataSections = append(metadataSections, fmt.Sprintf(` license: "%s"`, model.License))
// Format description for YAML (indent each line and ensure no trailing spaces)
lines := strings.Split(description, "\n")
var formattedLines []string
for _, line := range lines {
if strings.TrimSpace(line) == "" {
// Keep empty lines as empty (no indentation)
formattedLines = append(formattedLines, "")
} else {
// Add indentation to non-empty lines
formattedLines = append(formattedLines, " "+line)
}
}
// Add tags if present
if len(model.Tags) > 0 {
tagsYAML, _ := yaml.Marshal(model.Tags)
tagsFormatted := formatTextContentWithIndent(string(tagsYAML), 4, 4)
tagsFormatted = strings.TrimRight(tagsFormatted, "\n")
metadataSections = append(metadataSections, fmt.Sprintf(" tags:\n%s", tagsFormatted))
}
// Add icon if present
if model.Icon != "" {
metadataSections = append(metadataSections, fmt.Sprintf(` icon: %s`, model.Icon))
}
// Build the metadata block
metadataBlock := ""
if len(metadataSections) > 0 {
metadataBlock = strings.Join(metadataSections, "\n") + "\n"
}
formattedDescription := strings.Join(formattedLines, "\n")
// Remove any trailing spaces from the formatted description
formattedDescription = strings.TrimRight(formattedDescription, " \t")
yamlTemplate := ""
yamlTemplate = `- name: "%s"
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
if checksum != "" {
yamlTemplate = `- !!merge <<: *%s
name: "%s"
urls:
- https://huggingface.co/%s
description: |
%s%s
overrides:
%s
overrides:
parameters:
model: %s
files:
%s`
// Trim trailing newlines from formatted sections to prevent extra blank lines
formattedDescription = strings.TrimRight(formattedDescription, "\n")
configFile = strings.TrimRight(configFile, "\n")
files = strings.TrimRight(files, "\n")
// Add newline before metadata block if present
if metadataBlock != "" {
metadataBlock = "\n" + strings.TrimRight(metadataBlock, "\n")
- filename: %s
sha256: %s
uri: huggingface://%s/%s`
return fmt.Sprintf(yamlTemplate,
familyAnchor,
modelName,
model.ModelID,
formattedDescription,
fileName,
fileName,
checksum,
model.ModelID,
fileName,
)
} else {
yamlTemplate = `- !!merge <<: *%s
name: "%s"
urls:
- https://huggingface.co/%s
description: |
%s
overrides:
parameters:
model: %s`
return fmt.Sprintf(yamlTemplate,
familyAnchor,
modelName,
model.ModelID,
formattedDescription,
fileName,
)
}
return fmt.Sprintf(yamlTemplate,
modelName,
model.ModelID,
formattedDescription,
metadataBlock,
configFile,
files,
)
}
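// As a sketch, the checksum branch above renders entries shaped like the
// following (anchor, model name, and hash are hypothetical):
//   - !!merge <<: *llama3
//     name: "example-model"
//     urls:
//       - https://huggingface.co/example/example-model
//     description: |
//       ...
//     overrides:
//       parameters:
//         model: example-model-q4_k_m.gguf
//       files:
//         - filename: example-model-q4_k_m.gguf
//           sha256: <sha256>
//           uri: huggingface://example/example-model/example-model-q4_k_m.gguf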
// extractModelFamilies extracts all YAML anchors from the gallery index.yaml file
func extractModelFamilies() ([]ModelFamily, error) {
// Read the index.yaml file
indexPath := getGalleryIndexPath()
content, err := os.ReadFile(indexPath)
if err != nil {
return nil, fmt.Errorf("failed to read %s: %w", indexPath, err)
}
lines := strings.Split(string(content), "\n")
var families []ModelFamily
for _, line := range lines {
line = strings.TrimSpace(line)
// Look for YAML anchors (lines starting with "- &")
if strings.HasPrefix(line, "- &") {
// Extract the anchor name (everything after "- &")
anchor := strings.TrimPrefix(line, "- &")
// Remove any trailing colon or other characters
anchor = strings.Split(anchor, ":")[0]
anchor = strings.Split(anchor, " ")[0]
if anchor != "" {
families = append(families, ModelFamily{
Anchor: anchor,
Name: anchor, // Use anchor as name for now
})
}
}
}
return families, nil
}
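// For example, a hypothetical gallery line "- &llama3" produces
// ModelFamily{Anchor: "llama3", Name: "llama3"}; generateYAMLEntry then
// references it via the "!!merge <<: *llama3" template above.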
// generateYAMLForModels generates YAML entries for selected models and appends to index.yaml
func generateYAMLForModels(ctx context.Context, models []ProcessedModel, quantization string) error {
func generateYAMLForModels(ctx context.Context, models []ProcessedModel) error {
// Extract available model families
families, err := extractModelFamilies()
if err != nil {
return fmt.Errorf("failed to extract model families: %w", err)
}
fmt.Printf("Found %d model families: %v\n", len(families),
func() []string {
var names []string
for _, f := range families {
names = append(names, f.Anchor)
}
return names
}())
// Generate YAML entries for each model
var yamlEntries []string
for _, model := range models {
fmt.Printf("Generating YAML entry for model: %s\n", model.ModelID)
fmt.Printf("Selecting family for model: %s\n", model.ModelID)
// Select appropriate family for this model
familyAnchor, err := selectModelFamily(ctx, model, families)
if err != nil {
fmt.Printf("Error selecting family for %s: %v, using default\n", model.ModelID, err)
familyAnchor = "llama3" // Default fallback
}
fmt.Printf("Selected family '%s' for model %s\n", familyAnchor, model.ModelID)
// Generate YAML entry
yamlEntry := generateYAMLEntry(model, quantization)
yamlEntry := generateYAMLEntry(model, familyAnchor)
yamlEntries = append(yamlEntries, yamlEntry)
}
// Prepend to index.yaml (write at the top)
// Append to index.yaml
if len(yamlEntries) > 0 {
indexPath := getGalleryIndexPath()
fmt.Printf("Prepending YAML entries to %s...\n", indexPath)
fmt.Printf("Appending YAML entries to %s...\n", indexPath)
// Read current content
content, err := os.ReadFile(indexPath)
@@ -166,26 +184,11 @@ func generateYAMLForModels(ctx context.Context, models []ProcessedModel, quantiz
return fmt.Errorf("failed to read %s: %w", indexPath, err)
}
existingContent := string(content)
// Append new entries
// Remove trailing whitespace from existing content and join entries without extra newlines
existingContent := strings.TrimRight(string(content), " \t\n\r")
yamlBlock := strings.Join(yamlEntries, "\n")
// Check if file starts with "---"
var newContent string
if strings.HasPrefix(existingContent, "---\n") {
// File starts with "---", prepend new entries after it
restOfContent := strings.TrimPrefix(existingContent, "---\n")
// Ensure proper spacing: "---\n" + new entries + "\n" + rest of content
newContent = "---\n" + yamlBlock + "\n" + restOfContent
} else if strings.HasPrefix(existingContent, "---") {
// File starts with "---" but no newline after
restOfContent := strings.TrimPrefix(existingContent, "---")
newContent = "---\n" + yamlBlock + "\n" + strings.TrimPrefix(restOfContent, "\n")
} else {
// No "---" at start, prepend new entries at the very beginning
// Trim leading whitespace from existing content
existingContent = strings.TrimLeft(existingContent, " \t\n\r")
newContent = yamlBlock + "\n" + existingContent
}
newContent := existingContent + "\n" + yamlBlock + "\n"
// Write back to file
err = os.WriteFile(indexPath, []byte(newContent), 0644)
@@ -193,7 +196,7 @@ func generateYAMLForModels(ctx context.Context, models []ProcessedModel, quantiz
return fmt.Errorf("failed to write %s: %w", indexPath, err)
}
fmt.Printf("Successfully prepended %d models to %s\n", len(yamlEntries), indexPath)
fmt.Printf("Successfully added %d models to %s\n", len(yamlEntries), indexPath)
}
return nil

View File

@@ -34,9 +34,6 @@ type ProcessedModel struct {
ReadmeContentPreview string `json:"readme_content_preview,omitempty"`
QuantizationPreferences []string `json:"quantization_preferences"`
ProcessingError string `json:"processing_error,omitempty"`
Tags []string `json:"tags,omitempty"`
License string `json:"license,omitempty"`
Icon string `json:"icon,omitempty"`
}
// SearchResult represents the complete result of searching and processing models
@@ -119,24 +116,14 @@ func main() {
}
fmt.Println(result.FormattedOutput)
var models []ProcessedModel
if len(result.Models) > 1 {
fmt.Println("More than one model found (", len(result.Models), "), using AI agent to select the most interesting models")
for _, model := range result.Models {
fmt.Println("Model: ", model.ModelID)
}
// Use AI agent to select the most interesting models
fmt.Println("Using AI agent to select the most interesting models...")
models, err = selectMostInterestingModels(context.Background(), result)
if err != nil {
fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
// Continue with original result if selection fails
models = result.Models
}
} else if len(result.Models) == 1 {
// Use AI agent to select the most interesting models
fmt.Println("Using AI agent to select the most interesting models...")
models, err := selectMostInterestingModels(context.Background(), result)
if err != nil {
fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
// Continue with original result if selection fails
models = result.Models
fmt.Println("Only one model found, using it directly")
}
fmt.Print(models)
@@ -167,7 +154,7 @@ func main() {
addedModelURLs = append(addedModelURLs, modelURL)
}
fmt.Println("Generating YAML entries for selected models...")
err = generateYAMLForModels(context.Background(), models, quantization)
err = generateYAMLForModels(context.Background(), models)
if err != nil {
fmt.Fprintf(os.Stderr, "Error generating YAML entries: %v\n", err)
os.Exit(1)
@@ -325,28 +312,9 @@ func searchAndProcessModels(searchTerm string, limit int, quantization string) (
outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
processedModel.ReadmeContentPreview))
} else {
fmt.Printf(" Warning: Failed to get real readme: %v\n", err)
continue
}
fmt.Println("Real readme got", readmeContent)
// Extract metadata (tags, license) from README using LLM
fmt.Println("Extracting metadata for", model.ModelID, "waiting...")
tags, license, err := extractModelMetadata(context.Background(), processedModel)
if err == nil {
processedModel.Tags = tags
processedModel.License = license
outputBuilder.WriteString(fmt.Sprintf(" Tags: %v\n", tags))
outputBuilder.WriteString(fmt.Sprintf(" License: %s\n", license))
} else {
fmt.Printf(" Warning: Failed to extract metadata: %v\n", err)
}
// Extract icon from README or use HuggingFace avatar
icon := extractModelIcon(processedModel)
if icon != "" {
processedModel.Icon = icon
outputBuilder.WriteString(fmt.Sprintf(" Icon: %s\n", icon))
}
// Get README content
// readmeContent, err := client.GetReadmeContent(model.ModelID, details.ReadmeFile.Path)
// if err == nil {

View File

@@ -25,7 +25,7 @@ func runSyntheticMode() error {
// Generate YAML entries and append to gallery/index.yaml
fmt.Println("Generating YAML entries for synthetic models...")
err := generateYAMLForModels(context.Background(), models, "Q4_K_M")
err := generateYAMLForModels(context.Background(), models)
if err != nil {
return fmt.Errorf("error generating YAML entries: %w", err)
}
@@ -138,25 +138,6 @@ func (g *SyntheticDataGenerator) GenerateProcessedModel() ProcessedModel {
readmeContent := g.generateReadmeContent(modelName, author)
// Generate sample metadata
licenses := []string{"apache-2.0", "mit", "llama2", "gpl-3.0", "bsd", ""}
license := licenses[g.rand.Intn(len(licenses))]
sampleTags := []string{"llm", "gguf", "gpu", "cpu", "text-to-text", "chat", "instruction-tuned"}
numTags := g.rand.Intn(4) + 3 // 3-6 tags
tags := make([]string, numTags)
for i := 0; i < numTags; i++ {
tags[i] = sampleTags[g.rand.Intn(len(sampleTags))]
}
// Remove duplicates
tags = g.removeDuplicates(tags)
// Optionally include icon (50% chance)
icon := ""
if g.rand.Intn(2) == 0 {
icon = fmt.Sprintf("https://cdn-avatars.huggingface.co/v1/production/uploads/%s.png", g.randomString(24))
}
return ProcessedModel{
ModelID: modelID,
Author: author,
@@ -169,9 +150,6 @@ func (g *SyntheticDataGenerator) GenerateProcessedModel() ProcessedModel {
ReadmeContentPreview: truncateString(readmeContent, 200),
QuantizationPreferences: []string{"Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
ProcessingError: "",
Tags: tags,
License: license,
Icon: icon,
}
}
@@ -201,18 +179,6 @@ func (g *SyntheticDataGenerator) randomDate() string {
return pastDate.Format("2006-01-02T15:04:05.000Z")
}
func (g *SyntheticDataGenerator) removeDuplicates(slice []string) []string {
keys := make(map[string]bool)
result := []string{}
for _, item := range slice {
if !keys[item] {
keys[item] = true
result = append(result, item)
}
}
return result
}
func (g *SyntheticDataGenerator) generateReadmeContent(modelName, author string) string {
templates := []string{
fmt.Sprintf("# %s Model\n\nThis is a %s model developed by %s. It's designed for various natural language processing tasks including text generation, question answering, and conversation.\n\n## Features\n\n- High-quality text generation\n- Efficient inference\n- Multiple quantization options\n- Easy to use with LocalAI\n\n## Usage\n\nUse this model with LocalAI for various AI tasks.", strings.Title(modelName), modelName, author),

View File

File diff suppressed because it is too large.

View File

@@ -1,5 +1,5 @@
---
name: 'build backend container images (reusable)'
name: 'build python backend container images (reusable)'
on:
workflow_call:
@@ -53,11 +53,6 @@ on:
description: 'Skip drivers'
default: 'false'
type: string
ubuntu-version:
description: 'Ubuntu version'
required: false
default: '2204'
type: string
secrets:
dockerUsername:
required: false
@@ -102,7 +97,7 @@ jobs:
&& sudo apt-get install -y git
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'
@@ -213,7 +208,6 @@ jobs:
CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
BASE_IMAGE=${{ inputs.base-image }}
BACKEND=${{ inputs.backend }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
context: ${{ inputs.context }}
file: ${{ inputs.dockerfile }}
cache-from: type=gha
@@ -234,7 +228,6 @@ jobs:
CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
BASE_IMAGE=${{ inputs.base-image }}
BACKEND=${{ inputs.backend }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
context: ${{ inputs.context }}
file: ${{ inputs.dockerfile }}
cache-from: type=gha

View File

@@ -50,7 +50,7 @@ jobs:
go-version: ['${{ inputs.go-version }}']
steps:
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
@@ -74,7 +74,7 @@ jobs:
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend
- name: Upload ${{ inputs.backend }}.tar
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: ${{ inputs.backend }}-tar
path: backend-images/${{ inputs.backend }}.tar
@@ -85,7 +85,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download ${{ inputs.backend }}.tar
uses: actions/download-artifact@v7
uses: actions/download-artifact@v6
with:
name: ${{ inputs.backend }}-tar
path: .

View File

@@ -17,7 +17,7 @@ jobs:
has-backends-darwin: ${{ steps.set-matrix.outputs.has-backends-darwin }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Setup Bun
uses: oven-sh/setup-bun@v2
@@ -52,7 +52,6 @@ jobs:
dockerfile: ${{ matrix.dockerfile }}
skip-drivers: ${{ matrix.skip-drivers }}
context: ${{ matrix.context }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
@@ -70,7 +69,7 @@ jobs:
tag-suffix: ${{ matrix.tag-suffix }}
lang: ${{ matrix.lang || 'python' }}
use-pip: ${{ matrix.backend == 'diffusers' }}
runs-on: "macos-latest"
runs-on: "macOS-14"
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}

View File

@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
@@ -25,7 +25,7 @@ jobs:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
@@ -37,7 +37,7 @@ jobs:
make build-launcher-darwin
ls -liah dist
- name: Upload macOS launcher artifacts
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: launcher-macos
path: dist/
@@ -47,7 +47,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
@@ -60,7 +60,7 @@ jobs:
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v5
with:
name: launcher-linux
path: local-ai-launcher-linux.tar.xz

View File

@@ -31,7 +31,7 @@ jobs:
file: "backend/go/piper/Makefile"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Bump dependencies 🔧
id: bump
run: |
@@ -49,7 +49,7 @@ jobs:
rm -rfv ${{ matrix.variable }}_message.txt
rm -rfv ${{ matrix.variable }}_commit.txt
- name: Create Pull Request
uses: peter-evans/create-pull-request@v8
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI

View File

@@ -12,12 +12,12 @@ jobs:
- repository: "mudler/LocalAI"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Bump dependencies 🔧
run: |
bash .github/bump_docs.sh ${{ matrix.repository }}
- name: Create Pull Request
uses: peter-evans/create-pull-request@v8
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI

View File

@@ -15,7 +15,7 @@ jobs:
&& sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \
&& sudo apt-get install -y git
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- name: Install dependencies
run: |
sudo apt-get update
@@ -35,7 +35,7 @@ jobs:
sudo chmod 777 /hf_cache
bash .github/checksum_checker.sh gallery/index.yaml
- name: Create Pull Request
uses: peter-evans/create-pull-request@v8
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI

View File

@@ -14,13 +14,13 @@ jobs:
steps:
- name: Dependabot metadata
id: metadata
uses: dependabot/fetch-metadata@v2.5.0
uses: dependabot/fetch-metadata@v2.4.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
skip-commit-verification: true
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Approve a PR if not already approved
run: |

View File

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- uses: actions/setup-go@v5
@@ -33,7 +33,7 @@ jobs:
run: |
CGO_ENABLED=0 make build
- name: rm
uses: appleboy/ssh-action@v1.2.4
uses: appleboy/ssh-action@v1.2.3
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}
@@ -53,7 +53,7 @@ jobs:
rm: true
target: ./local-ai
- name: restarting
uses: appleboy/ssh-action@v1.2.4
uses: appleboy/ssh-action@v1.2.3
with:
host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }}

View File

@@ -30,7 +30,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
token: ${{ secrets.GITHUB_TOKEN }}
@@ -38,33 +38,20 @@ jobs:
uses: actions/setup-go@v5
with:
go-version: '1.21'
- name: Proto Dependencies
run: |
# Install protoc
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
PATH="$PATH:$HOME/go/bin" make protogen-go
- uses: mudler/localai-github-action@v1.1
with:
model: 'https://huggingface.co/bartowski/Qwen_Qwen3-1.7B-GGUF'
- name: Run gallery agent
env:
#OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
OPENAI_MODE: Qwen_Qwen3-1.7B-GGUF
OPENAI_BASE_URL: "http://localhost:8080"
OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
#OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
SEARCH_TERM: ${{ github.event.inputs.search_term || 'GGUF' }}
LIMIT: ${{ github.event.inputs.limit || '15' }}
QUANTIZATION: ${{ github.event.inputs.quantization || 'Q4_K_M' }}
MAX_MODELS: ${{ github.event.inputs.max_models || '1' }}
run: |
export GALLERY_INDEX_PATH=$PWD/gallery/index.yaml
go run ./.github/gallery-agent
go run .github/gallery-agent
- name: Check for changes
id: check_changes
@@ -82,28 +69,28 @@ jobs:
id: read_summary
if: steps.check_changes.outputs.changes == 'true'
run: |
if [ -f "./gallery-agent-summary.json" ]; then
if [ -f ".github/gallery-agent/gallery-agent-summary.json" ]; then
echo "summary_exists=true" >> $GITHUB_OUTPUT
# Extract summary data using jq
echo "search_term=$(jq -r '.search_term' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "total_found=$(jq -r '.total_found' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "models_added=$(jq -r '.models_added' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "quantization=$(jq -r '.quantization' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "processing_time=$(jq -r '.processing_time' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "search_term=$(jq -r '.search_term' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "total_found=$(jq -r '.total_found' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "models_added=$(jq -r '.models_added' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "quantization=$(jq -r '.quantization' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "processing_time=$(jq -r '.processing_time' .github/gallery-agent/gallery-agent-summary.json)" >> $GITHUB_OUTPUT
# Create a formatted list of added models with URLs
added_models=$(jq -r 'range(0; .added_model_ids | length) as $i | "- [\(.added_model_ids[$i])](\(.added_model_urls[$i]))"' ./gallery-agent-summary.json | tr '\n' '\n')
added_models=$(jq -r 'range(0; .added_model_ids | length) as $i | "- [\(.added_model_ids[$i])](\(.added_model_urls[$i]))"' .github/gallery-agent/gallery-agent-summary.json | tr '\n' '\n')
echo "added_models<<EOF" >> $GITHUB_OUTPUT
echo "$added_models" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
rm -f ./gallery-agent-summary.json
rm -f .github/gallery-agent/gallery-agent-summary.json
else
echo "summary_exists=false" >> $GITHUB_OUTPUT
fi
- name: Create Pull Request
if: steps.check_changes.outputs.changes == 'true'
uses: peter-evans/create-pull-request@v8
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI

View File

@@ -16,7 +16,7 @@ jobs:
strategy:
matrix:
include:
- grpc-base-image: ubuntu:24.04
- grpc-base-image: ubuntu:22.04
runs-on: 'ubuntu-latest'
platforms: 'linux/amd64,linux/arm64'
runs-on: ${{matrix.runs-on}}
@@ -73,7 +73,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Cache GRPC
uses: docker/build-push-action@v6

View File

@@ -15,8 +15,8 @@ jobs:
strategy:
matrix:
include:
- base-image: intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04
runs-on: 'arc-runner-set'
- base-image: intel/oneapi-basekit:2025.2.0-0-devel-ubuntu22.04
runs-on: 'ubuntu-latest'
platforms: 'linux/amd64'
runs-on: ${{matrix.runs-on}}
steps:
@@ -43,7 +43,7 @@ jobs:
uses: docker/setup-buildx-action@master
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Cache Intel images
uses: docker/build-push-action@v6
@@ -53,7 +53,7 @@ jobs:
BASE_IMAGE=${{ matrix.base-image }}
context: .
file: ./Dockerfile
tags: quay.io/go-skynet/intel-oneapi-base:24.04
tags: quay.io/go-skynet/intel-oneapi-base:latest
push: true
target: intel
platforms: ${{ matrix.platforms }}

View File

@@ -1,95 +1,68 @@
---
name: 'build container images tests'
on:
pull_request:
concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
fail-fast: false
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
grpc-base-image: "ubuntu:24.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'sycl'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
grpc-base-image: "ubuntu:24.04"
tag-suffix: 'sycl'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'vulkan'
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'false'
tag-suffix: '-vulkan-core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
makeflags: "--jobs=4 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'false'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'
name: 'build container images tests'
on:
pull_request:
concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
# Pushing with all jobs in parallel
# eats the bandwidth of all the nodes
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
fail-fast: false
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'sycl'
platforms: 'linux/amd64'
tag-latest: 'false'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: 'sycl'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'false'
tag-suffix: '-vulkan-core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"

View File

@@ -1,187 +1,154 @@
---
name: 'build container images'
on:
push:
branches:
- master
tags:
- '*'
concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
hipblas-jobs:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
aio: ${{ matrix.aio }}
makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }}
ubuntu-codename: ${{ matrix.ubuntu-codename }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-24.04:6.4.4"
grpc-base-image: "ubuntu:24.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-hipblas"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
core-image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
ubuntu-version: ${{ matrix.ubuntu-version }}
ubuntu-codename: ${{ matrix.ubuntu-codename }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
#max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
matrix:
include:
- build-type: ''
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: ''
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-latest'
aio: "-aio-cpu"
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "9"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-nvidia-cuda-12"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-13'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-nvidia-cuda-13"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'vulkan'
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: '-gpu-vulkan'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-vulkan"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
- build-type: 'intel'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
grpc-base-image: "ubuntu:24.04"
tag-suffix: '-gpu-intel'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel"
ubuntu-version: '2404'
ubuntu-codename: 'noble'
gh-runner:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
ubuntu-version: ${{ matrix.ubuntu-version }}
ubuntu-codename: ${{ matrix.ubuntu-codename }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'true'
ubuntu-version: "2204"
ubuntu-codename: 'jammy'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'
ubuntu-codename: 'noble'
name: 'build container images'
on:
push:
branches:
- master
tags:
- '*'
concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
hipblas-jobs:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
aio: ${{ matrix.aio }}
makeflags: ${{ matrix.makeflags }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'hipblas'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-hipblas'
base-image: "rocm/dev-ubuntu-22.04:6.4.3"
grpc-base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-hipblas"
core-image-build:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
#max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
matrix:
include:
- build-type: ''
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'auto'
tag-suffix: ''
base-image: "ubuntu:22.04"
runs-on: 'ubuntu-latest'
aio: "-aio-cpu"
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
- build-type: 'cublas'
cuda-major-version: "11"
cuda-minor-version: "7"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-11'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
aio: "-aio-gpu-nvidia-cuda-11"
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-nvidia-cuda-12'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-nvidia-cuda-12"
- build-type: 'vulkan'
platforms: 'linux/amd64'
tag-latest: 'auto'
tag-suffix: '-gpu-vulkan'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:22.04"
skip-drivers: 'false'
makeflags: "--jobs=4 --output-sync=target"
aio: "-aio-gpu-vulkan"
- build-type: 'intel'
platforms: 'linux/amd64'
tag-latest: 'auto'
base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
grpc-base-image: "ubuntu:22.04"
tag-suffix: '-gpu-intel'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-intel"
gh-runner:
uses: ./.github/workflows/image_build.yml
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
aio: ${{ matrix.aio }}
base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }}
skip-drivers: ${{ matrix.skip-drivers }}
secrets:
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
matrix:
include:
- build-type: 'cublas'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'true'

View File

@@ -23,7 +23,7 @@ on:
type: string
cuda-minor-version:
description: 'CUDA minor version'
default: "9"
default: "4"
type: string
platforms:
description: 'Platforms'
@@ -56,16 +56,6 @@ on:
required: false
default: ''
type: string
ubuntu-version:
description: 'Ubuntu version'
required: false
default: '2204'
type: string
ubuntu-codename:
description: 'Ubuntu codename'
required: false
default: 'noble'
type: string
secrets:
dockerUsername:
required: true
@@ -104,7 +94,7 @@ jobs:
&& sudo apt-get update \
&& sudo apt-get install -y git
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest'
@@ -248,8 +238,6 @@ jobs:
GRPC_VERSION=v1.65.0
MAKEFLAGS=${{ inputs.makeflags }}
SKIP_DRIVERS=${{ inputs.skip-drivers }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
context: .
file: ./Dockerfile
cache-from: type=gha
@@ -277,8 +265,6 @@ jobs:
GRPC_VERSION=v1.65.0
MAKEFLAGS=${{ inputs.makeflags }}
SKIP_DRIVERS=${{ inputs.skip-drivers }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
context: .
file: ./Dockerfile
cache-from: type=gha

View File

@@ -14,7 +14,7 @@ jobs:
if: ${{ github.actor == 'localai-bot' && !contains(github.event.pull_request.title, 'chore(model gallery):') }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
uses: actions/checkout@v5
- name: Approve a PR if not already approved
run: |

View File

@@ -15,7 +15,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it-qat
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes
@@ -95,7 +95,7 @@ jobs:
MODEL_NAME: gemma-3-12b-it-qat
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
with:
fetch-depth: 0 # needed to checkout all branches for this Action to work
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
@@ -28,7 +28,7 @@ jobs:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go
@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Set up Go

View File

@@ -14,7 +14,7 @@ jobs:
GO111MODULE: on
steps:
- name: Checkout Source
uses: actions/checkout@v6
uses: actions/checkout@v5
if: ${{ github.actor != 'dependabot[bot]' }}
- name: Run Gosec Security Scanner
if: ${{ github.actor != 'dependabot[bot]' }}

View File

@@ -10,7 +10,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v9
- uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v9
with:
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'

View File

@@ -19,7 +19,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v6
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -61,7 +61,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -83,7 +83,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -104,7 +104,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v6
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -124,7 +124,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v6
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -186,7 +186,7 @@ jobs:
# sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
# df -h
# - name: Clone
# uses: actions/checkout@v6
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -211,7 +211,7 @@ jobs:
# runs-on: ubuntu-latest
# steps:
# - name: Clone
# uses: actions/checkout@v6
# uses: actions/checkout@v5
# with:
# submodules: true
# - name: Dependencies
@@ -232,7 +232,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -247,22 +247,3 @@ jobs:
run: |
make --jobs=5 --output-sync=target -C backend/python/coqui
make --jobs=5 --output-sync=target -C backend/python/coqui test
tests-moonshine:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test moonshine
run: |
make --jobs=5 --output-sync=target -C backend/python/moonshine
make --jobs=5 --output-sync=target -C backend/python/moonshine test


@@ -70,7 +70,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -109,6 +109,11 @@ jobs:
sudo apt-get update
sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION}
export CUDACXX=/usr/local/cuda/bin/nvcc
# The python3-grpc-tools package in 22.04 is too old
pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
make -C backend/python/transformers
make backends/huggingface backends/llama-cpp backends/local-store backends/silero-vad backends/piper backends/whisper backends/stablediffusion-ggml
@@ -161,7 +166,7 @@ jobs:
sudo rm -rfv build || true
df -h
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- name: Dependencies
@@ -185,13 +190,13 @@ jobs:
limit-access-to-actor: true
tests-apple:
runs-on: macos-latest
runs-on: macOS-14
strategy:
matrix:
go-version: ['1.25.x']
steps:
- name: Clone
uses: actions/checkout@v6
uses: actions/checkout@v5
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
@@ -205,7 +210,7 @@ jobs:
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
pip install --user --no-cache-dir grpcio-tools grpcio
pip install --user --no-cache-dir grpcio-tools==1.71.0 grpcio==1.71.0
- name: Build llama-cpp-darwin
run: |
make protogen-go


@@ -9,7 +9,7 @@ jobs:
fail-fast: false
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v5
- uses: actions/setup-go@v5
with:
go-version: 'stable'
@@ -25,7 +25,7 @@ jobs:
run: |
make protogen-go swagger
- name: Create Pull Request
uses: peter-evans/create-pull-request@v8
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI

.gitignore

@@ -25,7 +25,6 @@ go-bert
# LocalAI build binary
LocalAI
/local-ai
/local-ai-launcher
# prevent above rules from omitting the helm chart
!charts/*
# prevent above rules from omitting the api/localai folder

.gitmodules

@@ -1,3 +1,6 @@
[submodule "docs/themes/hugo-theme-relearn"]
path = docs/themes/hugo-theme-relearn
url = https://github.com/McShelby/hugo-theme-relearn.git
[submodule "docs/themes/lotusdocs"]
path = docs/themes/lotusdocs
url = https://github.com/colinwilson/lotusdocs


@@ -22,9 +22,6 @@ builds:
goarch:
- amd64
- arm64
ignore:
- goos: darwin
goarch: amd64
archives:
- formats: [ 'binary' ] # this removes the tar of the archives, leaving the binaries alone
name_template: local-ai-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}

AGENTS.md

@@ -1,282 +0,0 @@
# Build and testing
Building and testing the project depends on the components involved and the platform where development is taking place. Due to the amount of context required, it's usually best not to try building or testing the project unless the user requests it. If you must build the project, inspect the Makefile in the project root and the Makefiles of any backends that are affected by the changes you are making. In addition, the workflows in .github/workflows can be used as a reference when it is unclear how to build or test a component. The primary Makefile contains targets for building inside or outside Docker; if the user has not previously specified a preference, ask which they would like to use.
## Building a specified backend
Suppose the user wants to build a particular backend for a given platform; for example, bark for ROCm/hipblas.
- The Makefile has targets like `docker-build-bark` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- At a minimum we need to set the BUILD_TYPE and BASE_IMAGE build-args
- Use .github/workflows/backend.yml as a reference; it lists the needed args in the `include` job strategy matrix
- l4t and cublas also require the CUDA major and minor versions
- You can pretty-print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-bark` (an expanded sketch follows this list)
- Unless the user specifies that they want you to run the command, just print it, because not all agent frontends handle long-running jobs well and the output may overflow your context
- The user may say they want to build AMD or ROCm instead of hipblas, Intel instead of SYCL, or NVIDIA instead of l4t or cublas. Ask for confirmation if there is ambiguity.
- Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly.
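For reference, here is a rough sketch of what the generated `docker-build-bark` target expands to for the ROCm/hipblas example above, based on the `generate-docker-build-target`/`docker-build-backend` pattern and the `bark|python|.|false|true` definition in the root Makefile (the real expansion also forwards CUDA and Ubuntu build-args, omitted here because they don't apply to hipblas):
```makefile
# Sketch: approximate expansion of `make docker-build-bark` with
# BUILD_TYPE=hipblas; bark is a Python backend built from the repo root.
docker-build-bark:
	docker build \
		--build-arg BUILD_TYPE=hipblas \
		--build-arg BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 \
		--build-arg BACKEND=bark \
		-t local-ai-backend:bark -f backend/Dockerfile.python .
```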
## Adding a New Backend
When adding a new backend to LocalAI, you need to update several files to ensure the backend is properly built, tested, and registered. Here's a step-by-step guide based on the pattern used for adding backends like `moonshine`:
### 1. Create Backend Directory Structure
Create the backend directory under the appropriate location:
- **Python backends**: `backend/python/<backend-name>/`
- **Go backends**: `backend/go/<backend-name>/`
- **C++ backends**: `backend/cpp/<backend-name>/`
For Python backends, you'll typically need (a Makefile sketch follows this list):
- `backend.py` - Main gRPC server implementation
- `Makefile` - Build configuration
- `install.sh` - Installation script for dependencies
- `protogen.sh` - Protocol buffer generation script
- `requirements.txt` - Python dependencies
- `run.sh` - Runtime script
- `test.py` / `test.sh` - Test files
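The per-backend Makefile simply wires these scripts together; a minimal hypothetical sketch is below (the target names and wiring are illustrative only; copy a real backend such as `backend/python/chatterbox/Makefile` for the actual pattern):
```makefile
# Hypothetical minimal per-backend Makefile; illustrative only.
# install.sh, protogen.sh and test.sh are the scripts listed above.
.PHONY: all protogen test

all: protogen
	bash install.sh

protogen:
	bash protogen.sh

test: all
	bash test.sh
```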
### 2. Add Build Configurations to `.github/workflows/backend.yml`
Add build matrix entries for each platform/GPU type you want to support. Look at similar backends (e.g., `chatterbox`, `faster-whisper`) for reference.
**Placement in file:**
- CPU builds: Add after other CPU builds (e.g., after `cpu-chatterbox`)
- CUDA 12 builds: Add after other CUDA 12 builds (e.g., after `gpu-nvidia-cuda-12-chatterbox`)
- CUDA 13 builds: Add after other CUDA 13 builds (e.g., after `gpu-nvidia-cuda-13-chatterbox`)
**Additional build types you may need:**
- ROCm/HIP: Use `build-type: 'hipblas'` with `base-image: "rocm/dev-ubuntu-24.04:6.4.4"`
- Intel/SYCL: Use `build-type: 'intel'` or `build-type: 'sycl_f16'`/`sycl_f32` with `base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"`
- L4T (ARM): Use `build-type: 'l4t'` with `platforms: 'linux/arm64'` and `runs-on: 'ubuntu-24.04-arm'`
### 3. Add Backend Metadata to `backend/index.yaml`
**Step 3a: Add Meta Definition**
Add a YAML anchor definition in the `## metas` section (around lines 2-300). Look for similar backends to use as a template, such as `diffusers` or `chatterbox`.
**Step 3b: Add Image Entries**
Add image entries at the end of the file, following the pattern of similar backends such as `diffusers` or `chatterbox`. Include both `latest` (production) and `master` (development) tags.
### 4. Update the Makefile
The Makefile needs to be updated in several places to support building and testing the new backend; a consolidated example follows the steps below:
**Step 4a: Add to `.NOTPARALLEL`**
Add `backends/<backend-name>` to the `.NOTPARALLEL` line (around line 2) to prevent parallel execution conflicts:
```makefile
.NOTPARALLEL: ... backends/<backend-name>
```
**Step 4b: Add to `prepare-test-extra`**
Add the backend to the `prepare-test-extra` target (around line 312) to prepare it for testing:
```makefile
prepare-test-extra: protogen-python
...
$(MAKE) -C backend/python/<backend-name>
```
**Step 4c: Add to `test-extra`**
Add the backend to the `test-extra` target (around line 319) to run its tests:
```makefile
test-extra: prepare-test-extra
...
$(MAKE) -C backend/python/<backend-name> test
```
**Step 4d: Add Backend Definition**
Add a backend definition variable in the backend definitions section (around line 428-457). The format depends on the backend type:
**For Python backends with root context** (like `faster-whisper`, `bark`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|.|false|true
```
**For Python backends with `./backend` context** (like `chatterbox`, `moonshine`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|./backend|false|true
```
**For Go backends**:
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|golang|.|false|true
```
**Step 4e: Generate Docker Build Target**
Add an eval call to generate the docker-build target (around line 480-501):
```makefile
$(eval $(call generate-docker-build-target,$(BACKEND_<BACKEND_NAME>)))
```
**Step 4f: Add to `docker-build-backends`**
Add `docker-build-<backend-name>` to the `docker-build-backends` target (around line 507):
```makefile
docker-build-backends: ... docker-build-<backend-name>
```
**Determining the Context:**
- If the backend is in `backend/python/<backend-name>/` and uses `./backend` as context in the workflow file, use `./backend` context
- If the backend is in `backend/python/<backend-name>/` but uses `.` as context in the workflow file, use `.` context
- Check similar backends to determine the correct context
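Putting steps 4a-4f together for `moonshine` gives the following condensed sketch (in the real Makefile these lines are spread across the sections noted above, and the `.NOTPARALLEL`, `prepare-test-extra`, `test-extra`, and `docker-build-backends` entries are appended to existing lists rather than standing alone):
```makefile
.NOTPARALLEL: ... backends/moonshine    # 4a: appended to the existing line

prepare-test-extra: protogen-python     # 4b
	$(MAKE) -C backend/python/moonshine

test-extra: prepare-test-extra          # 4c
	$(MAKE) -C backend/python/moonshine test

# 4d: BACKEND_NAME | DOCKERFILE_TYPE | BUILD_CONTEXT | PROGRESS_FLAG | NEEDS_BACKEND_ARG
BACKEND_MOONSHINE = moonshine|python|./backend|false|true

$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))   # 4e

docker-build-backends: ... docker-build-moonshine   # 4f: appended to the existing list
```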
### 5. Verification Checklist
After adding a new backend, verify:
- [ ] Backend directory structure is complete with all necessary files
- [ ] Build configurations added to `.github/workflows/backend.yml` for all desired platforms
- [ ] Meta definition added to `backend/index.yaml` in the `## metas` section
- [ ] Image entries added to `backend/index.yaml` for all build variants (latest + development)
- [ ] Tag suffixes match between workflow file and index.yaml
- [ ] Makefile updated with all 6 required changes (`.NOTPARALLEL`, `prepare-test-extra`, `test-extra`, backend definition, docker-build target eval, `docker-build-backends`)
- [ ] No YAML syntax errors (check with linter)
- [ ] No Makefile syntax errors (check with linter)
- [ ] Follows the same pattern as similar backends (e.g., if it's a transcription backend, follow `faster-whisper` pattern)
### 6. Example: Adding a Python Backend
For reference, when `moonshine` was added:
- **Files created**: `backend/python/moonshine/{backend.py, Makefile, install.sh, protogen.sh, requirements.txt, run.sh, test.py, test.sh}`
- **Workflow entries**: 3 build configurations (CPU, CUDA 12, CUDA 13)
- **Index entries**: 1 meta definition + 6 image entries (cpu, cuda12, cuda13 × latest/development)
- **Makefile updates**:
- Added to `.NOTPARALLEL` line
- Added to `prepare-test-extra` and `test-extra` targets
- Added `BACKEND_MOONSHINE = moonshine|python|./backend|false|true`
- Added eval for docker-build target generation
- Added `docker-build-moonshine` to `docker-build-backends`
# Coding style
- The project has the following .editorconfig
```
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.go]
indent_style = tab
[Makefile]
indent_style = tab
[*.proto]
indent_size = 2
[*.py]
indent_size = 4
[*.js]
indent_size = 2
[*.yaml]
indent_size = 2
[*.md]
trim_trailing_whitespace = false
```
- Use comments sparingly to explain why code does something, not what it does. Comments are there to add context that would be difficult to deduce from reading the code.
- Prefer modern Go e.g. use `any` not `interface{}`
# Logging
Use `github.com/mudler/xlog` for logging; it has the same API as slog.
# llama.cpp Backend
The llama.cpp backend (`backend/cpp/llama-cpp/grpc-server.cpp`) is a gRPC adaptation of the upstream HTTP server (`llama.cpp/tools/server/server.cpp`). It uses the same underlying server infrastructure from `llama.cpp/tools/server/server-context.cpp`.
## Building and Testing
- Test llama.cpp backend compilation: `make backends/llama-cpp`
- The backend is built as part of the main build process
- Check `backend/cpp/llama-cpp/Makefile` for build configuration
## Architecture
- **grpc-server.cpp**: gRPC server implementation, adapts HTTP server patterns to gRPC
- Uses shared server infrastructure: `server-context.cpp`, `server-task.cpp`, `server-queue.cpp`, `server-common.cpp`
- The gRPC server mirrors the HTTP server's functionality but uses gRPC instead of HTTP
## Common Issues When Updating llama.cpp
When fixing compilation errors after upstream changes:
1. Check how `server.cpp` (HTTP server) handles the same change
2. Look for new public APIs or getter methods
3. Store copies of needed data instead of accessing private members
4. Update function calls to match new signatures
5. Test with `make backends/llama-cpp`
## Key Differences from HTTP Server
- gRPC uses `BackendServiceImpl` class with gRPC service methods
- HTTP server uses `server_routes` with HTTP handlers
- Both use the same `server_context` and task queue infrastructure
- gRPC methods: `LoadModel`, `Predict`, `PredictStream`, `Embedding`, `Rerank`, `TokenizeString`, `GetMetrics`, `Health`
## Tool Call Parsing Maintenance
When working on JSON/XML tool call parsing functionality, always check llama.cpp for the reference implementation and updates:
### Checking for XML Parsing Changes
1. **Review XML Format Definitions**: Check `llama.cpp/common/chat-parser-xml-toolcall.h` for `xml_tool_call_format` struct changes
2. **Review Parsing Logic**: Check `llama.cpp/common/chat-parser-xml-toolcall.cpp` for parsing algorithm updates
3. **Review Format Presets**: Check `llama.cpp/common/chat-parser.cpp` for new XML format presets (search for `xml_tool_call_format form`)
4. **Review Model Lists**: Check `llama.cpp/common/chat.h` for `COMMON_CHAT_FORMAT_*` enum values that use XML parsing:
- `COMMON_CHAT_FORMAT_GLM_4_5`
- `COMMON_CHAT_FORMAT_MINIMAX_M2`
- `COMMON_CHAT_FORMAT_KIMI_K2`
- `COMMON_CHAT_FORMAT_QWEN3_CODER_XML`
- `COMMON_CHAT_FORMAT_APRIEL_1_5`
- `COMMON_CHAT_FORMAT_XIAOMI_MIMO`
- Any new formats added
### Model Configuration Options
Always check `llama.cpp` for new model configuration options that should be supported in LocalAI:
1. **Check Server Context**: Review `llama.cpp/tools/server/server-context.cpp` for new parameters
2. **Check Chat Params**: Review `llama.cpp/common/chat.h` for `common_chat_params` struct changes
3. **Check Server Options**: Review `llama.cpp/tools/server/server.cpp` for command-line argument changes
4. **Examples of options to check**:
- `ctx_shift` - Context shifting support
- `parallel_tool_calls` - Parallel tool calling
- `reasoning_format` - Reasoning format options
- Any new flags or parameters
### Implementation Guidelines
1. **Feature Parity**: Always aim for feature parity with llama.cpp's implementation
2. **Test Coverage**: Add tests for new features matching llama.cpp's behavior
3. **Documentation**: Update relevant documentation when adding new formats or options
4. **Backward Compatibility**: Ensure changes don't break existing functionality
### Files to Monitor
- `llama.cpp/common/chat-parser-xml-toolcall.h` - Format definitions
- `llama.cpp/common/chat-parser-xml-toolcall.cpp` - Parsing logic
- `llama.cpp/common/chat-parser.cpp` - Format presets and model-specific handlers
- `llama.cpp/common/chat.h` - Format enums and parameter structures
- `llama.cpp/tools/server/server-context.cpp` - Server configuration options


@@ -78,20 +78,6 @@ LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio make run-e2e-aio
We welcome contributions to the documentation; please open a new PR or create a new issue. The documentation is available under `docs/`: https://github.com/mudler/LocalAI/tree/master/docs
### Gallery YAML Schema
LocalAI provides a JSON Schema for gallery model YAML files at:
`core/schema/gallery-model.schema.json`
This schema mirrors the internal gallery model configuration and can be used by editors (such as VS Code) to enable autocomplete, validation, and inline documentation when creating or modifying gallery files.
To use it with the YAML language server, add the following comment at the top of a gallery YAML file:
```yaml
# yaml-language-server: $schema=../core/schema/gallery-model.schema.json
```
## Community and Communication
- You can reach out via the Github issue tracker.


@@ -1,7 +1,6 @@
ARG BASE_IMAGE=ubuntu:24.04
ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
ARG INTEL_BASE_IMAGE=${BASE_IMAGE}
ARG UBUNTU_CODENAME=noble
FROM ${BASE_IMAGE} AS requirements
@@ -10,7 +9,7 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas0 libopenblas-dev && \
ffmpeg libopenblas-base libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
@@ -24,7 +23,6 @@ ARG SKIP_DRIVERS=false
ARG TARGETARCH
ARG TARGETVARIANT
ENV BUILD_TYPE=${BUILD_TYPE}
ARG UBUNTU_VERSION=2404
RUN mkdir -p /run/localai
RUN echo "default" > /run/localai/capability
@@ -35,45 +33,11 @@ RUN <<EOT bash
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils mesa-vulkan-drivers
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
echo "vulkan" > /run/localai/capability
@@ -82,19 +46,15 @@ EOT
# CuBLAS requirements
RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -105,34 +65,26 @@ RUN <<EOT bash
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
echo "nvidia-cuda-${CUDA_MAJOR_VERSION}" > /run/localai/capability
echo "nvidia" > /run/localai/capability
fi
EOT
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
echo "nvidia-l4t-cuda-${CUDA_MAJOR_VERSION}" > /run/localai/capability
echo "nvidia-l4t" > /run/localai/capability
fi
EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu2204-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu2204-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss
fi
EOT
@@ -176,12 +128,13 @@ ENV PATH=/opt/rocm/bin:${PATH}
# The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
FROM requirements-drivers AS build-requirements
ARG GO_VERSION=1.25.4
ARG CMAKE_VERSION=3.31.10
ARG GO_VERSION=1.22.6
ARG CMAKE_VERSION=3.26.4
ARG CMAKE_FROM_SOURCE=false
ARG TARGETARCH
ARG TARGETVARIANT
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
@@ -218,6 +171,14 @@ RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
RUN update-ca-certificates
# OpenBLAS requirements and stable diffusion
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN test -n "$TARGETARCH" \
|| (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')
@@ -238,10 +199,9 @@ WORKDIR /build
# https://community.intel.com/t5/Intel-oneAPI-Math-Kernel-Library/APT-Repository-not-working-signatures-invalid/m-p/1599436/highlight/true#M36143
# This is a temporary workaround until Intel fixes their repository
FROM ${INTEL_BASE_IMAGE} AS intel
ARG UBUNTU_CODENAME=noble
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu ${UBUNTU_CODENAME}/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
RUN apt-get update && \
apt-get install -y --no-install-recommends \
intel-oneapi-runtime-libs && \
@@ -372,6 +332,6 @@ RUN mkdir -p /models /backends
HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
CMD curl -f ${HEALTHCHECK_ENDPOINT} || exit 1
VOLUME /models /backends /configuration
VOLUME /models /backends
EXPOSE 8080
ENTRYPOINT [ "/entrypoint.sh" ]


@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:24.04
ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE}

Makefile

@@ -1,22 +1,12 @@
# Disable parallel execution for backend builds
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine
GOCMD=go
GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet
BINARY_NAME=local-ai
LAUNCHER_BINARY_NAME=local-ai-launcher
CUDA_MAJOR_VERSION?=13
CUDA_MINOR_VERSION?=0
UBUNTU_VERSION?=2204
UBUNTU_CODENAME?=noble
GORELEASER?=
export BUILD_TYPE?=
export CUDA_MAJOR_VERSION?=12
export CUDA_MINOR_VERSION?=9
GO_TAGS?=
BUILD_ID?=
@@ -162,17 +152,7 @@ test: test-models/testmodel.ggml protogen-go
########################################################
docker-build-aio:
docker build \
--build-arg MAKEFLAGS="--jobs=5 --output-sync=target" \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
--build-arg GO_TAGS="$(GO_TAGS)" \
-t local-ai:tests -f Dockerfile .
docker build --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile .
BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test $(MAKE) docker-aio
e2e-aio:
@@ -194,17 +174,7 @@ prepare-e2e:
mkdir -p $(TEST_DIR)
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
docker build \
--build-arg IMAGE_TYPE=core \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
-t localai-tests .
docker build --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=0 -t localai-tests .
run-e2e-image:
ls -liah $(abspath ./tests/e2e-fixtures)
@@ -295,7 +265,7 @@ protoc:
echo "Unsupported OS: $$OS_NAME"; exit 1; \
fi; \
URL=https://github.com/protocolbuffers/protobuf/releases/download/v31.1/$$FILE; \
curl -L $$URL -o protoc.zip && \
curl -L -s $$URL -o protoc.zip && \
unzip -j -d $(CURDIR) protoc.zip bin/protoc && rm protoc.zip
.PHONY: protogen-go
@@ -314,21 +284,17 @@ prepare-test-extra: protogen-python
$(MAKE) -C backend/python/diffusers
$(MAKE) -C backend/python/chatterbox
$(MAKE) -C backend/python/vllm
$(MAKE) -C backend/python/vibevoice
$(MAKE) -C backend/python/moonshine
test-extra: prepare-test-extra
$(MAKE) -C backend/python/transformers test
$(MAKE) -C backend/python/diffusers test
$(MAKE) -C backend/python/chatterbox test
$(MAKE) -C backend/python/vllm test
$(MAKE) -C backend/python/vibevoice test
$(MAKE) -C backend/python/moonshine test
DOCKER_IMAGE?=local-ai
DOCKER_AIO_IMAGE?=local-ai-aio
IMAGE_TYPE?=core
BASE_IMAGE?=ubuntu:24.04
BASE_IMAGE?=ubuntu:22.04
docker:
docker build \
@@ -337,34 +303,24 @@ docker:
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
-t $(DOCKER_IMAGE) .
docker-cuda12:
docker-cuda11:
docker build \
--build-arg CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION} \
--build-arg CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION} \
--build-arg CUDA_MAJOR_VERSION=11 \
--build-arg CUDA_MINOR_VERSION=8 \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
-t $(DOCKER_IMAGE)-cuda-12 .
-t $(DOCKER_IMAGE)-cuda-11 .
docker-aio:
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
docker build \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
-t $(DOCKER_AIO_IMAGE) -f Dockerfile.aio .
docker-aio-all:
@@ -373,31 +329,60 @@ docker-aio-all:
docker-image-intel:
docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04 \
--build-arg BASE_IMAGE=quay.io/go-skynet/intel-oneapi-base:latest \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=intel \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
-t $(DOCKER_IMAGE) .
--build-arg BUILD_TYPE=intel -t $(DOCKER_IMAGE) .
########################################################
## Backends
########################################################
# Pattern rule for standard backends (docker-based)
# This matches all backends that use docker-build-* and docker-save-*
backends/%: docker-build-% docker-save-% build
./local-ai backends install "ocifile://$(abspath ./backend-images/$*.tar)"
# Darwin-specific backends (keep as explicit targets since they have special build logic)
backends/diffusers: docker-build-diffusers docker-save-diffusers build
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
backends/rfdetr: docker-build-rfdetr docker-save-rfdetr build
./local-ai backends install "ocifile://$(abspath ./backend-images/rfdetr.tar)"
backends/kitten-tts: docker-build-kitten-tts docker-save-kitten-tts build
./local-ai backends install "ocifile://$(abspath ./backend-images/kitten-tts.tar)"
backends/kokoro: docker-build-kokoro docker-save-kokoro build
./local-ai backends install "ocifile://$(abspath ./backend-images/kokoro.tar)"
backends/chatterbox: docker-build-chatterbox docker-save-chatterbox build
./local-ai backends install "ocifile://$(abspath ./backend-images/chatterbox.tar)"
backends/llama-cpp-darwin: build
bash ./scripts/build/llama-cpp-darwin.sh
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/neutts: docker-build-neutts docker-save-neutts build
./local-ai backends install "ocifile://$(abspath ./backend-images/neutts.tar)"
build-darwin-python-backend: build
bash ./scripts/build/python-darwin.sh
@@ -427,88 +412,112 @@ backends/stablediffusion-ggml-darwin:
backend-images:
mkdir -p backend-images
# Backend metadata: BACKEND_NAME | DOCKERFILE_TYPE | BUILD_CONTEXT | PROGRESS_FLAG | NEEDS_BACKEND_ARG
# llama-cpp is special - uses llama-cpp Dockerfile and doesn't need BACKEND arg
BACKEND_LLAMA_CPP = llama-cpp|llama-cpp|.|false|false
docker-build-llama-cpp:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
# Golang backends
BACKEND_BARK_CPP = bark-cpp|golang|.|false|true
BACKEND_PIPER = piper|golang|.|false|true
BACKEND_LOCAL_STORE = local-store|golang|.|false|true
BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
BACKEND_SILERO_VAD = silero-vad|golang|.|false|true
BACKEND_STABLEDIFFUSION_GGML = stablediffusion-ggml|golang|.|--progress=plain|true
BACKEND_WHISPER = whisper|golang|.|false|true
docker-build-bark-cpp:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark-cpp -f backend/Dockerfile.golang --build-arg BACKEND=bark-cpp .
# Python backends with root context
BACKEND_RERANKERS = rerankers|python|.|false|true
BACKEND_TRANSFORMERS = transformers|python|.|false|true
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
BACKEND_COQUI = coqui|python|.|false|true
BACKEND_BARK = bark|python|.|false|true
BACKEND_EXLLAMA2 = exllama2|python|.|false|true
docker-build-piper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:piper -f backend/Dockerfile.golang --build-arg BACKEND=piper .
# Python backends with ./backend context
BACKEND_RFDETR = rfdetr|python|./backend|false|true
BACKEND_KITTEN_TTS = kitten-tts|python|./backend|false|true
BACKEND_NEUTTS = neutts|python|./backend|false|true
BACKEND_KOKORO = kokoro|python|./backend|false|true
BACKEND_VLLM = vllm|python|./backend|false|true
BACKEND_DIFFUSERS = diffusers|python|./backend|--progress=plain|true
BACKEND_CHATTERBOX = chatterbox|python|./backend|false|true
BACKEND_VIBEVOICE = vibevoice|python|./backend|--progress=plain|true
BACKEND_MOONSHINE = moonshine|python|./backend|false|true
docker-build-local-store:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:local-store -f backend/Dockerfile.golang --build-arg BACKEND=local-store .
# Helper function to build docker image for a backend
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG)
define docker-build-backend
docker build $(if $(filter-out false,$(4)),$(4)) \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
$(if $(filter true,$(5)),--build-arg BACKEND=$(1)) \
-t local-ai-backend:$(1) -f backend/Dockerfile.$(2) $(3)
endef
docker-build-huggingface:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:huggingface -f backend/Dockerfile.golang --build-arg BACKEND=huggingface .
# Generate docker-build targets from backend definitions
define generate-docker-build-target
docker-build-$(word 1,$(subst |, ,$(1))):
$$(call docker-build-backend,$(word 1,$(subst |, ,$(1))),$(word 2,$(subst |, ,$(1))),$(word 3,$(subst |, ,$(1))),$(word 4,$(subst |, ,$(1))),$(word 5,$(subst |, ,$(1))))
endef
docker-build-rfdetr:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rfdetr -f backend/Dockerfile.python --build-arg BACKEND=rfdetr ./backend
# Generate all docker-build targets
$(eval $(call generate-docker-build-target,$(BACKEND_LLAMA_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_PIPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_LOCAL_STORE)))
$(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
$(eval $(call generate-docker-build-target,$(BACKEND_SILERO_VAD)))
$(eval $(call generate-docker-build-target,$(BACKEND_STABLEDIFFUSION_GGML)))
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK)))
$(eval $(call generate-docker-build-target,$(BACKEND_EXLLAMA2)))
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
$(eval $(call generate-docker-build-target,$(BACKEND_KITTEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_NEUTTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_KOKORO)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM)))
$(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
docker-build-kitten-tts:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kitten-tts -f backend/Dockerfile.python --build-arg BACKEND=kitten-tts ./backend
# Pattern rule for docker-save targets
docker-save-%: backend-images
docker save local-ai-backend:$* -o backend-images/$*.tar
docker-save-kitten-tts: backend-images
docker save local-ai-backend:kitten-tts -o backend-images/kitten-tts.tar
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine
docker-save-chatterbox: backend-images
docker save local-ai-backend:chatterbox -o backend-images/chatterbox.tar
docker-build-neutts:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:neutts -f backend/Dockerfile.python --build-arg BACKEND=neutts ./backend
docker-save-neutts: backend-images
docker save local-ai-backend:neutts -o backend-images/neutts.tar
docker-build-kokoro:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro ./backend
docker-save-kokoro: backend-images
docker save local-ai-backend:kokoro -o backend-images/kokoro.tar
docker-save-rfdetr: backend-images
docker save local-ai-backend:rfdetr -o backend-images/rfdetr.tar
docker-save-huggingface: backend-images
docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
docker-save-local-store: backend-images
docker save local-ai-backend:local-store -o backend-images/local-store.tar
docker-build-silero-vad:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:silero-vad -f backend/Dockerfile.golang --build-arg BACKEND=silero-vad .
docker-save-silero-vad: backend-images
docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar
docker-save-piper: backend-images
docker save local-ai-backend:piper -o backend-images/piper.tar
docker-save-llama-cpp: backend-images
docker save local-ai-backend:llama-cpp -o backend-images/llama-cpp.tar
docker-save-bark-cpp: backend-images
docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar
docker-build-stablediffusion-ggml:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.golang --build-arg BACKEND=stablediffusion-ggml .
docker-save-stablediffusion-ggml: backend-images
docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar
docker-build-rerankers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
docker-build-vllm:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm .
docker-build-transformers:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
docker-build-diffusers:
docker build --progress=plain --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers ./backend
docker-save-diffusers: backend-images
docker save local-ai-backend:diffusers -o backend-images/diffusers.tar
docker-build-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.golang --build-arg BACKEND=whisper .
docker-save-whisper: backend-images
docker save local-ai-backend:whisper -o backend-images/whisper.tar
docker-build-faster-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
docker-build-coqui:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
docker-build-bark:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
docker-build-chatterbox:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox ./backend
docker-build-exllama2:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2
########################################################
### END Backends


@@ -33,7 +33,7 @@
<img src="https://img.shields.io/badge/X-%23000000.svg?style=for-the-badge&logo=X&logoColor=white&label=LocalAI_API" alt="Follow LocalAI_API"/>
</a>
<a href="https://discord.gg/uJAeKSAGDy" target="blank">
<img src="https://img.shields.io/badge/dynamic/json?color=blue&label=Discord&style=for-the-badge&query=approximate_member_count&url=https%3A%2F%2Fdiscordapp.com%2Fapi%2Finvites%2FuJAeKSAGDy%3Fwith_counts%3Dtrue&logo=discord" alt="Join LocalAI Discord Community"/>
<img src="https://dcbadge.vercel.app/api/server/uJAeKSAGDy?style=flat-square&theme=default-inverted" alt="Join LocalAI Discord Community"/>
</a>
</p>
@@ -80,18 +80,8 @@
</tr>
</table>
## Screenshots / Video
## Screenshots
### Youtube video
<h1 align="center">
<br>
<a href="https://www.youtube.com/watch?v=PDqYhB9nNHA" target="_blank"> <img width="300" src="https://img.youtube.com/vi/PDqYhB9nNHA/0.jpg"> </a><br>
<br>
</h1>
### Screenshots
| Talk Interface | Generate Audio |
| --- | --- |
@@ -118,7 +108,7 @@ Run the installer script:
curl https://localai.io/install.sh | sh
```
For more installation options, see [Installer Options](https://localai.io/installation/).
For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/).
### macOS Download:
@@ -146,18 +136,14 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
### NVIDIA GPU Images:
```bash
# CUDA 13.0
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-13
# CUDA 12.0
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
# NVIDIA Jetson (L4T) ARM64
# CUDA 12 (for Nvidia AGX Orin and similar platforms)
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
# CUDA 11.7
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-11
# CUDA 13 (for Nvidia DGX Spark)
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64-cuda-13
# NVIDIA Jetson (L4T) ARM64
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
```
### AMD GPU Images (ROCm):
@@ -184,12 +170,12 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-gpu-vulkan
# CPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
# NVIDIA CUDA 13 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-13
# NVIDIA CUDA 12 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
# NVIDIA CUDA 11 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
# Intel GPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel
@@ -220,8 +206,6 @@ For more information, see [💻 Getting started](https://localai.io/basics/getti
## 📰 Latest project news
- December 2025: [Dynamic Memory Resource reclaimer](https://github.com/mudler/LocalAI/pull/7583), [Automatic fitting of models to multiple GPUs (llama.cpp)](https://github.com/mudler/LocalAI/pull/7584), [Added Vibevoice backend](https://github.com/mudler/LocalAI/pull/7494)
- November 2025: Major improvements to the UX. Among these: [Import models via URL](https://github.com/mudler/LocalAI/pull/7245) and [Multiple chats and history](https://github.com/mudler/LocalAI/pull/7325)
- October 2025: 🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) support added for agentic capabilities with external tools
- September 2025: New Launcher application for macOS and Linux, extended support for many backends on Mac and Nvidia L4T devices. Models: added MLX-Audio, WAN 2.2. WebUI improvements, and Python-based backends now ship portable Python environments.
- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
@@ -273,40 +257,39 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
### Text Generation & Language Models
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **llama.cpp** | LLM inference in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12/13, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 12/13, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12/13 |
| **llama.cpp** | LLM inference in C/C++ | CUDA 11/12, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 11/12, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
### Audio & Speech Processing
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12/13, ROCm, Intel |
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 11/12, CPU |
| **piper** | Fast neural TTS system | CPU |
| **kitten-tts** | Kitten TTS models | CPU |
| **silero-vad** | Voice Activity Detection | CPU |
| **neutts** | Text-to-speech with voice cloning | CUDA 12/13, ROCm, CPU |
| **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
| **neutts** | Text-to-speech with voice cloning | CUDA 12, ROCm, CPU |
### Image & Video Generation
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12/13, Intel SYCL, Vulkan, CPU |
| **diffusers** | HuggingFace diffusion models | CUDA 12/13, ROCm, Intel, Metal, CPU |
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12, Intel SYCL, Vulkan, CPU |
| **diffusers** | HuggingFace diffusion models | CUDA 11/12, ROCm, Intel, Metal, CPU |
### Specialized AI Tasks
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **rfdetr** | Real-time object detection | CUDA 12/13, Intel, CPU |
| **rerankers** | Document reranking API | CUDA 12/13, ROCm, Intel, CPU |
| **rfdetr** | Real-time object detection | CUDA 12, Intel, CPU |
| **rerankers** | Document reranking API | CUDA 11/12, ROCm, Intel, CPU |
| **local-store** | Vector database | CPU |
| **huggingface** | HuggingFace API integration | API-based |
@@ -314,14 +297,13 @@ LocalAI supports a comprehensive range of AI backends with multiple acceleration
| Acceleration Type | Supported Backends | Hardware Support |
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 11** | llama.cpp, whisper, stablediffusion, diffusers, rerankers, bark, chatterbox | Nvidia hardware |
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice | Intel Arc, Intel iGPUs |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
| **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
| **NVIDIA Jetson** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI |
| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
### 🔗 Community and integrations
@@ -414,10 +396,6 @@ A huge thank you to our generous sponsors who support this project covering CI e
</a>
</p>
### Individual sponsors
A special thanks to the individual sponsors who have contributed to the project; the full list is on [GitHub](https://github.com/sponsors/mudler) and [buymeacoffee](https://buymeacoffee.com/mudler). A special shout-out goes to [drikster80](https://github.com/drikster80) for being so generous. Thank you everyone!
## 🌟 Star history
[![LocalAI Star history Chart](https://api.star-history.com/svg?repos=go-skynet/LocalAI&type=Date)](https://star-history.com/#go-skynet/LocalAI&Date)


@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:24.04
ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
@@ -12,15 +12,14 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG GO_VERSION=1.25.4
ARG UBUNTU_VERSION=2404
ARG GO_VERSION=1.22.6
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
git ccache \
ca-certificates \
make cmake wget \
make cmake \
curl unzip \
libssl-dev && \
apt-get clean && \
@@ -33,52 +32,17 @@ ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
@@ -86,19 +50,15 @@ EOT
# CuBLAS requirements
RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -109,31 +69,12 @@ RUN <<EOT bash
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
@@ -182,8 +123,6 @@ EOT
COPY . /LocalAI
RUN git config --global --add safe.directory /LocalAI
RUN cd /LocalAI && make protogen-go && make -C /LocalAI/backend/go/${BACKEND} build
FROM scratch

View File

@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:24.04
ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
@@ -10,8 +10,7 @@ FROM ${GRPC_BASE_IMAGE} AS grpc
ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0
ARG CMAKE_FROM_SOURCE=false
# CUDA Toolkit 13.x compatibility: CMake 3.31.9+ fixes toolchain detection/arch table issues
ARG CMAKE_VERSION=3.31.10
ARG CMAKE_VERSION=3.26.4
ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
@@ -21,13 +20,13 @@ RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates \
build-essential curl libssl-dev \
git wget && \
git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else
apt-get update && \
@@ -51,13 +50,6 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
rm -rf /build
FROM ${BASE_IMAGE} AS builder
ARG CMAKE_FROM_SOURCE=false
ARG CMAKE_VERSION=3.31.10
# We can target specific CUDA ARCHITECTURES like --build-arg CUDA_DOCKER_ARCH='75;86;89;120'
ARG CUDA_DOCKER_ARCH
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
ARG CMAKE_ARGS
ENV CMAKE_ARGS=${CMAKE_ARGS}
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
@@ -69,8 +61,7 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG GO_VERSION=1.25.4
ARG UBUNTU_VERSION=2404
ARG GO_VERSION=1.22.6
RUN apt-get update && \
apt-get install -y --no-install-recommends \
@@ -78,9 +69,8 @@ RUN apt-get update && \
ccache git \
ca-certificates \
make \
pkg-config libcurl4-openssl-dev \
curl unzip \
libssl-dev wget && \
libssl-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
@@ -90,52 +80,17 @@ ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
@@ -143,19 +98,15 @@ EOT
# CuBLAS requirements
RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -166,31 +117,12 @@ RUN <<EOT bash
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
@@ -232,7 +164,7 @@ EOT
# Install CMake (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then
if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else
apt-get update && \
@@ -248,30 +180,19 @@ COPY --from=grpc /opt/grpc /usr/local
COPY . /LocalAI
RUN <<'EOT' bash
set -euxo pipefail
if [[ -n "${CUDA_DOCKER_ARCH:-}" ]]; then
CUDA_ARCH_ESC="${CUDA_DOCKER_ARCH//;/\\;}"
export CMAKE_ARGS="${CMAKE_ARGS:-} -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH_ESC}"
echo "CMAKE_ARGS(env) = ${CMAKE_ARGS}"
rm -rf /LocalAI/backend/cpp/llama-cpp-*-build
fi
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then
cd /LocalAI/backend/cpp/llama-cpp
make llama-cpp-fallback
make llama-cpp-grpc
make llama-cpp-rpc-server
else
cd /LocalAI/backend/cpp/llama-cpp
make llama-cpp-avx
make llama-cpp-avx2
make llama-cpp-avx512
make llama-cpp-fallback
make llama-cpp-grpc
make llama-cpp-rpc-server
fi
## Otherwise just run the normal build
RUN <<EOT bash
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-fallback && \
make llama-cpp-grpc && make llama-cpp-rpc-server; \
else \
cd /LocalAI/backend/cpp/llama-cpp && make llama-cpp-avx && \
make llama-cpp-avx2 && \
make llama-cpp-avx512 && \
make llama-cpp-fallback && \
make llama-cpp-grpc && \
make llama-cpp-rpc-server; \
fi
EOT

View File

@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:24.04
ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
@@ -12,7 +12,6 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG UBUNTU_VERSION=2404
RUN apt-get update && \
apt-get install -y --no-install-recommends \
@@ -22,7 +21,7 @@ RUN apt-get update && \
espeak-ng \
curl \
libssl-dev \
git wget \
git \
git-lfs \
unzip clang \
upx-ucl \
@@ -31,15 +30,8 @@ RUN apt-get update && \
python3-dev llvm \
python3-venv make cmake && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN <<EOT bash
if [ "${UBUNTU_VERSION}" = "2404" ]; then
pip install --break-system-packages --user --upgrade pip
else
pip install --upgrade pip
fi
EOT
rm -rf /var/lib/apt/lists/* && \
pip install --upgrade pip
# Cuda
@@ -54,45 +46,11 @@ RUN <<EOT bash
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
@@ -100,19 +58,15 @@ EOT
# CuBLAS requirements
RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -123,31 +77,12 @@ RUN <<EOT bash
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
@@ -168,40 +103,21 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
; fi
# Install uv as a system package
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
ENV PATH="/root/.cargo/bin:${PATH}"
# Increase timeout for uv installs behind slow networks
ENV UV_HTTP_TIMEOUT=180
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# Install grpcio-tools (the version in 22.04 is too old)
RUN <<EOT bash
if [ "${UBUNTU_VERSION}" = "2404" ]; then
pip install --break-system-packages --user grpcio-tools==1.71.0 grpcio==1.71.0
else
pip install grpcio-tools==1.71.0 grpcio==1.71.0
fi
EOT
RUN pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
COPY backend/python/${BACKEND} /${BACKEND}
COPY backend/backend.proto /${BACKEND}/backend.proto
COPY backend/python/common/ /${BACKEND}/common
COPY scripts/build/package-gpu-libs.sh /package-gpu-libs.sh
COPY python/${BACKEND} /${BACKEND}
COPY backend.proto /${BACKEND}/backend.proto
COPY python/common/ /${BACKEND}/common
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make
# Package GPU libraries into the backend's lib directory
RUN mkdir -p /${BACKEND}/lib && \
TARGET_LIB_DIR="/${BACKEND}/lib" BUILD_TYPE="${BUILD_TYPE}" CUDA_MAJOR_VERSION="${CUDA_MAJOR_VERSION}" \
bash /package-gpu-libs.sh "/${BACKEND}/lib"
FROM scratch
ARG BACKEND=rerankers
COPY --from=builder /${BACKEND}/ /

View File

@@ -65,7 +65,7 @@ The backend system provides language-specific Dockerfiles that handle the build
## Hardware Acceleration Support
### CUDA (NVIDIA)
- **Versions**: CUDA 12.x, 13.x
- **Versions**: CUDA 11.x, 12.x
- **Features**: cuBLAS, cuDNN, TensorRT optimization
- **Targets**: x86_64, ARM64 (Jetson)
@@ -132,7 +132,8 @@ For ARM64/Mac builds, docker can't be used, and the makefile in the respective b
### Build Types
- **`cpu`**: CPU-only optimization
- **`cublas12`**, **`cublas13`**: CUDA 12.x, 13.x with cuBLAS
- **`cublas11`**: CUDA 11.x with cuBLAS
- **`cublas12`**: CUDA 12.x with cuBLAS
- **`hipblas`**: ROCm with rocBLAS
- **`intel`**: Intel oneAPI optimization
- **`vulkan`**: Vulkan-based acceleration
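As a minimal sketch of how these build types map onto an image build: the Dockerfiles in this compare check `BUILD_TYPE=cublas` and select the CUDA toolkit packages via the `CUDA_MAJOR_VERSION`/`CUDA_MINOR_VERSION` build arguments, so the `cublas12` build type appears to correspond to this argument combination. The image tag and Dockerfile path below are placeholders:

```bash
# Sketch only: BUILD_TYPE, CUDA_MAJOR_VERSION and CUDA_MINOR_VERSION are
# ARGs in the backend Dockerfiles shown above; tag and path are placeholders.
docker build \
  --build-arg BUILD_TYPE=cublas \
  --build-arg CUDA_MAJOR_VERSION=12 \
  --build-arg CUDA_MINOR_VERSION=8 \
  -t localai-backend:cublas12 \
  -f Dockerfile .
```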
@@ -209,4 +210,4 @@ When contributing to the backend system:
2. **Add Tests**: Include comprehensive test coverage
3. **Document**: Provide clear usage examples
4. **Optimize**: Consider performance and resource usage
5. **Validate**: Test across different hardware targets
5. **Validate**: Test across different hardware targets

View File

@@ -282,7 +282,6 @@ message TranscriptRequest {
uint32 threads = 4;
bool translate = 5;
bool diarize = 6;
string prompt = 7;
}
message TranscriptResult {
@@ -301,6 +300,7 @@ message TranscriptSegment {
message GenerateImageRequest {
int32 height = 1;
int32 width = 2;
int32 mode = 3;
int32 step = 4;
int32 seed = 5;
string positive_prompt = 6;

View File

@@ -57,7 +57,7 @@ add_library(hw_grpc_proto
${hw_proto_srcs}
${hw_proto_hdrs} )
add_executable(${TARGET} grpc-server.cpp json.hpp httplib.h)
add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp httplib.h)
target_include_directories(${TARGET} PRIVATE ../llava)
target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})
@@ -70,4 +70,4 @@ target_link_libraries(${TARGET} PRIVATE common llama mtmd ${CMAKE_THREAD_LIBS_IN
target_compile_features(${TARGET} PRIVATE cxx_std_11)
if(TARGET BUILD_INFO)
add_dependencies(${TARGET} BUILD_INFO)
endif()
endif()

View File

@@ -1,5 +1,5 @@
LLAMA_VERSION?=593da7fa49503b68f9f01700be9f508f1e528992
LLAMA_VERSION?=80deff3648b93727422461c41c7279ef1dac7452
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?=
@@ -7,8 +7,7 @@ BUILD_TYPE?=
NATIVE?=false
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server
JOBS?=$(shell nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 1)
ARCH?=$(shell uname -m)
JOBS?=$(shell nproc)
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
@@ -107,21 +106,21 @@ llama-cpp-avx: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
$(info ${GREEN}I llama-cpp build info:avx${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
llama-cpp-fallback: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
llama-cpp-grpc: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
$(info ${GREEN}I llama-cpp build info:grpc${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc
llama-cpp-rpc-server: llama-cpp-grpc

View File

File diff suppressed because it is too large

View File

@@ -6,7 +6,6 @@
set -e
CURDIR=$(dirname "$(realpath $0)")
REPO_ROOT="${CURDIR}/../../.."
# Create lib directory
mkdir -p $CURDIR/package/lib
@@ -38,15 +37,6 @@ else
exit 1
fi
# Package GPU libraries based on BUILD_TYPE
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
if [ -f "$GPU_LIB_SCRIPT" ]; then
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
package_gpu_libs
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

View File

@@ -0,0 +1,13 @@
diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 3cd0d2fa..6c5e811a 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -2608,7 +2608,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
int* patches_data = (int*)malloc(ggml_nbytes(patches));
for (int i = 0; i < num_patches; i++) {
- patches_data[i] = i + 1;
+ patches_data[i] = i;
}
ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
free(patches_data);

View File

@@ -1,24 +1,18 @@
#!/bin/bash
## Patches
## Apply patches from the `patches` directory
if [ -d "patches" ]; then
for patch in $(ls patches); do
echo "Applying patch $patch"
patch -d llama.cpp/ -p1 < patches/$patch
done
fi
for patch in $(ls patches); do
echo "Applying patch $patch"
patch -d llama.cpp/ -p1 < patches/$patch
done
set -e
for file in $(ls llama.cpp/tools/server/); do
cp -rfv llama.cpp/tools/server/$file llama.cpp/tools/grpc-server/
done
cp -r CMakeLists.txt llama.cpp/tools/grpc-server/
cp -r grpc-server.cpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/nlohmann/json.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/tools/server/utils.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/cpp-httplib/httplib.h llama.cpp/tools/grpc-server/
set +e
@@ -29,3 +23,30 @@ else
fi
set -e
# Now to keep maximum compatibility with the original server.cpp, we need to remove the index.html.gz.hpp and loading.html.hpp includes
# and remove the main function
# TODO: upstream this to the original server.cpp by extracting the upstream main function to a separate file
awk '
/int[ \t]+main[ \t]*\(/ { # If the line starts the main function
in_main=1; # Set a flag
open_braces=0; # Track number of open braces
}
in_main {
open_braces += gsub(/\{/, "{"); # Count opening braces
open_braces -= gsub(/\}/, "}"); # Count closing braces
if (open_braces == 0) { # If all braces are closed
in_main=0; # End skipping
}
next; # Skip lines inside main
}
!in_main # Print lines not inside main
' "llama.cpp/tools/server/server.cpp" > llama.cpp/tools/grpc-server/server.cpp
# remove index.html.gz.hpp and loading.html.hpp includes
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS
sed -i '' '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
else
# Linux and others
sed -i '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
fi

View File

@@ -4,11 +4,11 @@
package main
import (
"github.com/mudler/xlog"
"github.com/rs/zerolog/log"
)
func assert(cond bool, msg string) {
if !cond {
xlog.Fatal().Stack().Msg(msg)
log.Fatal().Stack().Msg(msg)
}
}

View File

@@ -7,7 +7,8 @@ import (
"os"
grpc "github.com/mudler/LocalAI/pkg/grpc"
"github.com/mudler/xlog"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
var (
@@ -15,7 +16,7 @@ var (
)
func main() {
xlog.SetLogger(xlog.NewLogger(xlog.LogLevel(os.Getenv("LOCALAI_LOG_LEVEL")), os.Getenv("LOCALAI_LOG_FORMAT")))
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
flag.Parse()

View File

@@ -12,7 +12,7 @@ import (
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/xlog"
"github.com/rs/zerolog/log"
)
type Store struct {
@@ -135,7 +135,7 @@ func (s *Store) StoresSet(opts *pb.StoresSetOptions) error {
} else {
sample = k.Floats
}
xlog.Debug("Key is not normalized", "sample", sample)
log.Debug().Msgf("Key is not normalized: %v", sample)
}
kvs[i] = Pair{
@@ -238,7 +238,7 @@ func (s *Store) StoresDelete(opts *pb.StoresDeleteOptions) error {
assert(!hasKey(s.keys, k), fmt.Sprintf("Key exists, but was not found: t=%d, %v", len(tail_ks), k))
}
xlog.Debug("Delete", "found", found, "tailLen", len(tail_ks), "j", j, "mergeKeysLen", len(merge_ks), "mergeValuesLen", len(merge_vs))
log.Debug().Msgf("Delete: found = %v, t = %d, j = %d, len(merge_ks) = %d, len(merge_vs) = %d", found, len(tail_ks), j, len(merge_ks), len(merge_vs))
}
merge_ks = append(merge_ks, tail_ks...)
@@ -261,7 +261,7 @@ func (s *Store) StoresDelete(opts *pb.StoresDeleteOptions) error {
}(), "Keys to delete still present")
if len(s.keys) != l {
xlog.Debug("Delete: Some keys not found", "keysLen", len(s.keys), "expectedLen", l)
log.Debug().Msgf("Delete: Some keys not found: len(s.keys) = %d, l = %d", len(s.keys), l)
}
return nil
@@ -273,7 +273,7 @@ func (s *Store) StoresGet(opts *pb.StoresGetOptions) (pb.StoresGetResult, error)
ks := sortIntoKeySlicese(opts.Keys)
if len(s.keys) == 0 {
xlog.Debug("Get: No keys in store")
log.Debug().Msgf("Get: No keys in store")
}
if s.keyLen == -1 {
@@ -305,7 +305,7 @@ func (s *Store) StoresGet(opts *pb.StoresGetOptions) (pb.StoresGetResult, error)
}
if len(pbKeys) != len(opts.Keys) {
xlog.Debug("Get: Some keys not found", "pbKeysLen", len(pbKeys), "optsKeysLen", len(opts.Keys), "storeKeysLen", len(s.keys))
log.Debug().Msgf("Get: Some keys not found: len(pbKeys) = %d, len(opts.Keys) = %d, len(s.Keys) = %d", len(pbKeys), len(opts.Keys), len(s.keys))
}
return pb.StoresGetResult{
@@ -507,7 +507,7 @@ func (s *Store) StoresFind(opts *pb.StoresFindOptions) (pb.StoresFindResult, err
} else {
sample = tk
}
xlog.Debug("Trying to compare non-normalized key with normalized keys", "sample", sample)
log.Debug().Msgf("Trying to compare non-normalized key with normalized keys: %v", sample)
}
return s.StoresFindFallback(opts)

View File

@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
# stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=0e52afc6513cc2dea9a1a017afc4a008d5acf2b0
STABLEDIFFUSION_GGML_VERSION?=0ebe6fe118f125665939b27c89f34ed38716bff8
CMAKE_ARGS+=-DGGML_MAX_NAME=128
@@ -28,12 +28,7 @@ else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm
export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON
else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
else ifeq ($(OS),Darwin)

View File

@@ -1,5 +1,3 @@
#include "stable-diffusion.h"
#include <cmath>
#include <cstdint>
#define GGML_MAX_NAME 128
@@ -8,9 +6,7 @@
#include <time.h>
#include <string>
#include <vector>
#include <map>
#include <filesystem>
#include <algorithm>
#include "gosd.h"
#define STB_IMAGE_IMPLEMENTATION
@@ -24,13 +20,11 @@
#define STB_IMAGE_RESIZE_IMPLEMENTATION
#define STB_IMAGE_RESIZE_STATIC
#include "stb_image_resize.h"
#include <stdlib.h>
#include <regex>
// Names of the sampler method, same order as enum sample_method in stable-diffusion.h
const char* sample_method_str[] = {
"default",
"euler",
"euler_a",
"heun",
"dpm2",
"dpm++2s_a",
@@ -41,384 +35,29 @@ const char* sample_method_str[] = {
"lcm",
"ddim_trailing",
"tcd",
"euler_a",
};
static_assert(std::size(sample_method_str) == SAMPLE_METHOD_COUNT, "sample method mismatch");
// Names of the sigma schedule overrides, same order as sample_schedule in stable-diffusion.h
const char* schedulers[] = {
"default",
"discrete",
"karras",
"exponential",
"ays",
"gits",
"sgm_uniform",
"simple",
"smoothstep",
"kl_optimal",
"lcm",
};
static_assert(std::size(schedulers) == SCHEDULER_COUNT, "schedulers mismatch");
static_assert(std::size(schedulers) == SCHEDULE_COUNT, "schedulers mismatch");
// New enum string arrays
const char* rng_type_str[] = {
"std_default",
"cuda",
"cpu",
};
static_assert(std::size(rng_type_str) == RNG_TYPE_COUNT, "rng type mismatch");
const char* prediction_str[] = {
"epsilon",
"v",
"edm_v",
"flow",
"flux_flow",
"flux2_flow",
};
static_assert(std::size(prediction_str) == PREDICTION_COUNT, "prediction mismatch");
const char* lora_apply_mode_str[] = {
"auto",
"immediately",
"at_runtime",
};
static_assert(std::size(lora_apply_mode_str) == LORA_APPLY_MODE_COUNT, "lora apply mode mismatch");
constexpr const char* sd_type_str[] = {
"f32", // 0
"f16", // 1
"q4_0", // 2
"q4_1", // 3
nullptr, // 4
nullptr, // 5
"q5_0", // 6
"q5_1", // 7
"q8_0", // 8
"q8_1", // 9
"q2_k", // 10
"q3_k", // 11
"q4_k", // 12
"q5_k", // 13
"q6_k", // 14
"q8_k", // 15
"iq2_xxs", // 16
"iq2_xs", // 17
"iq3_xxs", // 18
"iq1_s", // 19
"iq4_nl", // 20
"iq3_s", // 21
"iq2_s", // 22
"iq4_xs", // 23
"i8", // 24
"i16", // 25
"i32", // 26
"i64", // 27
"f64", // 28
"iq1_m", // 29
"bf16", // 30
nullptr, nullptr, nullptr, nullptr, // 31-34
"tq1_0", // 35
"tq2_0", // 36
nullptr, nullptr, // 37-38
"mxfp4" // 39
};
static_assert(std::size(sd_type_str) == SD_TYPE_COUNT, "sd type mismatch");
sd_ctx_params_t ctx_params;
sd_ctx_t* sd_c;
// Moved from the context (load time) to generation time params
scheduler_t scheduler = SCHEDULER_COUNT;
sample_method_t sample_method = SAMPLE_METHOD_COUNT;
scheduler_t scheduler = scheduler_t::DEFAULT;
// Storage for embeddings (needs to persist for the lifetime of ctx_params)
static std::vector<sd_embedding_t> embedding_vec;
// Storage for embedding strings (needs to persist as long as embedding_vec references them)
static std::vector<std::string> embedding_strings;
// Storage for LoRAs (needs to persist for the lifetime of generation params)
static std::vector<sd_lora_t> lora_vec;
// Storage for LoRA strings (needs to persist as long as lora_vec references them)
static std::vector<std::string> lora_strings;
// Storage for lora_dir path
static std::string lora_dir_path;
// Build embeddings vector from directory, similar to upstream CLI
static void build_embedding_vec(const char* embedding_dir) {
embedding_vec.clear();
embedding_strings.clear();
if (!embedding_dir || strlen(embedding_dir) == 0) {
return;
}
if (!std::filesystem::exists(embedding_dir) || !std::filesystem::is_directory(embedding_dir)) {
fprintf(stderr, "Embedding directory does not exist or is not a directory: %s\n", embedding_dir);
return;
}
static const std::vector<std::string> valid_ext = {".pt", ".safetensors", ".gguf"};
for (const auto& entry : std::filesystem::directory_iterator(embedding_dir)) {
if (!entry.is_regular_file()) {
continue;
}
auto path = entry.path();
std::string ext = path.extension().string();
bool valid = false;
for (const auto& e : valid_ext) {
if (ext == e) {
valid = true;
break;
}
}
if (!valid) {
continue;
}
std::string name = path.stem().string();
std::string full_path = path.string();
// Store strings in persistent storage
embedding_strings.push_back(name);
embedding_strings.push_back(full_path);
sd_embedding_t item;
item.name = embedding_strings[embedding_strings.size() - 2].c_str();
item.path = embedding_strings[embedding_strings.size() - 1].c_str();
embedding_vec.push_back(item);
fprintf(stderr, "Found embedding: %s -> %s\n", item.name, item.path);
}
fprintf(stderr, "Loaded %zu embeddings from %s\n", embedding_vec.size(), embedding_dir);
}
// Discover LoRA files in directory and build a map of name -> path
static std::map<std::string, std::string> discover_lora_files(const char* lora_dir) {
std::map<std::string, std::string> lora_map;
if (!lora_dir || strlen(lora_dir) == 0) {
fprintf(stderr, "LoRA directory not specified\n");
return lora_map;
}
if (!std::filesystem::exists(lora_dir) || !std::filesystem::is_directory(lora_dir)) {
fprintf(stderr, "LoRA directory does not exist or is not a directory: %s\n", lora_dir);
return lora_map;
}
static const std::vector<std::string> valid_ext = {".safetensors", ".ckpt", ".pt", ".gguf"};
fprintf(stderr, "Discovering LoRA files in: %s\n", lora_dir);
for (const auto& entry : std::filesystem::directory_iterator(lora_dir)) {
if (!entry.is_regular_file()) {
continue;
}
auto path = entry.path();
std::string ext = path.extension().string();
bool valid = false;
for (const auto& e : valid_ext) {
if (ext == e) {
valid = true;
break;
}
}
if (!valid) {
continue;
}
std::string name = path.stem().string(); // stem() already removes extension
std::string full_path = path.string();
// Store the name (without extension) -> full path mapping
// This allows users to specify just the name in <lora:name:strength>
lora_map[name] = full_path;
fprintf(stderr, "Found LoRA file: %s -> %s\n", name.c_str(), full_path.c_str());
}
fprintf(stderr, "Discovered %zu LoRA files in %s\n", lora_map.size(), lora_dir);
return lora_map;
}
// Helper function to check if a path is absolute (matches upstream)
static bool is_absolute_path(const std::string& p) {
#ifdef _WIN32
// Windows: C:/path or C:\path
return p.size() > 1 && std::isalpha(static_cast<unsigned char>(p[0])) && p[1] == ':';
#else
// Unix: /path
return !p.empty() && p[0] == '/';
#endif
}
// Parse LoRAs from prompt string (e.g., "<lora:name:1.0>" or "<lora:name>")
// Returns a vector of LoRA info and the cleaned prompt with LoRA tags removed
// Matches upstream implementation more closely
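// Illustrative example (assumed, not from upstream): with lora_dir containing
// "pixel-art.safetensors", the prompt
//     "a castle <lora:pixel-art:0.8> at sunset"
// yields one sd_lora_t { path = "<lora_dir>/pixel-art.safetensors", multiplier = 0.8 }
// and the cleaned prompt "a castle at sunset". If the same tag appears more
// than once, its multipliers are accumulated via the maps below.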
static std::pair<std::vector<sd_lora_t>, std::string> parse_loras_from_prompt(const std::string& prompt, const char* lora_dir) {
std::vector<sd_lora_t> loras;
std::string cleaned_prompt = prompt;
if (!lora_dir || strlen(lora_dir) == 0) {
fprintf(stderr, "LoRA directory not set, cannot parse LoRAs from prompt\n");
return {loras, cleaned_prompt};
}
// Discover LoRA files for name-based lookup
std::map<std::string, std::string> discovered_lora_map = discover_lora_files(lora_dir);
// Map to accumulate multipliers for the same LoRA (matches upstream)
std::map<std::string, float> lora_map;
std::map<std::string, float> high_noise_lora_map;
static const std::regex re(R"(<lora:([^:>]+):([^>]+)>)");
static const std::vector<std::string> valid_ext = {".pt", ".safetensors", ".gguf"};
std::smatch m;
std::string tmp = prompt;
fprintf(stderr, "Parsing LoRAs from prompt: %s\n", prompt.c_str());
while (std::regex_search(tmp, m, re)) {
std::string raw_path = m[1].str();
const std::string raw_mul = m[2].str();
float mul = 0.f;
try {
mul = std::stof(raw_mul);
} catch (...) {
tmp = m.suffix().str();
cleaned_prompt = std::regex_replace(cleaned_prompt, re, "", std::regex_constants::format_first_only);
fprintf(stderr, "Invalid LoRA multiplier '%s', skipping\n", raw_mul.c_str());
continue;
}
bool is_high_noise = false;
static const std::string prefix = "|high_noise|";
if (raw_path.rfind(prefix, 0) == 0) {
raw_path.erase(0, prefix.size());
is_high_noise = true;
}
std::filesystem::path final_path;
if (is_absolute_path(raw_path)) {
final_path = raw_path;
} else {
// Try name-based lookup first
auto it = discovered_lora_map.find(raw_path);
if (it != discovered_lora_map.end()) {
final_path = it->second;
} else {
// Try case-insensitive lookup
bool found = false;
for (const auto& pair : discovered_lora_map) {
std::string lower_name = raw_path;
std::string lower_key = pair.first;
std::transform(lower_name.begin(), lower_name.end(), lower_name.begin(), ::tolower);
std::transform(lower_key.begin(), lower_key.end(), lower_key.begin(), ::tolower);
if (lower_name == lower_key) {
final_path = pair.second;
found = true;
break;
}
}
if (!found) {
// Try as relative path in lora_dir
final_path = std::filesystem::path(lora_dir) / raw_path;
}
}
}
// Try adding extensions if file doesn't exist
if (!std::filesystem::exists(final_path)) {
bool found = false;
for (const auto& ext : valid_ext) {
std::filesystem::path try_path = final_path;
try_path += ext;
if (std::filesystem::exists(try_path)) {
final_path = try_path;
found = true;
break;
}
}
if (!found) {
fprintf(stderr, "WARNING: LoRA file not found: %s\n", final_path.lexically_normal().string().c_str());
tmp = m.suffix().str();
cleaned_prompt = std::regex_replace(cleaned_prompt, re, "", std::regex_constants::format_first_only);
continue;
}
}
// Normalize path (matches upstream)
const std::string key = final_path.lexically_normal().string();
// Accumulate multiplier if same LoRA appears multiple times (matches upstream)
if (is_high_noise) {
high_noise_lora_map[key] += mul;
} else {
lora_map[key] += mul;
}
fprintf(stderr, "Parsed LoRA: path='%s', multiplier=%.2f, is_high_noise=%s\n",
key.c_str(), mul, is_high_noise ? "true" : "false");
cleaned_prompt = std::regex_replace(cleaned_prompt, re, "", std::regex_constants::format_first_only);
tmp = m.suffix().str();
}
// Build final LoRA vector from accumulated maps (matches upstream)
// Store all path strings first to ensure they persist
for (const auto& kv : lora_map) {
lora_strings.push_back(kv.first);
}
for (const auto& kv : high_noise_lora_map) {
lora_strings.push_back(kv.first);
}
// Now build the LoRA vector with pointers to the stored strings
size_t string_idx = 0;
for (const auto& kv : lora_map) {
sd_lora_t item;
item.is_high_noise = false;
item.path = lora_strings[string_idx].c_str();
item.multiplier = kv.second;
loras.push_back(item);
string_idx++;
}
for (const auto& kv : high_noise_lora_map) {
sd_lora_t item;
item.is_high_noise = true;
item.path = lora_strings[string_idx].c_str();
item.multiplier = kv.second;
loras.push_back(item);
string_idx++;
}
// Clean up extra spaces
std::regex space_regex(R"(\s+)");
cleaned_prompt = std::regex_replace(cleaned_prompt, space_regex, " ");
// Trim leading/trailing spaces
size_t first = cleaned_prompt.find_first_not_of(" \t");
if (first != std::string::npos) {
cleaned_prompt.erase(0, first);
}
size_t last = cleaned_prompt.find_last_not_of(" \t");
if (last != std::string::npos) {
cleaned_prompt.erase(last + 1);
}
fprintf(stderr, "Parsed %zu LoRA(s) from prompt. Cleaned prompt: %s\n", loras.size(), cleaned_prompt.c_str());
return {loras, cleaned_prompt};
}
sample_method_t sample_method;
// Copied from the upstream CLI
static void sd_log_cb(enum sd_log_level_t level, const char* log, void* data) {
@@ -459,7 +98,7 @@ int load_model(const char *model, char *model_path, char* options[], int threads
const char *stableDiffusionModel = "";
if (diff == 1 ) {
stableDiffusionModel = strdup(model);
stableDiffusionModel = model;
model = "";
}
@@ -470,38 +109,8 @@ int load_model(const char *model, char *model_path, char* options[], int threads
const char *vae_path = "";
const char *scheduler_str = "";
const char *sampler = "";
const char *clip_vision_path = "";
const char *llm_path = "";
const char *llm_vision_path = "";
const char *diffusion_model_path = stableDiffusionModel;
const char *high_noise_diffusion_model_path = "";
const char *taesd_path = "";
const char *control_net_path = "";
const char *embedding_dir = "";
const char *photo_maker_path = "";
const char *tensor_type_rules = "";
char *lora_dir = model_path;
bool vae_decode_only = true;
int n_threads = threads;
enum sd_type_t wtype = SD_TYPE_COUNT;
enum rng_type_t rng_type = CUDA_RNG;
enum rng_type_t sampler_rng_type = RNG_TYPE_COUNT;
enum prediction_t prediction = PREDICTION_COUNT;
enum lora_apply_mode_t lora_apply_mode = LORA_APPLY_AUTO;
bool offload_params_to_cpu = false;
bool keep_clip_on_cpu = false;
bool keep_control_net_on_cpu = false;
bool keep_vae_on_cpu = false;
bool diffusion_flash_attn = false;
bool tae_preview_only = false;
bool diffusion_conv_direct = false;
bool vae_conv_direct = false;
bool force_sdxl_vae_conv_scale = false;
bool chroma_use_dit_mask = true;
bool chroma_use_t5_mask = false;
int chroma_t5_mask_pad = 1;
float flow_shift = INFINITY;
bool lora_dir_allocated = false;
fprintf(stderr, "parsing options: %p\n", options);
@@ -514,16 +123,16 @@ int load_model(const char *model, char *model_path, char* options[], int threads
}
if (!strcmp(optname, "clip_l_path")) {
clip_l_path = strdup(optval);
clip_l_path = optval;
}
if (!strcmp(optname, "clip_g_path")) {
clip_g_path = strdup(optval);
clip_g_path = optval;
}
if (!strcmp(optname, "t5xxl_path")) {
t5xxl_path = strdup(optval);
t5xxl_path = optval;
}
if (!strcmp(optname, "vae_path")) {
vae_path = strdup(optval);
vae_path = optval;
}
if (!strcmp(optname, "scheduler")) {
scheduler_str = optval;
@@ -538,201 +147,18 @@ int load_model(const char *model, char *model_path, char* options[], int threads
std::filesystem::path lora_path(optval);
std::filesystem::path full_lora_path = model_path_str / lora_path;
lora_dir = strdup(full_lora_path.string().c_str());
lora_dir_path = full_lora_path.string();
fprintf(stderr, "LoRA dir resolved to: %s\n", lora_dir);
lora_dir_allocated = true;
fprintf(stderr, "Lora dir resolved to: %s\n", lora_dir);
} else {
lora_dir = strdup(optval);
lora_dir_path = std::string(optval);
lora_dir_allocated = true;
fprintf(stderr, "No model path provided, using lora dir as-is: %s\n", lora_dir);
}
// Discover LoRAs immediately when directory is set
if (lora_dir && strlen(lora_dir) > 0) {
discover_lora_files(lora_dir);
}
}
// New parsing
if (!strcmp(optname, "clip_vision_path")) clip_vision_path = strdup(optval);
if (!strcmp(optname, "llm_path")) llm_path = strdup(optval);
if (!strcmp(optname, "llm_vision_path")) llm_vision_path = strdup(optval);
if (!strcmp(optname, "diffusion_model_path")) diffusion_model_path = strdup(optval);
if (!strcmp(optname, "high_noise_diffusion_model_path")) high_noise_diffusion_model_path = strdup(optval);
if (!strcmp(optname, "taesd_path")) taesd_path = strdup(optval);
if (!strcmp(optname, "control_net_path")) control_net_path = strdup(optval);
if (!strcmp(optname, "embedding_dir")) {
// Path join with model dir
if (model_path && strlen(model_path) > 0) {
std::filesystem::path model_path_str(model_path);
std::filesystem::path embedding_path(optval);
std::filesystem::path full_embedding_path = model_path_str / embedding_path;
embedding_dir = strdup(full_embedding_path.string().c_str());
fprintf(stderr, "Embedding dir resolved to: %s\n", embedding_dir);
} else {
embedding_dir = strdup(optval);
fprintf(stderr, "No model path provided, using embedding dir as-is: %s\n", embedding_dir);
}
}
if (!strcmp(optname, "photo_maker_path")) photo_maker_path = strdup(optval);
if (!strcmp(optname, "tensor_type_rules")) tensor_type_rules = strdup(optval);
if (!strcmp(optname, "vae_decode_only")) vae_decode_only = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "offload_params_to_cpu")) offload_params_to_cpu = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "keep_clip_on_cpu")) keep_clip_on_cpu = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "keep_control_net_on_cpu")) keep_control_net_on_cpu = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "keep_vae_on_cpu")) keep_vae_on_cpu = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "diffusion_flash_attn")) diffusion_flash_attn = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "tae_preview_only")) tae_preview_only = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "diffusion_conv_direct")) diffusion_conv_direct = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "vae_conv_direct")) vae_conv_direct = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "force_sdxl_vae_conv_scale")) force_sdxl_vae_conv_scale = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "chroma_use_dit_mask")) chroma_use_dit_mask = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "chroma_use_t5_mask")) chroma_use_t5_mask = (strcmp(optval, "true") == 0 || strcmp(optval, "1") == 0);
if (!strcmp(optname, "n_threads")) n_threads = atoi(optval);
if (!strcmp(optname, "chroma_t5_mask_pad")) chroma_t5_mask_pad = atoi(optval);
if (!strcmp(optname, "flow_shift")) flow_shift = atof(optval);
if (!strcmp(optname, "rng_type")) {
int found = -1;
for (int m = 0; m < RNG_TYPE_COUNT; m++) {
if (!strcmp(optval, rng_type_str[m])) {
found = m;
break;
}
}
if (found != -1) {
rng_type = (rng_type_t)found;
fprintf(stderr, "Found rng_type: %s\n", optval);
} else {
fprintf(stderr, "Invalid rng_type: %s, using default\n", optval);
}
}
if (!strcmp(optname, "sampler_rng_type")) {
int found = -1;
for (int m = 0; m < RNG_TYPE_COUNT; m++) {
if (!strcmp(optval, rng_type_str[m])) {
found = m;
break;
}
}
if (found != -1) {
sampler_rng_type = (rng_type_t)found;
fprintf(stderr, "Found sampler_rng_type: %s\n", optval);
} else {
fprintf(stderr, "Invalid sampler_rng_type: %s, using default\n", optval);
}
}
if (!strcmp(optname, "prediction")) {
int found = -1;
for (int m = 0; m < PREDICTION_COUNT; m++) {
if (!strcmp(optval, prediction_str[m])) {
found = m;
break;
}
}
if (found != -1) {
prediction = (prediction_t)found;
fprintf(stderr, "Found prediction: %s\n", optval);
} else {
fprintf(stderr, "Invalid prediction: %s, using default\n", optval);
}
}
if (!strcmp(optname, "lora_apply_mode")) {
int found = -1;
for (int m = 0; m < LORA_APPLY_MODE_COUNT; m++) {
if (!strcmp(optval, lora_apply_mode_str[m])) {
found = m;
break;
}
}
if (found != -1) {
lora_apply_mode = (lora_apply_mode_t)found;
fprintf(stderr, "Found lora_apply_mode: %s\n", optval);
} else {
fprintf(stderr, "Invalid lora_apply_mode: %s, using default\n", optval);
}
}
if (!strcmp(optname, "wtype")) {
int found = -1;
for (int m = 0; m < SD_TYPE_COUNT; m++) {
if (sd_type_str[m] && !strcmp(optval, sd_type_str[m])) {
found = m;
break;
}
}
if (found != -1) {
wtype = (sd_type_t)found;
fprintf(stderr, "Found wtype: %s\n", optval);
} else {
fprintf(stderr, "Invalid wtype: %s, using default\n", optval);
}
}
}
fprintf(stderr, "parsed options\n");
// Build embeddings vector from directory if provided
build_embedding_vec(embedding_dir);
fprintf (stderr, "Creating context\n");
sd_ctx_params_init(&ctx_params);
ctx_params.model_path = model;
ctx_params.clip_l_path = clip_l_path;
ctx_params.clip_g_path = clip_g_path;
ctx_params.clip_vision_path = clip_vision_path;
ctx_params.t5xxl_path = t5xxl_path;
ctx_params.llm_path = llm_path;
ctx_params.llm_vision_path = llm_vision_path;
ctx_params.diffusion_model_path = diffusion_model_path;
ctx_params.high_noise_diffusion_model_path = high_noise_diffusion_model_path;
ctx_params.vae_path = vae_path;
ctx_params.taesd_path = taesd_path;
ctx_params.control_net_path = control_net_path;
if (lora_dir && strlen(lora_dir) > 0) {
lora_dir_path = std::string(lora_dir);
fprintf(stderr, "LoRA model directory set to: %s\n", lora_dir);
// Discover LoRAs at load time for logging
discover_lora_files(lora_dir);
} else {
fprintf(stderr, "WARNING: LoRA model directory not set. LoRAs in prompts will not be loaded.\n");
}
// Set embeddings array and count
ctx_params.embeddings = embedding_vec.empty() ? NULL : embedding_vec.data();
ctx_params.embedding_count = static_cast<uint32_t>(embedding_vec.size());
ctx_params.photo_maker_path = photo_maker_path;
ctx_params.tensor_type_rules = tensor_type_rules;
ctx_params.vae_decode_only = vae_decode_only;
// XXX: Setting to true causes a segfault on the second run
ctx_params.free_params_immediately = false;
ctx_params.n_threads = n_threads;
ctx_params.rng_type = rng_type;
ctx_params.keep_clip_on_cpu = keep_clip_on_cpu;
if (wtype != SD_TYPE_COUNT) ctx_params.wtype = wtype;
if (sampler_rng_type != RNG_TYPE_COUNT) ctx_params.sampler_rng_type = sampler_rng_type;
if (prediction != PREDICTION_COUNT) ctx_params.prediction = prediction;
if (lora_apply_mode != LORA_APPLY_MODE_COUNT) ctx_params.lora_apply_mode = lora_apply_mode;
ctx_params.offload_params_to_cpu = offload_params_to_cpu;
ctx_params.keep_control_net_on_cpu = keep_control_net_on_cpu;
ctx_params.keep_vae_on_cpu = keep_vae_on_cpu;
ctx_params.diffusion_flash_attn = diffusion_flash_attn;
ctx_params.tae_preview_only = tae_preview_only;
ctx_params.diffusion_conv_direct = diffusion_conv_direct;
ctx_params.vae_conv_direct = vae_conv_direct;
ctx_params.force_sdxl_vae_conv_scale = force_sdxl_vae_conv_scale;
ctx_params.chroma_use_dit_mask = chroma_use_dit_mask;
ctx_params.chroma_use_t5_mask = chroma_use_t5_mask;
ctx_params.chroma_t5_mask_pad = chroma_t5_mask_pad;
ctx_params.flow_shift = flow_shift;
sd_ctx_t* sd_ctx = new_sd_ctx(&ctx_params);
if (sd_ctx == NULL) {
fprintf (stderr, "failed loading model (generic error)\n");
// TODO: Clean up allocated memory
return 1;
}
fprintf (stderr, "Created context: OK\n");
int sample_method_found = -1;
for (int m = 0; m < SAMPLE_METHOD_COUNT; m++) {
if (!strcmp(sampler, sample_method_str[m])) {
@@ -741,24 +167,54 @@ int load_model(const char *model, char *model_path, char* options[], int threads
}
}
if (sample_method_found == -1) {
sample_method_found = sd_get_default_sample_method(sd_ctx);
fprintf(stderr, "Invalid sample method, using default: %s\n", sample_method_str[sample_method_found]);
fprintf(stderr, "Invalid sample method, default to EULER_A!\n");
sample_method_found = sample_method_t::SAMPLE_METHOD_DEFAULT;
}
sample_method = (sample_method_t)sample_method_found;
for (int d = 0; d < SCHEDULER_COUNT; d++) {
for (int d = 0; d < SCHEDULE_COUNT; d++) {
if (!strcmp(scheduler_str, schedulers[d])) {
scheduler = (scheduler_t)d;
fprintf (stderr, "Found scheduler: %s\n", scheduler_str);
}
}
if (scheduler == SCHEDULER_COUNT) {
scheduler = sd_get_default_scheduler(sd_ctx, sample_method);
fprintf(stderr, "Invalid scheduler, using default: %s\n", schedulers[scheduler]);
fprintf (stderr, "Creating context\n");
sd_ctx_params_t ctx_params;
sd_ctx_params_init(&ctx_params);
ctx_params.model_path = model;
ctx_params.clip_l_path = clip_l_path;
ctx_params.clip_g_path = clip_g_path;
ctx_params.t5xxl_path = t5xxl_path;
ctx_params.diffusion_model_path = stableDiffusionModel;
ctx_params.vae_path = vae_path;
ctx_params.taesd_path = "";
ctx_params.control_net_path = "";
ctx_params.lora_model_dir = lora_dir;
ctx_params.embedding_dir = "";
ctx_params.vae_decode_only = false;
ctx_params.free_params_immediately = false;
ctx_params.n_threads = threads;
ctx_params.rng_type = STD_DEFAULT_RNG;
sd_ctx_t* sd_ctx = new_sd_ctx(&ctx_params);
if (sd_ctx == NULL) {
fprintf (stderr, "failed loading model (generic error)\n");
// Clean up allocated memory
if (lora_dir_allocated && lora_dir) {
free(lora_dir);
}
return 1;
}
fprintf (stderr, "Created context: OK\n");
sd_c = sd_ctx;
// Clean up allocated memory
if (lora_dir_allocated && lora_dir) {
free(lora_dir);
}
return 0;
}
@@ -787,66 +243,12 @@ sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t
sd_img_gen_params_t* sd_img_gen_params_new(void) {
sd_img_gen_params_t *params = (sd_img_gen_params_t *)std::malloc(sizeof(sd_img_gen_params_t));
sd_img_gen_params_init(params);
sd_sample_params_init(&params->sample_params);
sd_cache_params_init(&params->cache);
params->control_strength = 0.9f;
return params;
}
// Storage for cleaned prompt strings (needs to persist)
static std::string cleaned_prompt_storage;
static std::string cleaned_negative_prompt_storage;
void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt) {
// Clear previous LoRA data
lora_vec.clear();
lora_strings.clear();
// Parse LoRAs from prompt
std::string prompt_str = prompt ? prompt : "";
std::string negative_prompt_str = negative_prompt ? negative_prompt : "";
// Get lora_dir from ctx_params if available, otherwise use stored path
const char* lora_dir_to_use = lora_dir_path.empty() ? nullptr : lora_dir_path.c_str();
auto [loras, cleaned_prompt] = parse_loras_from_prompt(prompt_str, lora_dir_to_use);
lora_vec = loras;
cleaned_prompt_storage = cleaned_prompt;
// Also check negative prompt for LoRAs (though this is less common)
auto [neg_loras, cleaned_negative] = parse_loras_from_prompt(negative_prompt_str, lora_dir_to_use);
// Merge negative prompt LoRAs (though typically not used)
if (!neg_loras.empty()) {
fprintf(stderr, "Note: Found %zu LoRAs in negative prompt (may not be supported)\n", neg_loras.size());
}
cleaned_negative_prompt_storage = cleaned_negative;
// Set the cleaned prompts
params->prompt = cleaned_prompt_storage.c_str();
params->negative_prompt = cleaned_negative_prompt_storage.c_str();
// Set LoRAs in params
params->loras = lora_vec.empty() ? nullptr : lora_vec.data();
params->lora_count = static_cast<uint32_t>(lora_vec.size());
fprintf(stderr, "Set prompts with %zu LoRAs. Original prompt: %s\n", lora_vec.size(), prompt ? prompt : "(null)");
fprintf(stderr, "Cleaned prompt: %s\n", cleaned_prompt_storage.c_str());
// Debug: Verify LoRAs are set correctly
if (params->loras && params->lora_count > 0) {
fprintf(stderr, "DEBUG: LoRAs set in params structure:\n");
for (uint32_t i = 0; i < params->lora_count; i++) {
fprintf(stderr, " params->loras[%u]: path='%s' (ptr=%p), multiplier=%.2f, is_high_noise=%s\n",
i,
params->loras[i].path ? params->loras[i].path : "(null)",
(void*)params->loras[i].path,
params->loras[i].multiplier,
params->loras[i].is_high_noise ? "true" : "false");
}
} else {
fprintf(stderr, "DEBUG: No LoRAs set in params structure (loras=%p, lora_count=%u)\n",
(void*)params->loras, params->lora_count);
}
params->prompt = prompt;
params->negative_prompt = negative_prompt;
}
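For reference, the tags that `parse_loras_from_prompt` strips from the prompt follow the common `<lora:name:multiplier>` convention. A minimal Python sketch of that tag format, as an illustration only — the authoritative grammar and the `is_high_noise` handling live in the C++ helper, so the regex and the default multiplier of 1.0 here are assumptions:

```python
import re

# Hypothetical illustration of the <lora:name:multiplier> tag format;
# the real parser is the C++ helper parse_loras_from_prompt.
LORA_TAG = re.compile(r"<lora:([^:>]+)(?::([0-9.]+))?>")

def parse_loras_from_prompt(prompt: str):
    """Return (loras, cleaned_prompt): the extracted (name, multiplier)
    pairs and the prompt with the tags removed."""
    loras = [(m.group(1), float(m.group(2) or 1.0)) for m in LORA_TAG.finditer(prompt)]
    cleaned = LORA_TAG.sub("", prompt).strip()
    return loras, cleaned

loras, cleaned = parse_loras_from_prompt("a castle <lora:pixel-art:0.8> at dusk")
print(loras)    # [('pixel-art', 0.8)]
print(cleaned)  # 'a castle  at dusk' (double space left where the tag was)
```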
void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height) {
@@ -858,7 +260,7 @@ void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed) {
params->seed = seed;
}
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char* ref_images[], int ref_images_count) {
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count) {
sd_image_t* results;
@@ -1038,24 +440,6 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
}
}
// Log LoRA information
if (p->loras && p->lora_count > 0) {
fprintf(stderr, "Using %u LoRA(s) in generation:\n", p->lora_count);
for (uint32_t i = 0; i < p->lora_count; i++) {
fprintf(stderr, " LoRA[%u]: path='%s', multiplier=%.2f, is_high_noise=%s\n",
i,
p->loras[i].path ? p->loras[i].path : "(null)",
p->loras[i].multiplier,
p->loras[i].is_high_noise ? "true" : "false");
}
} else {
fprintf(stderr, "No LoRAs specified for this generation\n");
}
fprintf(stderr, "Generating image with params: \nctx\n---\n%s\ngen\n---\n%s\n",
sd_ctx_params_to_str(&ctx_params),
sd_img_gen_params_to_str(p));
results = generate_image(sd_c, p);
std::free(p);
@@ -1088,12 +472,9 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
fprintf (stderr, "Channel: %d\n", results[0].channel);
fprintf (stderr, "Data: %p\n", results[0].data);
int ret = stbi_write_png(dst, results[0].width, results[0].height, results[0].channel,
results[0].data, 0, NULL);
if (ret)
fprintf (stderr, "Saved resulting image to '%s'\n", dst);
else
fprintf(stderr, "Failed to write image to '%s'\n", dst);
stbi_write_png(dst, results[0].width, results[0].height, results[0].channel,
results[0].data, 0, NULL);
fprintf (stderr, "Saved resulting image to '%s'\n", dst);
// Clean up
free(results[0].data);
@@ -1104,14 +485,12 @@ int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, cha
for (auto buffer : ref_image_buffers) {
if (buffer) free(buffer);
}
fprintf (stderr, "gen_image is done: %s\n", dst);
fflush(stderr);
fprintf (stderr, "gen_image is done: %s", dst);
return !ret;
return 0;
}
int unload() {
free_sd_ctx(sd_c);
return 0;
}

View File

@@ -22,7 +22,7 @@ type SDGGML struct {
var (
LoadModel func(model, model_path string, options []uintptr, threads int32, diff int) int
GenImage func(params uintptr, steps int, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []uintptr, refImagesCount int) int
GenImage func(params uintptr, steps int, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []string, refImagesCount int) int
TilingParamsSetEnabled func(params uintptr, enabled bool)
TilingParamsSetTileSizes func(params uintptr, tileSizeX int, tileSizeY int)
@@ -95,12 +95,12 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
sd.cfgScale = opts.CFGScale
ret := LoadModel(modelFile, modelPathC, options, opts.Threads, diffusionModel)
runtime.KeepAlive(keepAlive)
fmt.Fprintf(os.Stderr, "LoadModel: %d\n", ret)
if ret != 0 {
return fmt.Errorf("could not load model")
}
runtime.KeepAlive(keepAlive)
return nil
}
@@ -123,15 +123,10 @@ func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
}
}
// At the time of writing, Purego doesn't recurse into slices to convert Go strings to pointers, so we need to do that ourselves
var keepAlive []any
refImagesCount := len(opts.RefImages)
refImages := make([]uintptr, refImagesCount, refImagesCount+1)
for i, ri := range opts.RefImages {
bytep := CString(ri)
refImages[i] = uintptr(unsafe.Pointer(bytep))
keepAlive = append(keepAlive, bytep)
}
refImages := make([]string, refImagesCount, refImagesCount+1)
copy(refImages, opts.RefImages)
*(*uintptr)(unsafe.Add(unsafe.Pointer(&refImages), refImagesCount)) = 0
// Default strength for img2img (0.75 is a good default)
strength := float32(0.75)
@@ -145,8 +140,6 @@ func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
TilingParamsSetEnabled(vaep, false)
ret := GenImage(p, int(opts.Step), dst, sd.cfgScale, srcImage, strength, maskImage, refImages, refImagesCount)
runtime.KeepAlive(keepAlive)
fmt.Fprintf(os.Stderr, "GenImage: %d\n", ret)
if ret != 0 {
return fmt.Errorf("inference failed")
}

View File

@@ -17,7 +17,7 @@ void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, in
void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed);
int load_model(const char *model, char *model_path, char* options[], int threads, int diffusionModel);
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char* ref_images[], int ref_images_count);
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char **ref_images, int ref_images_count);
#ifdef __cplusplus
}
#endif

View File

@@ -6,7 +6,6 @@
set -e
CURDIR=$(dirname "$(realpath $0)")
REPO_ROOT="${CURDIR}/../../.."
# Create lib directory
mkdir -p $CURDIR/package/lib
@@ -51,15 +50,6 @@ else
exit 1
fi
# Package GPU libraries based on BUILD_TYPE
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
if [ -f "$GPU_LIB_SCRIPT" ]; then
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
package_gpu_libs
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

View File

@@ -3,5 +3,5 @@ sources/
build/
package/
whisper
*.so
compile_commands.json
libgowhisper.so

View File

@@ -8,7 +8,7 @@ JOBS?=$(shell nproc --ignore=1)
# whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=679bdb53dbcbfb3e42685f50c7ff367949fd4d48
WHISPER_CPP_VERSION?=d9b7613b34a343848af572cc14467fc5e82fc788
SO_TARGET?=libgowhisper.so
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF

View File

@@ -107,7 +107,7 @@ int vad(float pcmf32[], size_t pcmf32_len, float **segs_out,
}
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len, char *prompt) {
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len) {
whisper_full_params wparams =
whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
@@ -122,10 +122,8 @@ int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
wparams.debug_mode = true;
wparams.print_progress = true;
wparams.tdrz_enable = tdrz;
wparams.initial_prompt = prompt;
fprintf(stderr, "info: Enable tdrz: %d\n", tdrz);
fprintf(stderr, "info: Initial prompt: \"%s\"\n", prompt);
if (whisper_full(ctx, wparams, pcmf32, pcmf32_len)) {
fprintf(stderr, "error: transcription failed\n");

View File

@@ -17,7 +17,7 @@ var (
CppLoadModel func(modelPath string) int
CppLoadModelVAD func(modelPath string) int
CppVAD func(pcmf32 []float32, pcmf32Size uintptr, segsOut unsafe.Pointer, segsOutLen unsafe.Pointer) int
CppTranscribe func(threads uint32, lang string, translate bool, diarize bool, pcmf32 []float32, pcmf32Len uintptr, segsOutLen unsafe.Pointer, prompt string) int
CppTranscribe func(threads uint32, lang string, translate bool, diarize bool, pcmf32 []float32, pcmf32Len uintptr, segsOutLen unsafe.Pointer) int
CppGetSegmentText func(i int) string
CppGetSegmentStart func(i int) int64
CppGetSegmentEnd func(i int) int64
@@ -123,7 +123,7 @@ func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptR
segsLen := uintptr(0xdeadbeef)
segsLenPtr := unsafe.Pointer(&segsLen)
if ret := CppTranscribe(opts.Threads, opts.Language, opts.Translate, opts.Diarize, data, uintptr(len(data)), segsLenPtr, opts.Prompt); ret != 0 {
if ret := CppTranscribe(opts.Threads, opts.Language, opts.Translate, opts.Diarize, data, uintptr(len(data)), segsLenPtr); ret != 0 {
return pb.TranscriptResult{}, fmt.Errorf("failed to transcribe")
}

View File

@@ -7,8 +7,7 @@ int load_model_vad(const char *const model_path);
int vad(float pcmf32[], size_t pcmf32_size, float **segs_out,
size_t *segs_out_len);
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len,
char *prompt);
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len);
const char *get_segment_text(int i);
int64_t get_segment_t0(int i);
int64_t get_segment_t1(int i);

View File

@@ -6,7 +6,6 @@
set -e
CURDIR=$(dirname "$(realpath $0)")
REPO_ROOT="${CURDIR}/../../.."
# Create lib directory
mkdir -p $CURDIR/package/lib
@@ -51,15 +50,6 @@ else
exit 1
fi
# Package GPU libraries based on BUILD_TYPE
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
if [ -f "$GPU_LIB_SCRIPT" ]; then
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
package_gpu_libs
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/

View File

@@ -25,10 +25,7 @@
metal: "metal-llama-cpp"
vulkan: "vulkan-llama-cpp"
nvidia-l4t: "nvidia-l4t-arm64-llama-cpp"
nvidia-cuda-13: "cuda13-llama-cpp"
nvidia-cuda-12: "cuda12-llama-cpp"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-llama-cpp"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-llama-cpp"
darwin-x86: "darwin-x86-llama-cpp"
- &whispercpp
name: "whisper"
alias: "whisper"
@@ -52,10 +49,6 @@
amd: "rocm-whisper"
vulkan: "vulkan-whisper"
nvidia-l4t: "nvidia-l4t-arm64-whisper"
nvidia-cuda-13: "cuda13-whisper"
nvidia-cuda-12: "cuda12-whisper"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-whisper"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-whisper"
- &stablediffusionggml
name: "stablediffusion-ggml"
alias: "stablediffusion-ggml"
@@ -80,10 +73,7 @@
vulkan: "vulkan-stablediffusion-ggml"
nvidia-l4t: "nvidia-l4t-arm64-stablediffusion-ggml"
metal: "metal-stablediffusion-ggml"
nvidia-cuda-13: "cuda13-stablediffusion-ggml"
nvidia-cuda-12: "cuda12-stablediffusion-ggml"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-stablediffusion-ggml"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-stablediffusion-ggml"
# darwin-x86: "darwin-x86-stablediffusion-ggml"
- &rfdetr
name: "rfdetr"
alias: "rfdetr"
@@ -106,9 +96,6 @@
#amd: "rocm-rfdetr"
nvidia-l4t: "nvidia-l4t-arm64-rfdetr"
default: "cpu-rfdetr"
nvidia-cuda-13: "cuda13-rfdetr"
nvidia-cuda-12: "cuda12-rfdetr"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-rfdetr"
- &vllm
name: "vllm"
license: apache-2.0
@@ -141,7 +128,6 @@
nvidia: "cuda12-vllm"
amd: "rocm-vllm"
intel: "intel-vllm"
nvidia-cuda-12: "cuda12-vllm"
- &mlx
name: "mlx"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-mlx"
@@ -215,8 +201,6 @@
nvidia: "cuda12-transformers"
intel: "intel-transformers"
amd: "rocm-transformers"
nvidia-cuda-13: "cuda13-transformers"
nvidia-cuda-12: "cuda12-transformers"
- &diffusers
name: "diffusers"
icon: https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/en/imgs/diffusers_library.jpg
@@ -237,10 +221,6 @@
nvidia-l4t: "nvidia-l4t-diffusers"
metal: "metal-diffusers"
default: "cpu-diffusers"
nvidia-cuda-13: "cuda13-diffusers"
nvidia-cuda-12: "cuda12-diffusers"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-diffusers"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-diffusers"
- &exllama2
name: "exllama2"
urls:
@@ -256,7 +236,6 @@
capabilities:
nvidia: "cuda12-exllama2"
intel: "intel-exllama2"
nvidia-cuda-12: "cuda12-exllama2"
- &faster-whisper
icon: https://avatars.githubusercontent.com/u/1520500?s=200&v=4
description: |
@@ -273,26 +252,6 @@
nvidia: "cuda12-faster-whisper"
intel: "intel-faster-whisper"
amd: "rocm-faster-whisper"
nvidia-cuda-13: "cuda13-faster-whisper"
nvidia-cuda-12: "cuda12-faster-whisper"
- &moonshine
description: |
Moonshine is a fast, accurate, and efficient speech-to-text transcription model using ONNX Runtime.
It provides real-time transcription capabilities with support for multiple model sizes and GPU acceleration.
urls:
- https://github.com/moonshine-ai/moonshine
tags:
- speech-to-text
- transcription
- ONNX
license: MIT
name: "moonshine"
alias: "moonshine"
capabilities:
nvidia: "cuda12-moonshine"
default: "cpu-moonshine"
nvidia-cuda-13: "cuda13-moonshine"
nvidia-cuda-12: "cuda12-moonshine"
- &kokoro
icon: https://avatars.githubusercontent.com/u/166769057?v=4
description: |
@@ -312,9 +271,6 @@
intel: "intel-kokoro"
amd: "rocm-kokoro"
nvidia-l4t: "nvidia-l4t-kokoro"
nvidia-cuda-13: "cuda13-kokoro"
nvidia-cuda-12: "cuda12-kokoro"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-kokoro"
- &coqui
urls:
- https://github.com/idiap/coqui-ai-TTS
@@ -336,8 +292,6 @@
nvidia: "cuda12-coqui"
intel: "intel-coqui"
amd: "rocm-coqui"
nvidia-cuda-13: "cuda13-coqui"
nvidia-cuda-12: "cuda12-coqui"
icon: https://avatars.githubusercontent.com/u/1338804?s=200&v=4
- &bark
urls:
@@ -354,8 +308,6 @@
cuda: "cuda12-bark"
intel: "intel-bark"
rocm: "rocm-bark"
nvidia-cuda-13: "cuda13-bark"
nvidia-cuda-12: "cuda12-bark"
icon: https://avatars.githubusercontent.com/u/99442120?s=200&v=4
- &barkcpp
urls:
@@ -402,32 +354,6 @@
metal: "metal-chatterbox"
default: "cpu-chatterbox"
nvidia-l4t: "nvidia-l4t-arm64-chatterbox"
nvidia-cuda-13: "cuda13-chatterbox"
nvidia-cuda-12: "cuda12-chatterbox"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-chatterbox"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-chatterbox"
- &vibevoice
urls:
- https://github.com/microsoft/VibeVoice
description: |
VibeVoice-Realtime is a real-time text-to-speech model that generates natural-sounding speech.
tags:
- text-to-speech
- TTS
license: mit
name: "vibevoice"
alias: "vibevoice"
capabilities:
nvidia: "cuda12-vibevoice"
intel: "intel-vibevoice"
amd: "rocm-vibevoice"
nvidia-l4t: "nvidia-l4t-vibevoice"
default: "cpu-vibevoice"
nvidia-cuda-13: "cuda13-vibevoice"
nvidia-cuda-12: "cuda12-vibevoice"
nvidia-l4t-cuda-12: "nvidia-l4t-vibevoice"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-vibevoice"
icon: https://avatars.githubusercontent.com/u/6154722?s=200&v=4
- &piper
name: "piper"
uri: "quay.io/go-skynet/local-ai-backends:latest-piper"
@@ -516,8 +442,6 @@
nvidia: "cuda12-neutts"
amd: "rocm-neutts"
nvidia-l4t: "nvidia-l4t-neutts"
nvidia-cuda-12: "cuda12-neutts"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-neutts"
- !!merge <<: *neutts
name: "neutts-development"
capabilities:
@@ -525,22 +449,6 @@
nvidia: "cuda12-neutts-development"
amd: "rocm-neutts-development"
nvidia-l4t: "nvidia-l4t-neutts-development"
nvidia-cuda-12: "cuda12-neutts-development"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-neutts-development"
- !!merge <<: *llamacpp
name: "llama-cpp-development"
capabilities:
default: "cpu-llama-cpp-development"
nvidia: "cuda12-llama-cpp-development"
intel: "intel-sycl-f16-llama-cpp-development"
amd: "rocm-llama-cpp-development"
metal: "metal-llama-cpp-development"
vulkan: "vulkan-llama-cpp-development"
nvidia-l4t: "nvidia-l4t-arm64-llama-cpp-development"
nvidia-cuda-13: "cuda13-llama-cpp-development"
nvidia-cuda-12: "cuda12-llama-cpp-development"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-llama-cpp-development"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-llama-cpp-development"
- !!merge <<: *neutts
name: "cpu-neutts"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-neutts"
@@ -557,7 +465,7 @@
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-neutts
- !!merge <<: *neutts
name: "nvidia-l4t-arm64-neutts"
name: "nvidia-l4t-neutts"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-neutts"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-neutts
@@ -577,7 +485,7 @@
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-neutts
- !!merge <<: *neutts
name: "nvidia-l4t-arm64-neutts-development"
name: "nvidia-l4t-neutts-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-neutts"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-neutts
@@ -622,6 +530,16 @@
mirrors:
- localai/localai-backends:master-piper
## llama-cpp
- !!merge <<: *llamacpp
name: "darwin-x86-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-darwin-x86-llama-cpp"
mirrors:
- localai/localai-backends:latest-darwin-x86-llama-cpp
- !!merge <<: *llamacpp
name: "darwin-x86-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-darwin-x86-llama-cpp"
mirrors:
- localai/localai-backends:master-darwin-x86-llama-cpp
- !!merge <<: *llamacpp
name: "nvidia-l4t-arm64-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-llama-cpp"
@@ -632,16 +550,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-llama-cpp"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "cuda13-nvidia-l4t-arm64-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-llama-cpp"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "cuda13-nvidia-l4t-arm64-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-llama-cpp"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "cpu-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-llama-cpp"
@@ -652,6 +560,11 @@
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-llama-cpp"
mirrors:
- localai/localai-backends:master-cpu-llama-cpp
- !!merge <<: *llamacpp
name: "cuda11-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-llama-cpp
- !!merge <<: *llamacpp
name: "cuda12-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-llama-cpp"
@@ -692,6 +605,11 @@
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-llama-cpp"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-llama-cpp
- !!merge <<: *llamacpp
name: "cuda11-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-llama-cpp
- !!merge <<: *llamacpp
name: "cuda12-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-llama-cpp"
@@ -712,16 +630,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-llama-cpp
- !!merge <<: *llamacpp
name: "cuda13-llama-cpp"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-llama-cpp"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-llama-cpp
- !!merge <<: *llamacpp
name: "cuda13-llama-cpp-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-llama-cpp"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-llama-cpp
## whisper
- !!merge <<: *whispercpp
name: "nvidia-l4t-arm64-whisper"
@@ -733,16 +641,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-whisper"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-whisper
- !!merge <<: *whispercpp
name: "cuda13-nvidia-l4t-arm64-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-whisper"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-whisper
- !!merge <<: *whispercpp
name: "cuda13-nvidia-l4t-arm64-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-whisper"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-whisper
- !!merge <<: *whispercpp
name: "cpu-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-whisper"
@@ -763,6 +661,11 @@
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-whisper"
mirrors:
- localai/localai-backends:master-cpu-whisper
- !!merge <<: *whispercpp
name: "cuda11-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-whisper"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-whisper
- !!merge <<: *whispercpp
name: "cuda12-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-whisper"
@@ -803,6 +706,11 @@
uri: "quay.io/go-skynet/local-ai-backends:master-metal-darwin-arm64-whisper"
mirrors:
- localai/localai-backends:master-metal-darwin-arm64-whisper
- !!merge <<: *whispercpp
name: "cuda11-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-whisper"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-whisper
- !!merge <<: *whispercpp
name: "cuda12-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-whisper"
@@ -823,16 +731,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-whisper"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-whisper
- !!merge <<: *whispercpp
name: "cuda13-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-whisper"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-whisper
- !!merge <<: *whispercpp
name: "cuda13-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-whisper"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-whisper
## stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cpu-stablediffusion-ggml"
@@ -877,6 +775,11 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-gpu-intel-sycl-f16-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda11-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda12-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-stablediffusion-ggml"
@@ -892,6 +795,11 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-intel-sycl-f16-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda11-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "nvidia-l4t-arm64-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-stablediffusion-ggml"
@@ -902,26 +810,6 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda13-nvidia-l4t-arm64-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda13-nvidia-l4t-arm64-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda13-stablediffusion-ggml"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-stablediffusion-ggml"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-stablediffusion-ggml
- !!merge <<: *stablediffusionggml
name: "cuda13-stablediffusion-ggml-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-stablediffusion-ggml"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-stablediffusion-ggml
# vllm
- !!merge <<: *vllm
name: "vllm-development"
@@ -968,7 +856,6 @@
#amd: "rocm-rfdetr-development"
nvidia-l4t: "nvidia-l4t-arm64-rfdetr-development"
default: "cpu-rfdetr-development"
nvidia-cuda-13: "cuda13-rfdetr-development"
- !!merge <<: *rfdetr
name: "cuda12-rfdetr"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-rfdetr"
@@ -989,11 +876,6 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-rfdetr"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-rfdetr
- !!merge <<: *rfdetr
name: "nvidia-l4t-arm64-rfdetr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-rfdetr"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-rfdetr
- !!merge <<: *rfdetr
name: "cpu-rfdetr"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-rfdetr"
@@ -1024,16 +906,6 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-rfdetr"
mirrors:
- localai/localai-backends:latest-gpu-intel-rfdetr
- !!merge <<: *rfdetr
name: "cuda13-rfdetr"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-rfdetr"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-rfdetr
- !!merge <<: *rfdetr
name: "cuda13-rfdetr-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-rfdetr"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-rfdetr
## Rerankers
- !!merge <<: *rerankers
name: "rerankers-development"
@@ -1041,7 +913,11 @@
nvidia: "cuda12-rerankers-development"
intel: "intel-rerankers-development"
amd: "rocm-rerankers-development"
nvidia-cuda-13: "cuda13-rerankers-development"
- !!merge <<: *rerankers
name: "cuda11-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-rerankers
- !!merge <<: *rerankers
name: "cuda12-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-rerankers"
@@ -1057,6 +933,11 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-rerankers
- !!merge <<: *rerankers
name: "cuda11-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-rerankers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-rerankers
- !!merge <<: *rerankers
name: "cuda12-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-rerankers"
@@ -1072,16 +953,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-rerankers"
mirrors:
- localai/localai-backends:master-gpu-intel-rerankers
- !!merge <<: *rerankers
name: "cuda13-rerankers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-rerankers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-rerankers
- !!merge <<: *rerankers
name: "cuda13-rerankers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-rerankers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-rerankers
## Transformers
- !!merge <<: *transformers
name: "transformers-development"
@@ -1089,7 +960,6 @@
nvidia: "cuda12-transformers-development"
intel: "intel-transformers-development"
amd: "rocm-transformers-development"
nvidia-cuda-13: "cuda13-transformers-development"
- !!merge <<: *transformers
name: "cuda12-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-transformers"
@@ -1105,6 +975,16 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-transformers"
mirrors:
- localai/localai-backends:latest-gpu-intel-transformers
- !!merge <<: *transformers
name: "cuda11-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-transformers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-transformers
- !!merge <<: *transformers
name: "cuda11-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-transformers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-transformers
- !!merge <<: *transformers
name: "cuda12-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-transformers"
@@ -1120,16 +1000,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-transformers"
mirrors:
- localai/localai-backends:master-gpu-intel-transformers
- !!merge <<: *transformers
name: "cuda13-transformers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-transformers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-transformers
- !!merge <<: *transformers
name: "cuda13-transformers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-transformers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-transformers
## Diffusers
- !!merge <<: *diffusers
name: "diffusers-development"
@@ -1140,7 +1010,6 @@
nvidia-l4t: "nvidia-l4t-diffusers-development"
metal: "metal-diffusers-development"
default: "cpu-diffusers-development"
nvidia-cuda-13: "cuda13-diffusers-development"
- !!merge <<: *diffusers
name: "cpu-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-diffusers"
@@ -1153,24 +1022,14 @@
- localai/localai-backends:master-cpu-diffusers
- !!merge <<: *diffusers
name: "nvidia-l4t-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-diffusers"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-diffusers
- localai/localai-backends:latest-gpu-nvidia-l4t-diffusers
- !!merge <<: *diffusers
name: "nvidia-l4t-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-l4t-diffusers"
mirrors:
- localai/localai-backends:master-nvidia-l4t-diffusers
- !!merge <<: *diffusers
name: "cuda13-nvidia-l4t-arm64-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-diffusers"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-diffusers
- !!merge <<: *diffusers
name: "cuda13-nvidia-l4t-arm64-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-diffusers"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-diffusers
- localai/localai-backends:master-gpu-nvidia-l4t-diffusers
- !!merge <<: *diffusers
name: "cuda12-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-diffusers"
@@ -1181,11 +1040,21 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-diffusers
- !!merge <<: *diffusers
name: "cuda11-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-diffusers
- !!merge <<: *diffusers
name: "intel-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-intel-diffusers
- !!merge <<: *diffusers
name: "cuda11-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-diffusers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-diffusers
- !!merge <<: *diffusers
name: "cuda12-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-diffusers"
@@ -1201,16 +1070,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-diffusers"
mirrors:
- localai/localai-backends:master-gpu-intel-diffusers
- !!merge <<: *diffusers
name: "cuda13-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-diffusers"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-diffusers
- !!merge <<: *diffusers
name: "cuda13-diffusers-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-diffusers"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-diffusers
- !!merge <<: *diffusers
name: "metal-diffusers"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-diffusers"
@@ -1227,11 +1086,21 @@
capabilities:
nvidia: "cuda12-exllama2-development"
intel: "intel-exllama2-development"
- !!merge <<: *exllama2
name: "cuda11-exllama2"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-exllama2"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-exllama2
- !!merge <<: *exllama2
name: "cuda12-exllama2"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-exllama2"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-exllama2
- !!merge <<: *exllama2
name: "cuda11-exllama2-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-exllama2"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-exllama2
- !!merge <<: *exllama2
name: "cuda12-exllama2-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-exllama2"
@@ -1245,6 +1114,11 @@
intel: "intel-kokoro-development"
amd: "rocm-kokoro-development"
nvidia-l4t: "nvidia-l4t-kokoro-development"
- !!merge <<: *kokoro
name: "cuda11-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-kokoro"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-kokoro
- !!merge <<: *kokoro
name: "cuda12-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-kokoro"
@@ -1267,14 +1141,19 @@
- localai/localai-backends:master-gpu-intel-kokoro
- !!merge <<: *kokoro
name: "nvidia-l4t-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-kokoro"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-kokoro
- localai/localai-backends:latest-gpu-nvidia-l4t-kokoro
- !!merge <<: *kokoro
name: "nvidia-l4t-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-l4t-kokoro"
mirrors:
- localai/localai-backends:master-nvidia-l4t-kokoro
- localai/localai-backends:master-gpu-nvidia-l4t-kokoro
- !!merge <<: *kokoro
name: "cuda11-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-kokoro
- !!merge <<: *kokoro
name: "cuda12-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-kokoro"
@@ -1285,16 +1164,6 @@
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-kokoro
- !!merge <<: *kokoro
name: "cuda13-kokoro"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-kokoro"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-kokoro
- !!merge <<: *kokoro
name: "cuda13-kokoro-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-kokoro"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-kokoro
## faster-whisper
- !!merge <<: *faster-whisper
name: "faster-whisper-development"
@@ -1302,7 +1171,11 @@
nvidia: "cuda12-faster-whisper-development"
intel: "intel-faster-whisper-development"
amd: "rocm-faster-whisper-development"
nvidia-cuda-13: "cuda13-faster-whisper-development"
- !!merge <<: *faster-whisper
name: "cuda11-faster-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-faster-whisper"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-faster-whisper
- !!merge <<: *faster-whisper
name: "cuda12-faster-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-faster-whisper"
@@ -1323,54 +1196,6 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-faster-whisper"
mirrors:
- localai/localai-backends:master-gpu-intel-faster-whisper
- !!merge <<: *faster-whisper
name: "cuda13-faster-whisper"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-faster-whisper"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-faster-whisper
- !!merge <<: *faster-whisper
name: "cuda13-faster-whisper-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-faster-whisper"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-faster-whisper
## moonshine
- !!merge <<: *moonshine
name: "moonshine-development"
capabilities:
nvidia: "cuda12-moonshine-development"
default: "cpu-moonshine-development"
nvidia-cuda-13: "cuda13-moonshine-development"
nvidia-cuda-12: "cuda12-moonshine-development"
- !!merge <<: *moonshine
name: "cpu-moonshine"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-moonshine"
mirrors:
- localai/localai-backends:latest-cpu-moonshine
- !!merge <<: *moonshine
name: "cpu-moonshine-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-moonshine"
mirrors:
- localai/localai-backends:master-cpu-moonshine
- !!merge <<: *moonshine
name: "cuda12-moonshine"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-moonshine"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-moonshine
- !!merge <<: *moonshine
name: "cuda12-moonshine-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-moonshine"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-moonshine
- !!merge <<: *moonshine
name: "cuda13-moonshine"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-moonshine"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-moonshine
- !!merge <<: *moonshine
name: "cuda13-moonshine-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-moonshine"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-moonshine
## coqui
- !!merge <<: *coqui
@@ -1379,11 +1204,21 @@
nvidia: "cuda12-coqui-development"
intel: "intel-coqui-development"
amd: "rocm-coqui-development"
- !!merge <<: *coqui
name: "cuda11-coqui"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-coqui"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-coqui
- !!merge <<: *coqui
name: "cuda12-coqui"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-coqui"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-coqui
- !!merge <<: *coqui
name: "cuda11-coqui-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-coqui"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-coqui
- !!merge <<: *coqui
name: "cuda12-coqui-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-coqui"
@@ -1416,6 +1251,16 @@
nvidia: "cuda12-bark-development"
intel: "intel-bark-development"
amd: "rocm-bark-development"
- !!merge <<: *bark
name: "cuda11-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-bark"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-bark
- !!merge <<: *bark
name: "cuda11-bark"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-bark"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-bark
- !!merge <<: *bark
name: "rocm-bark-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-bark"
@@ -1458,10 +1303,6 @@
metal: "metal-chatterbox-development"
default: "cpu-chatterbox-development"
nvidia-l4t: "nvidia-l4t-arm64-chatterbox"
nvidia-cuda-13: "cuda13-chatterbox-development"
nvidia-cuda-12: "cuda12-chatterbox-development"
nvidia-l4t-cuda-12: "nvidia-l4t-arm64-chatterbox"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-chatterbox"
- !!merge <<: *chatterbox
name: "cpu-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-chatterbox"
@@ -1474,14 +1315,14 @@
- localai/localai-backends:master-cpu-chatterbox
- !!merge <<: *chatterbox
name: "nvidia-l4t-arm64-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-arm64-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-l4t-arm64-chatterbox"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-arm64-chatterbox
- localai/localai-backends:latest-gpu-nvidia-l4t-arm64-chatterbox
- !!merge <<: *chatterbox
name: "nvidia-l4t-arm64-chatterbox-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-arm64-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-l4t-arm64-chatterbox"
mirrors:
- localai/localai-backends:master-nvidia-l4t-arm64-chatterbox
- localai/localai-backends:master-gpu-nvidia-l4t-arm64-chatterbox
- !!merge <<: *chatterbox
name: "metal-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-metal-darwin-arm64-chatterbox"
@@ -1497,111 +1338,18 @@
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-chatterbox"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-chatterbox
- !!merge <<: *chatterbox
name: "cuda11-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-11-chatterbox"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-11-chatterbox
- !!merge <<: *chatterbox
name: "cuda11-chatterbox-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-11-chatterbox"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-11-chatterbox
- !!merge <<: *chatterbox
name: "cuda12-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-chatterbox"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-chatterbox
- !!merge <<: *chatterbox
name: "cuda13-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-chatterbox"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-chatterbox
- !!merge <<: *chatterbox
name: "cuda13-chatterbox-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-chatterbox"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-chatterbox
- !!merge <<: *chatterbox
name: "cuda13-nvidia-l4t-arm64-chatterbox"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-chatterbox"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-chatterbox
- !!merge <<: *chatterbox
name: "cuda13-nvidia-l4t-arm64-chatterbox-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-chatterbox"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-chatterbox
## vibevoice
- !!merge <<: *vibevoice
name: "vibevoice-development"
capabilities:
nvidia: "cuda12-vibevoice-development"
intel: "intel-vibevoice-development"
amd: "rocm-vibevoice-development"
nvidia-l4t: "nvidia-l4t-vibevoice-development"
default: "cpu-vibevoice-development"
nvidia-cuda-13: "cuda13-vibevoice-development"
nvidia-cuda-12: "cuda12-vibevoice-development"
nvidia-l4t-cuda-12: "nvidia-l4t-vibevoice-development"
nvidia-l4t-cuda-13: "cuda13-nvidia-l4t-arm64-vibevoice-development"
- !!merge <<: *vibevoice
name: "cpu-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-cpu-vibevoice"
mirrors:
- localai/localai-backends:latest-cpu-vibevoice
- !!merge <<: *vibevoice
name: "cpu-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-cpu-vibevoice"
mirrors:
- localai/localai-backends:master-cpu-vibevoice
- !!merge <<: *vibevoice
name: "cuda12-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-12-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-12-vibevoice
- !!merge <<: *vibevoice
name: "cuda12-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-12-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-12-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-nvidia-cuda-13-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-nvidia-cuda-13-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-nvidia-cuda-13-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-nvidia-cuda-13-vibevoice
- !!merge <<: *vibevoice
name: "intel-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-intel-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-intel-vibevoice
- !!merge <<: *vibevoice
name: "intel-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-intel-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-intel-vibevoice
- !!merge <<: *vibevoice
name: "rocm-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-gpu-rocm-hipblas-vibevoice"
mirrors:
- localai/localai-backends:latest-gpu-rocm-hipblas-vibevoice
- !!merge <<: *vibevoice
name: "rocm-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-gpu-rocm-hipblas-vibevoice"
mirrors:
- localai/localai-backends:master-gpu-rocm-hipblas-vibevoice
- !!merge <<: *vibevoice
name: "nvidia-l4t-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-vibevoice"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-vibevoice
- !!merge <<: *vibevoice
name: "nvidia-l4t-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-vibevoice"
mirrors:
- localai/localai-backends:master-nvidia-l4t-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-nvidia-l4t-arm64-vibevoice"
uri: "quay.io/go-skynet/local-ai-backends:latest-nvidia-l4t-cuda-13-arm64-vibevoice"
mirrors:
- localai/localai-backends:latest-nvidia-l4t-cuda-13-arm64-vibevoice
- !!merge <<: *vibevoice
name: "cuda13-nvidia-l4t-arm64-vibevoice-development"
uri: "quay.io/go-skynet/local-ai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice"
mirrors:
- localai/localai-backends:master-nvidia-l4t-cuda-13-arm64-vibevoice

View File

@@ -85,7 +85,7 @@ runUnittests
The build system automatically detects and configures for different hardware:
- **CPU** - Standard CPU-only builds
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 12/13)
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 11/12)
- **Intel** - Intel XPU/GPU optimization
- **MLX** - Apple Silicon (M1/M2/M3) optimization
- **HIP** - AMD GPU acceleration
@@ -95,8 +95,8 @@ The build system automatically detects and configures for different hardware:
Backends can specify hardware-specific dependencies (a selection sketch follows the list):
- `requirements.txt` - Base requirements
- `requirements-cpu.txt` - CPU-specific packages
- `requirements-cublas11.txt` - CUDA 11 packages
- `requirements-cublas12.txt` - CUDA 12 packages
- `requirements-cublas13.txt` - CUDA 13 packages
- `requirements-intel.txt` - Intel-optimized packages
- `requirements-mps.txt` - Apple Silicon packages
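As a rough illustration of how these files layer at install time, here is a Python sketch assuming `BUILD_TYPE` and `CUDA_MAJOR_VERSION` drive the choice; the authoritative logic is `installRequirements` in the shared `libbackend.sh`, and the profile derivation below is a simplification:

```python
import os
from pathlib import Path

def candidate_requirements(backend_dir: str):
    """Hypothetical sketch: list requirements files in install order,
    mirroring the BUILD_TYPE/BUILD_PROFILE layering described above."""
    build_type = os.environ.get("BUILD_TYPE", "")          # e.g. "cublas"
    cuda_major = os.environ.get("CUDA_MAJOR_VERSION", "")  # e.g. "12"
    profile = (f"{build_type}{cuda_major}"
               if build_type == "cublas" and cuda_major
               else build_type or "cpu")
    names = ["requirements.txt",
             f"requirements-{build_type}.txt" if build_type else None,
             f"requirements-{profile}.txt"]
    # Keep only the files that actually exist in the backend directory
    return [p for n in names if n and (p := Path(backend_dir, n)).exists()]

# BUILD_TYPE=cublas CUDA_MAJOR_VERSION=12 -> requirements-cublas12.txt is layered last
```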

View File

@@ -0,0 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.4.1+cu118
torchaudio==2.4.1+cu118
transformers
accelerate

View File

@@ -1,5 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
torch==2.8.0+rocm6.4
torchaudio==2.8.0+rocm6.4
--extra-index-url https://download.pytorch.org/whl/rocm6.0
torch==2.4.1+rocm6.0
torchaudio==2.4.1+rocm6.0
transformers
accelerate

View File

@@ -1,8 +1,8 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
torchaudio
transformers
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.6.0+cu118
torchaudio==2.6.0+cu118
transformers==4.46.3
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate
accelerate

View File

@@ -1,6 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
torch==2.9.1+rocm6.4
torchaudio==2.9.1+rocm6.4
--extra-index-url https://download.pytorch.org/whl/rocm6.0
torch==2.6.0+rocm6.1
torchaudio==2.6.0+rocm6.1
transformers
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289

View File

@@ -1,7 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
torchaudio
transformers
numpy>=1.24.0,<1.26.0
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -euo pipefail
#
#
# use the library by adding the following line to a script:
# source $(dirname $0)/../common/libbackend.sh
#
@@ -206,12 +206,12 @@ function init() {
# getBuildProfile will inspect the system to determine which build profile is appropriate:
# returns one of the following:
# - cublas11
# - cublas12
# - cublas13
# - hipblas
# - intel
function getBuildProfile() {
if [ x"${BUILD_TYPE:-}" == "xcublas" ] || [ x"${BUILD_TYPE:-}" == "xl4t" ]; then
if [ x"${BUILD_TYPE:-}" == "xcublas" ]; then
if [ ! -z "${CUDA_MAJOR_VERSION:-}" ]; then
echo ${BUILD_TYPE}${CUDA_MAJOR_VERSION}
else
@@ -237,14 +237,7 @@ function getBuildProfile() {
# Make the venv relocatable:
# - rewrite venv/bin/python{,3} to relative symlinks into $(_portable_dir)
# - normalize entrypoint shebangs to /usr/bin/env python3
# - optionally update pyvenv.cfg to point to the portable Python directory (only at runtime)
# Usage: _makeVenvPortable [--update-pyvenv-cfg]
_makeVenvPortable() {
local update_pyvenv_cfg=false
if [ "${1:-}" = "--update-pyvenv-cfg" ]; then
update_pyvenv_cfg=true
fi
local venv_dir="${EDIR}/venv"
local vbin="${venv_dir}/bin"
@@ -262,39 +255,7 @@ _makeVenvPortable() {
ln -s "${rel_py}" "${vbin}/python3"
ln -s "python3" "${vbin}/python"
# 2) Update pyvenv.cfg to point to the portable Python directory (only at runtime)
# Use absolute path resolved at runtime so it works when the venv is copied
if [ "$update_pyvenv_cfg" = "true" ]; then
local pyvenv_cfg="${venv_dir}/pyvenv.cfg"
if [ -f "${pyvenv_cfg}" ]; then
local portable_dir="$(_portable_dir)"
# Resolve to absolute path - this ensures it works when the backend is copied
# Only resolve if the directory exists (it should if ensurePortablePython was called)
if [ -d "${portable_dir}" ]; then
portable_dir="$(cd "${portable_dir}" && pwd)"
else
# Fallback to relative path if directory doesn't exist yet
portable_dir="../python"
fi
local sed_i=(sed -i)
# macOS/BSD sed needs a backup suffix; GNU sed doesn't. Make it portable:
if sed --version >/dev/null 2>&1; then
sed_i=(sed -i)
else
sed_i=(sed -i '')
fi
# Update the home field in pyvenv.cfg
# Handle both absolute paths (starting with /) and relative paths
if grep -q "^home = " "${pyvenv_cfg}"; then
"${sed_i[@]}" "s|^home = .*|home = ${portable_dir}|" "${pyvenv_cfg}"
else
# If home field doesn't exist, add it
echo "home = ${portable_dir}" >> "${pyvenv_cfg}"
fi
fi
fi
# 3) Rewrite shebangs of entry points to use env, so the venv is relocatable
# 2) Rewrite shebangs of entry points to use env, so the venv is relocatable
# Only touch text files that start with #! and reference the current venv.
local ve_abs="${vbin}/python"
local sed_i=(sed -i)
@@ -355,7 +316,6 @@ function ensureVenv() {
fi
fi
if [ "x${PORTABLE_PYTHON}" == "xtrue" ]; then
# During install, only update symlinks and shebangs, not pyvenv.cfg
_makeVenvPortable
fi
fi
@@ -392,7 +352,7 @@ function runProtogen() {
# - requirements-${BUILD_TYPE}.txt
# - requirements-${BUILD_PROFILE}.txt
#
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-12 or cuda-13
# BUILD_PROFILE is a more specific version of BUILD_TYPE, ex: cuda-11 or cuda-12
# it can also include some options that we do not have BUILD_TYPES for, ex: intel
#
# NOTE: for BUILD_PROFILE==intel, this function does NOT automatically use the Intel python package index.
@@ -460,19 +420,6 @@ function installRequirements() {
# - ${BACKEND_NAME}.py
function startBackend() {
ensureVenv
# Update pyvenv.cfg before running to ensure paths are correct for current location
# This is critical when the backend position is dynamic (e.g., copied from container)
if [ "x${PORTABLE_PYTHON}" == "xtrue" ] || [ -x "$(_portable_python)" ]; then
_makeVenvPortable --update-pyvenv-cfg
fi
# Set up GPU library paths if a lib directory exists
# This allows backends to include their own GPU libraries (CUDA, ROCm, etc.)
if [ -d "${EDIR}/lib" ]; then
export LD_LIBRARY_PATH="${EDIR}/lib:${LD_LIBRARY_PATH:-}"
echo "Added ${EDIR}/lib to LD_LIBRARY_PATH for GPU libraries"
fi
if [ ! -z "${BACKEND_FILE:-}" ]; then
exec "${EDIR}/venv/bin/python" "${BACKEND_FILE}" "$@"
elif [ -e "${MY_DIR}/server.py" ]; then

View File

@@ -1,2 +1,2 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
--extra-index-url https://download.pytorch.org/whl/rocm6.0
torch

View File

@@ -0,0 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.4.1+cu118
torchaudio==2.4.1+cu118
transformers==4.48.3
accelerate
coqui-tts

View File

@@ -1,6 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
torch==2.8.0+rocm6.4
torchaudio==2.8.0+rocm6.4
--extra-index-url https://download.pytorch.org/whl/rocm6.0
torch==2.4.1+rocm6.0
torchaudio==2.4.1+rocm6.0
transformers==4.48.3
accelerate
coqui-tts

View File

@@ -1,136 +1,5 @@
# LocalAI Diffusers Backend
This backend provides gRPC access to Hugging Face diffusers pipelines with dynamic pipeline loading.
## Creating a separate environment for the diffusers project
# Creating a separate environment for the diffusers project
```
make diffusers
```
## Dynamic Pipeline Loader
The diffusers backend includes a dynamic pipeline loader (`diffusers_dynamic_loader.py`) that automatically discovers and loads diffusers pipelines at runtime. This eliminates the need for per-pipeline conditional statements - new pipelines added to diffusers become available automatically without code changes.
### How It Works
1. **Pipeline Discovery**: On first use, the loader scans the `diffusers` package to find all classes that inherit from `DiffusionPipeline`.
2. **Registry Caching**: Discovery results are cached for the lifetime of the process to avoid repeated scanning.
3. **Task Aliases**: The loader automatically derives task aliases from class names (e.g., "text-to-image", "image-to-image", "inpainting") without hardcoding.
4. **Multiple Resolution Methods**: Pipelines can be resolved by:
- Exact class name (e.g., `StableDiffusionPipeline`)
- Task alias (e.g., `text-to-image`, `img2img`)
- Model ID (uses HuggingFace Hub to infer pipeline type)
### Usage Examples
```python
from diffusers_dynamic_loader import (
load_diffusers_pipeline,
get_available_pipelines,
get_available_tasks,
resolve_pipeline_class,
discover_diffusers_classes,
get_available_classes,
)
# List all available pipelines
pipelines = get_available_pipelines()
print(f"Available pipelines: {pipelines[:10]}...")
# List all task aliases
tasks = get_available_tasks()
print(f"Available tasks: {tasks}")
# Resolve a pipeline class by name
cls = resolve_pipeline_class(class_name="StableDiffusionPipeline")
# Resolve by task alias
cls = resolve_pipeline_class(task="stable-diffusion")
# Load and instantiate a pipeline
pipe = load_diffusers_pipeline(
class_name="StableDiffusionPipeline",
model_id="runwayml/stable-diffusion-v1-5",
torch_dtype=torch.float16
)
# Load from single file
pipe = load_diffusers_pipeline(
class_name="StableDiffusionPipeline",
model_id="/path/to/model.safetensors",
from_single_file=True,
torch_dtype=torch.float16
)
# Discover other diffusers classes (schedulers, models, etc.)
schedulers = discover_diffusers_classes("SchedulerMixin")
print(f"Available schedulers: {list(schedulers.keys())[:5]}...")
# Get list of available scheduler classes
scheduler_list = get_available_classes("SchedulerMixin")
```
### Generic Class Discovery
The dynamic loader can discover not just pipelines but any class type from diffusers:
```python
# Discover all scheduler classes
schedulers = discover_diffusers_classes("SchedulerMixin")
# Discover all model classes
models = discover_diffusers_classes("ModelMixin")
# Get a sorted list of available classes
scheduler_names = get_available_classes("SchedulerMixin")
```
### Special Pipeline Handling
Most pipelines are loaded dynamically through `load_diffusers_pipeline()`. Only pipelines requiring truly custom initialization logic are handled explicitly:
- `FluxTransformer2DModel`: Requires quantization and custom transformer loading (cannot use dynamic loader)
- `WanPipeline` / `WanImageToVideoPipeline`: Uses dynamic loader with special VAE (float32 dtype)
- `SanaPipeline`: Uses dynamic loader with post-load dtype conversion for VAE/text encoder
- `StableVideoDiffusionPipeline`: Uses dynamic loader with CPU offload handling
- `VideoDiffusionPipeline`: Alias for DiffusionPipeline with video flags
All other pipelines (StableDiffusionPipeline, StableDiffusionXLPipeline, FluxPipeline, etc.) are loaded purely through the dynamic loader.
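A minimal sketch of this dispatch order, assuming a hypothetical `SPECIAL_CASES` table standing in for the entries above (the real backend checks each case explicitly and also threads through request fields):
```python
def build_pipeline(pipeline_type, model_id, torch_dtype):
    # Special cases with custom initialization are checked first
    # (SPECIAL_CASES is a hypothetical dispatch table for the list above).
    if pipeline_type in SPECIAL_CASES:
        return SPECIAL_CASES[pipeline_type](model_id, torch_dtype)
    # Everything else goes through the dynamic loader.
    return load_diffusers_pipeline(
        class_name=pipeline_type or "AutoPipelineForText2Image",
        model_id=model_id,
        torch_dtype=torch_dtype,
    )
```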
### Error Handling
When a pipeline cannot be resolved, the loader provides helpful error messages listing available pipelines and tasks:
```
ValueError: Unknown pipeline class 'NonExistentPipeline'.
Available pipelines: AnimateDiffPipeline, AnimateDiffVideoToVideoPipeline, ...
```
## Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `COMPEL` | `0` | Enable Compel for prompt weighting |
| `XPU` | `0` | Enable Intel XPU support |
| `CLIPSKIP` | `1` | Enable CLIP skip support |
| `SAFETENSORS` | `1` | Use safetensors format |
| `CHUNK_SIZE` | `8` | Decode chunk size for video |
| `FPS` | `7` | Video frames per second |
| `DISABLE_CPU_OFFLOAD` | `0` | Disable CPU offload |
| `FRAMES` | `64` | Number of video frames |
| `BFL_REPO` | `ChuckMcSneed/FLUX.1-dev` | Flux base repo |
| `PYTHON_GRPC_MAX_WORKERS` | `1` | Max gRPC workers |
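A plausible sketch of how these variables are read at startup (the exact parsing in the backend may differ slightly):
```python
import os

# Boolean-style flags are enabled when set to "1"
COMPEL = os.environ.get("COMPEL", "0") == "1"
DISABLE_CPU_OFFLOAD = os.environ.get("DISABLE_CPU_OFFLOAD", "0") == "1"

# Numeric settings fall back to the documented defaults
FRAMES = int(os.environ.get("FRAMES", "64"))
FPS = int(os.environ.get("FPS", "7"))
```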
## Running Tests
```bash
./test.sh
```
The test suite includes:
- Unit tests for the dynamic loader (`test_dynamic_loader.py`)
- Integration tests for the gRPC backend (`test.py`)

View File

@@ -1,10 +1,4 @@
#!/usr/bin/env python3
"""
LocalAI Diffusers Backend
This backend provides gRPC access to diffusers pipelines with dynamic pipeline loading.
New pipelines added to diffusers become available automatically without code changes.
"""
from concurrent import futures
import traceback
import argparse
@@ -23,22 +17,14 @@ import backend_pb2_grpc
import grpc
# Import dynamic loader for pipeline discovery
from diffusers_dynamic_loader import (
get_pipeline_registry,
resolve_pipeline_class,
get_available_pipelines,
load_diffusers_pipeline,
)
# Import specific items still needed for special cases and safety checker
from diffusers import DiffusionPipeline, ControlNetModel
from diffusers import FluxPipeline, FluxTransformer2DModel, AutoencoderKLWan
from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel, QwenImageEditPipeline, AutoencoderKLWan, WanPipeline, WanImageToVideoPipeline
from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
from diffusers.pipelines.stable_diffusion import safety_checker
from diffusers.utils import load_image, export_to_video
from compel import Compel, ReturnedEmbeddingsType
from optimum.quanto import freeze, qfloat8, quantize
from transformers import T5EncoderModel
from transformers import CLIPTextModel, T5EncoderModel
from safetensors.torch import load_file
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
@@ -172,165 +158,6 @@ def get_scheduler(name: str, config: dict = {}):
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
def _load_pipeline(self, request, modelFile, fromSingleFile, torchType, variant):
"""
Load a diffusers pipeline dynamically using the dynamic loader.
This method uses load_diffusers_pipeline() for most pipelines, falling back
to explicit handling only for pipelines requiring custom initialization
(e.g., quantization, special VAE handling).
Args:
request: The gRPC request containing pipeline configuration
modelFile: Path to the model file (for single file loading)
fromSingleFile: Whether to use from_single_file() vs from_pretrained()
torchType: The torch dtype to use
variant: Model variant (e.g., "fp16")
Returns:
The loaded pipeline instance
"""
pipeline_type = request.PipelineType
# Handle IMG2IMG request flag with default pipeline
if request.IMG2IMG and pipeline_type == "":
pipeline_type = "StableDiffusionImg2ImgPipeline"
# ================================================================
# Special cases requiring custom initialization logic
# Only handle pipelines that truly need custom code (quantization,
# special VAE handling, etc.). All other pipelines use dynamic loading.
# ================================================================
# FluxTransformer2DModel - requires quantization and custom transformer loading
if pipeline_type == "FluxTransformer2DModel":
dtype = torch.bfloat16
bfl_repo = os.environ.get("BFL_REPO", "ChuckMcSneed/FLUX.1-dev")
transformer = FluxTransformer2DModel.from_single_file(modelFile, torch_dtype=dtype)
quantize(transformer, weights=qfloat8)
freeze(transformer)
text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
quantize(text_encoder_2, weights=qfloat8)
freeze(text_encoder_2)
pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, text_encoder_2=None, torch_dtype=dtype)
pipe.transformer = transformer
pipe.text_encoder_2 = text_encoder_2
if request.LowVRAM:
pipe.enable_model_cpu_offload()
return pipe
# WanPipeline - requires special VAE with float32 dtype
if pipeline_type == "WanPipeline":
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
pipe = load_diffusers_pipeline(
class_name="WanPipeline",
model_id=request.Model,
vae=vae,
torch_dtype=torchType
)
self.txt2vid = True
return pipe
# WanImageToVideoPipeline - requires special VAE with float32 dtype
if pipeline_type == "WanImageToVideoPipeline":
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
pipe = load_diffusers_pipeline(
class_name="WanImageToVideoPipeline",
model_id=request.Model,
vae=vae,
torch_dtype=torchType
)
self.img2vid = True
return pipe
# SanaPipeline - requires special VAE and text encoder dtype conversion
if pipeline_type == "SanaPipeline":
pipe = load_diffusers_pipeline(
class_name="SanaPipeline",
model_id=request.Model,
variant="bf16",
torch_dtype=torch.bfloat16
)
pipe.vae.to(torch.bfloat16)
pipe.text_encoder.to(torch.bfloat16)
return pipe
# VideoDiffusionPipeline - alias for DiffusionPipeline with txt2vid flag
if pipeline_type == "VideoDiffusionPipeline":
self.txt2vid = True
pipe = load_diffusers_pipeline(
class_name="DiffusionPipeline",
model_id=request.Model,
torch_dtype=torchType
)
return pipe
# StableVideoDiffusionPipeline - needs img2vid flag and CPU offload
if pipeline_type == "StableVideoDiffusionPipeline":
self.img2vid = True
pipe = load_diffusers_pipeline(
class_name="StableVideoDiffusionPipeline",
model_id=request.Model,
torch_dtype=torchType,
variant=variant
)
if not DISABLE_CPU_OFFLOAD:
pipe.enable_model_cpu_offload()
return pipe
# ================================================================
# Dynamic pipeline loading - the default path for most pipelines
# Uses the dynamic loader to instantiate any pipeline by class name
# ================================================================
# Build kwargs for dynamic loading
load_kwargs = {"torch_dtype": torchType}
# Add variant if not loading from single file
if not fromSingleFile and variant:
load_kwargs["variant"] = variant
# Add use_safetensors for from_pretrained
if not fromSingleFile:
load_kwargs["use_safetensors"] = SAFETENSORS
# Determine pipeline class name - default to AutoPipelineForText2Image
effective_pipeline_type = pipeline_type if pipeline_type else "AutoPipelineForText2Image"
# Use dynamic loader for all pipelines
try:
pipe = load_diffusers_pipeline(
class_name=effective_pipeline_type,
model_id=modelFile if fromSingleFile else request.Model,
from_single_file=fromSingleFile,
**load_kwargs
)
except Exception as e:
# Provide helpful error with available pipelines
available = get_available_pipelines()
raise ValueError(
f"Failed to load pipeline '{effective_pipeline_type}': {e}\n"
f"Available pipelines: {', '.join(available[:30])}..."
) from e
# Apply LowVRAM optimization if supported and requested
if request.LowVRAM and hasattr(pipe, 'enable_model_cpu_offload'):
pipe.enable_model_cpu_offload()
return pipe
def Health(self, request, context):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
@@ -404,16 +231,139 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
fromSingleFile = request.Model.startswith("http") or request.Model.startswith("/") or local
self.img2vid = False
self.txt2vid = False
## img2img
if (request.PipelineType == "StableDiffusionImg2ImgPipeline") or (request.IMG2IMG and request.PipelineType == ""):
if fromSingleFile:
self.pipe = StableDiffusionImg2ImgPipeline.from_single_file(modelFile,
torch_dtype=torchType)
else:
self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
# Load pipeline using dynamic loader
# Special cases that require custom initialization are handled first
self.pipe = self._load_pipeline(
request=request,
modelFile=modelFile,
fromSingleFile=fromSingleFile,
torchType=torchType,
variant=variant
)
elif request.PipelineType == "StableDiffusionDepth2ImgPipeline":
self.pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
## img2vid
elif request.PipelineType == "StableVideoDiffusionPipeline":
self.img2vid = True
self.pipe = StableVideoDiffusionPipeline.from_pretrained(
request.Model, torch_dtype=torchType, variant=variant
)
if not DISABLE_CPU_OFFLOAD:
self.pipe.enable_model_cpu_offload()
## text2img
elif request.PipelineType == "AutoPipelineForText2Image" or request.PipelineType == "":
self.pipe = AutoPipelineForText2Image.from_pretrained(request.Model,
torch_dtype=torchType,
use_safetensors=SAFETENSORS,
variant=variant)
elif request.PipelineType == "StableDiffusionPipeline":
if fromSingleFile:
self.pipe = StableDiffusionPipeline.from_single_file(modelFile,
torch_dtype=torchType)
else:
self.pipe = StableDiffusionPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "DiffusionPipeline":
self.pipe = DiffusionPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "QwenImageEditPipeline":
self.pipe = QwenImageEditPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "VideoDiffusionPipeline":
self.txt2vid = True
self.pipe = DiffusionPipeline.from_pretrained(request.Model,
torch_dtype=torchType)
elif request.PipelineType == "StableDiffusionXLPipeline":
if fromSingleFile:
self.pipe = StableDiffusionXLPipeline.from_single_file(modelFile,
torch_dtype=torchType,
use_safetensors=True)
else:
self.pipe = StableDiffusionXLPipeline.from_pretrained(
request.Model,
torch_dtype=torchType,
use_safetensors=True,
variant=variant)
elif request.PipelineType == "StableDiffusion3Pipeline":
if fromSingleFile:
self.pipe = StableDiffusion3Pipeline.from_single_file(modelFile,
torch_dtype=torchType,
use_safetensors=True)
else:
self.pipe = StableDiffusion3Pipeline.from_pretrained(
request.Model,
torch_dtype=torchType,
use_safetensors=True,
variant=variant)
elif request.PipelineType == "FluxPipeline":
if fromSingleFile:
self.pipe = FluxPipeline.from_single_file(modelFile,
torch_dtype=torchType,
use_safetensors=True)
else:
self.pipe = FluxPipeline.from_pretrained(
request.Model,
torch_dtype=torch.bfloat16)
if request.LowVRAM:
self.pipe.enable_model_cpu_offload()
elif request.PipelineType == "FluxTransformer2DModel":
dtype = torch.bfloat16
# specify from environment or default to "ChuckMcSneed/FLUX.1-dev"
bfl_repo = os.environ.get("BFL_REPO", "ChuckMcSneed/FLUX.1-dev")
transformer = FluxTransformer2DModel.from_single_file(modelFile, torch_dtype=dtype)
quantize(transformer, weights=qfloat8)
freeze(transformer)
text_encoder_2 = T5EncoderModel.from_pretrained(bfl_repo, subfolder="text_encoder_2", torch_dtype=dtype)
quantize(text_encoder_2, weights=qfloat8)
freeze(text_encoder_2)
self.pipe = FluxPipeline.from_pretrained(bfl_repo, transformer=None, text_encoder_2=None, torch_dtype=dtype)
self.pipe.transformer = transformer
self.pipe.text_encoder_2 = text_encoder_2
if request.LowVRAM:
self.pipe.enable_model_cpu_offload()
elif request.PipelineType == "Lumina2Text2ImgPipeline":
self.pipe = Lumina2Text2ImgPipeline.from_pretrained(
request.Model,
torch_dtype=torch.bfloat16)
if request.LowVRAM:
self.pipe.enable_model_cpu_offload()
elif request.PipelineType == "SanaPipeline":
self.pipe = SanaPipeline.from_pretrained(
request.Model,
variant="bf16",
torch_dtype=torch.bfloat16)
self.pipe.vae.to(torch.bfloat16)
self.pipe.text_encoder.to(torch.bfloat16)
elif request.PipelineType == "WanPipeline":
# WAN2.2 pipeline requires special VAE handling
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
self.pipe = WanPipeline.from_pretrained(
request.Model,
vae=vae,
torch_dtype=torchType
)
self.txt2vid = True # WAN2.2 is a text-to-video pipeline
elif request.PipelineType == "WanImageToVideoPipeline":
# WAN2.2 image-to-video pipeline
vae = AutoencoderKLWan.from_pretrained(
request.Model,
subfolder="vae",
torch_dtype=torch.float32
)
self.pipe = WanImageToVideoPipeline.from_pretrained(
request.Model,
vae=vae,
torch_dtype=torchType
)
self.img2vid = True # WAN2.2 image-to-video pipeline
if CLIPSKIP and request.CLIPSkip != 0:
self.clip_skip = request.CLIPSkip
@@ -551,12 +501,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
# create a dictionary of values for the parameters
options = {
"negative_prompt": request.negative_prompt,
"num_inference_steps": steps,
}
if hasattr(request, 'negative_prompt') and request.negative_prompt != "":
options["negative_prompt"] = request.negative_prompt
# Handle image source: prioritize RefImages over request.src
image_src = None
if hasattr(request, 'ref_images') and request.ref_images and len(request.ref_images) > 0:
@@ -580,7 +528,17 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
if CLIPSKIP and self.clip_skip != 0:
options["clip_skip"] = self.clip_skip
kwargs = {}
# Get the keys we will use to build the args for our pipe
keys = options.keys()
if request.EnableParameters != "":
keys = [key.strip() for key in request.EnableParameters.split(",")]
if request.EnableParameters == "none":
keys = []
# create a dictionary of parameters by using the keys from EnableParameters and the values from defaults
kwargs = {key: options.get(key) for key in keys if key in options}
# populate kwargs from self.options.
kwargs.update(self.options)
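# Worked example (illustrative values, not from the request schema): with
#   options = {"negative_prompt": "blurry", "num_inference_steps": 25}
#   request.EnableParameters = "negative_prompt"
# the selected keys are ["negative_prompt"], so kwargs becomes
#   {"negative_prompt": "blurry"} before self.options is merged in.
# EnableParameters == "none" disables all of the defaults above.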

View File

@@ -1,538 +0,0 @@
"""
Dynamic Diffusers Pipeline Loader
This module provides dynamic discovery and loading of diffusers pipelines at runtime,
eliminating the need for per-pipeline conditional statements. New pipelines added to
diffusers become available automatically without code changes.
The module also supports discovering other diffusers classes like schedulers, models,
and other components, making it a generic solution for dynamic class loading.
Usage:
from diffusers_dynamic_loader import load_diffusers_pipeline, get_available_pipelines
# Load by class name
pipe = load_diffusers_pipeline(class_name="StableDiffusionPipeline", model_id="...", torch_dtype=torch.float16)
# Load by task alias
pipe = load_diffusers_pipeline(task="text-to-image", model_id="...", torch_dtype=torch.float16)
# Load using model_id (infers from HuggingFace Hub if possible)
pipe = load_diffusers_pipeline(model_id="runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
# Get list of available pipelines
available = get_available_pipelines()
# Discover other diffusers classes (schedulers, models, etc.)
schedulers = discover_diffusers_classes("SchedulerMixin")
models = discover_diffusers_classes("ModelMixin")
"""
import importlib
import re
import sys
from typing import Any, Dict, List, Optional, Tuple, Type
# Global cache for discovered pipelines - computed once per process
_pipeline_registry: Optional[Dict[str, Type]] = None
_task_aliases: Optional[Dict[str, List[str]]] = None
# Global cache for other discovered class types
_class_registries: Dict[str, Dict[str, Type]] = {}
def _camel_to_kebab(name: str) -> str:
"""
Convert CamelCase to kebab-case.
Examples:
StableDiffusionPipeline -> stable-diffusion-pipeline
StableDiffusionXLImg2ImgPipeline -> stable-diffusion-xl-img2-img-pipeline
"""
# Insert hyphen before uppercase letters (but not at the start)
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', name)
# Insert hyphen before uppercase letters following lowercase letters or numbers
s2 = re.sub('([a-z0-9])([A-Z])', r'\1-\2', s1)
return s2.lower()
def _extract_task_keywords(class_name: str) -> List[str]:
"""
Extract task-related keywords from a pipeline class name.
This function derives useful task aliases from the class name without
hardcoding per-pipeline branches.
Returns a list of potential task aliases for this pipeline.
"""
aliases = []
name_lower = class_name.lower()
# Direct task mappings based on common patterns in class names
task_patterns = {
'text2image': ['text-to-image', 'txt2img', 'text2image'],
'texttoimage': ['text-to-image', 'txt2img', 'text2image'],
'txt2img': ['text-to-image', 'txt2img', 'text2image'],
'img2img': ['image-to-image', 'img2img', 'image2image'],
'image2image': ['image-to-image', 'img2img', 'image2image'],
'imagetoimage': ['image-to-image', 'img2img', 'image2image'],
'img2video': ['image-to-video', 'img2vid', 'img2video'],
'imagetovideo': ['image-to-video', 'img2vid', 'img2video'],
'text2video': ['text-to-video', 'txt2vid', 'text2video'],
'texttovideo': ['text-to-video', 'txt2vid', 'text2video'],
'inpaint': ['inpainting', 'inpaint'],
'depth2img': ['depth-to-image', 'depth2img'],
'depthtoimage': ['depth-to-image', 'depth2img'],
'controlnet': ['controlnet', 'control-net'],
'upscale': ['upscaling', 'upscale', 'super-resolution'],
'superresolution': ['upscaling', 'upscale', 'super-resolution'],
}
# Check for each pattern in the class name
for pattern, task_aliases in task_patterns.items():
if pattern in name_lower:
aliases.extend(task_aliases)
# Also detect general pipeline types from the class name structure
# E.g., StableDiffusionPipeline -> stable-diffusion, flux -> flux
# Remove "Pipeline" suffix and convert to kebab case
if class_name.endswith('Pipeline'):
base_name = class_name[:-8] # Remove "Pipeline"
kebab_name = _camel_to_kebab(base_name)
aliases.append(kebab_name)
# Extract model family name (e.g., "stable-diffusion" from "stable-diffusion-xl-img-2-img")
parts = kebab_name.split('-')
if len(parts) >= 2:
# Try the first two words as a family name
family = '-'.join(parts[:2])
if family not in aliases:
aliases.append(family)
# If no specific task pattern matched but class contains "Pipeline", add "text-to-image" as default
# since most diffusion pipelines support text-to-image generation
if 'text-to-image' not in aliases and 'image-to-image' not in aliases:
# Only add for pipelines that seem to be generation pipelines (not schedulers, etc.)
if 'pipeline' in name_lower and not any(x in name_lower for x in ['scheduler', 'processor', 'encoder']):
# Don't automatically add - let it be explicit
pass
return list(set(aliases)) # Remove duplicates
def discover_diffusers_classes(
base_class_name: str,
include_base: bool = True
) -> Dict[str, Type]:
"""
Discover all subclasses of a given base class from diffusers.
This function provides a generic way to discover any type of diffusers class,
not just pipelines. It can be used to discover schedulers, models, processors,
and other components.
Args:
base_class_name: Name of the base class to search for subclasses
(e.g., "DiffusionPipeline", "SchedulerMixin", "ModelMixin")
include_base: Whether to include the base class itself in results
Returns:
Dict mapping class names to class objects
Examples:
# Discover all pipeline classes
pipelines = discover_diffusers_classes("DiffusionPipeline")
# Discover all scheduler classes
schedulers = discover_diffusers_classes("SchedulerMixin")
# Discover all model classes
models = discover_diffusers_classes("ModelMixin")
# Discover AutoPipeline classes
auto_pipelines = discover_diffusers_classes("AutoPipelineForText2Image")
"""
global _class_registries
# Check cache first
if base_class_name in _class_registries:
return _class_registries[base_class_name]
import diffusers
# Try to get the base class from diffusers
base_class = None
try:
base_class = getattr(diffusers, base_class_name)
except AttributeError:
# Try to find in submodules
for submodule in ['schedulers', 'models', 'pipelines']:
try:
module = importlib.import_module(f'diffusers.{submodule}')
if hasattr(module, base_class_name):
base_class = getattr(module, base_class_name)
break
except (ImportError, ModuleNotFoundError):
continue
if base_class is None:
raise ValueError(f"Could not find base class '{base_class_name}' in diffusers")
registry: Dict[str, Type] = {}
# Include base class if requested
if include_base:
registry[base_class_name] = base_class
# Scan diffusers module for subclasses
for attr_name in dir(diffusers):
try:
attr = getattr(diffusers, attr_name)
if (isinstance(attr, type) and
issubclass(attr, base_class) and
(include_base or attr is not base_class)):
registry[attr_name] = attr
except (ImportError, AttributeError, TypeError, RuntimeError, ModuleNotFoundError):
continue
# Cache the results
_class_registries[base_class_name] = registry
return registry
def get_available_classes(base_class_name: str) -> List[str]:
"""
Get a sorted list of all discovered class names for a given base class.
Args:
base_class_name: Name of the base class (e.g., "SchedulerMixin")
Returns:
Sorted list of discovered class names
"""
return sorted(discover_diffusers_classes(base_class_name).keys())
def _discover_pipelines() -> Tuple[Dict[str, Type], Dict[str, List[str]]]:
"""
Discover all subclasses of DiffusionPipeline from diffusers.
This function uses the generic discover_diffusers_classes() internally
and adds pipeline-specific task alias generation. It also includes
AutoPipeline classes which are special utility classes for automatic
pipeline selection.
Returns:
A tuple of (pipeline_registry, task_aliases) where:
- pipeline_registry: Dict mapping class names to class objects
- task_aliases: Dict mapping task aliases to lists of class names
"""
# Use the generic discovery function
pipeline_registry = discover_diffusers_classes("DiffusionPipeline", include_base=True)
# Also add AutoPipeline classes - these are special utility classes that are
# NOT subclasses of DiffusionPipeline but are commonly used
import diffusers
auto_pipeline_classes = [
"AutoPipelineForText2Image",
"AutoPipelineForImage2Image",
"AutoPipelineForInpainting",
]
for cls_name in auto_pipeline_classes:
try:
cls = getattr(diffusers, cls_name)
if cls is not None:
pipeline_registry[cls_name] = cls
except AttributeError:
# Class not available in this version of diffusers
pass
# Generate task aliases for pipelines
task_aliases: Dict[str, List[str]] = {}
for attr_name in pipeline_registry:
if attr_name == "DiffusionPipeline":
continue # Skip base class for alias generation
aliases = _extract_task_keywords(attr_name)
for alias in aliases:
if alias not in task_aliases:
task_aliases[alias] = []
if attr_name not in task_aliases[alias]:
task_aliases[alias].append(attr_name)
return pipeline_registry, task_aliases
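# Illustrative shapes of the returned structures (exact contents vary by
# diffusers version):
#   pipeline_registry: {"StableDiffusionPipeline": <class ...>, ...}
#   task_aliases: {"img2img": ["StableDiffusionImg2ImgPipeline", ...], ...}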
def get_pipeline_registry() -> Dict[str, Type]:
"""
Get the cached pipeline registry.
Returns a dictionary mapping pipeline class names to their class objects.
The registry is built on first access and cached for subsequent calls.
"""
global _pipeline_registry, _task_aliases
if _pipeline_registry is None:
_pipeline_registry, _task_aliases = _discover_pipelines()
return _pipeline_registry
def get_task_aliases() -> Dict[str, List[str]]:
"""
Get the cached task aliases dictionary.
Returns a dictionary mapping task aliases (e.g., "text-to-image") to
lists of pipeline class names that support that task.
"""
global _pipeline_registry, _task_aliases
if _task_aliases is None:
_pipeline_registry, _task_aliases = _discover_pipelines()
return _task_aliases
def get_available_pipelines() -> List[str]:
"""
Get a sorted list of all discovered pipeline class names.
Returns:
List of pipeline class names available for loading.
"""
return sorted(get_pipeline_registry().keys())
def get_available_tasks() -> List[str]:
"""
Get a sorted list of all available task aliases.
Returns:
List of task aliases (e.g., ["text-to-image", "image-to-image", ...])
"""
return sorted(get_task_aliases().keys())
def resolve_pipeline_class(
class_name: Optional[str] = None,
task: Optional[str] = None,
model_id: Optional[str] = None
) -> Type:
"""
Resolve a pipeline class from class_name, task, or model_id.
Priority:
1. If class_name is provided, look it up directly
2. If task is provided, resolve through task aliases
3. If model_id is provided, try to infer from HuggingFace Hub
Args:
class_name: Exact pipeline class name (e.g., "StableDiffusionPipeline")
task: Task alias (e.g., "text-to-image", "img2img")
model_id: HuggingFace model ID (e.g., "runwayml/stable-diffusion-v1-5")
Returns:
The resolved pipeline class.
Raises:
ValueError: If no pipeline could be resolved.
"""
registry = get_pipeline_registry()
aliases = get_task_aliases()
# 1. Direct class name lookup
if class_name:
if class_name in registry:
return registry[class_name]
# Try case-insensitive match
for name, cls in registry.items():
if name.lower() == class_name.lower():
return cls
raise ValueError(
f"Unknown pipeline class '{class_name}'. "
f"Available pipelines: {', '.join(sorted(registry.keys())[:20])}..."
)
# 2. Task alias lookup
if task:
task_lower = task.lower().replace('_', '-')
if task_lower in aliases:
# Return the first matching pipeline for this task
matching_classes = aliases[task_lower]
if matching_classes:
return registry[matching_classes[0]]
# Try partial matching
for alias, classes in aliases.items():
if task_lower in alias or alias in task_lower:
if classes:
return registry[classes[0]]
raise ValueError(
f"Unknown task '{task}'. "
f"Available tasks: {', '.join(sorted(aliases.keys())[:20])}..."
)
# 3. Try to infer from HuggingFace Hub
if model_id:
try:
from huggingface_hub import model_info
info = model_info(model_id)
# Check pipeline_tag
if hasattr(info, 'pipeline_tag') and info.pipeline_tag:
tag = info.pipeline_tag.lower().replace('_', '-')
if tag in aliases:
matching_classes = aliases[tag]
if matching_classes:
return registry[matching_classes[0]]
# Check model card for hints
if hasattr(info, 'cardData') and info.cardData:
card = info.cardData
if 'pipeline_tag' in card:
tag = card['pipeline_tag'].lower().replace('_', '-')
if tag in aliases:
matching_classes = aliases[tag]
if matching_classes:
return registry[matching_classes[0]]
except ImportError:
# huggingface_hub not available
pass
except (KeyError, AttributeError, ValueError, OSError):
# Model info lookup failed - common cases:
# - KeyError: Missing keys in model card
# - AttributeError: Missing attributes on model info
# - ValueError: Invalid model data
# - OSError: Network or file access issues
pass
# Fallback: use DiffusionPipeline.from_pretrained which auto-detects
# DiffusionPipeline is always added to registry in _discover_pipelines (line 132)
# but use .get() with import fallback for extra safety
from diffusers import DiffusionPipeline
return registry.get('DiffusionPipeline', DiffusionPipeline)
raise ValueError(
"Must provide at least one of: class_name, task, or model_id. "
f"Available pipelines: {', '.join(sorted(registry.keys())[:20])}... "
f"Available tasks: {', '.join(sorted(aliases.keys())[:20])}..."
)
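# Illustrative calls, in resolution priority order:
#   resolve_pipeline_class(class_name="StableDiffusionPipeline")       # 1. exact class name
#   resolve_pipeline_class(task="img2img")                             # 2. task alias
#   resolve_pipeline_class(model_id="runwayml/stable-diffusion-v1-5")  # 3. Hub inference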
def load_diffusers_pipeline(
class_name: Optional[str] = None,
task: Optional[str] = None,
model_id: Optional[str] = None,
from_single_file: bool = False,
**kwargs
) -> Any:
"""
Load a diffusers pipeline dynamically.
This function resolves the appropriate pipeline class based on the provided
parameters and instantiates it with the given kwargs.
Args:
class_name: Exact pipeline class name (e.g., "StableDiffusionPipeline")
task: Task alias (e.g., "text-to-image", "img2img")
model_id: HuggingFace model ID or local path
from_single_file: If True, use from_single_file() instead of from_pretrained()
**kwargs: Additional arguments passed to from_pretrained() or from_single_file()
Returns:
An instantiated pipeline object.
Raises:
ValueError: If no pipeline could be resolved.
Exception: If pipeline loading fails.
Examples:
# Load by class name
pipe = load_diffusers_pipeline(
class_name="StableDiffusionPipeline",
model_id="runwayml/stable-diffusion-v1-5",
torch_dtype=torch.float16
)
# Load by task
pipe = load_diffusers_pipeline(
task="text-to-image",
model_id="runwayml/stable-diffusion-v1-5",
torch_dtype=torch.float16
)
# Load from single file
pipe = load_diffusers_pipeline(
class_name="StableDiffusionPipeline",
model_id="/path/to/model.safetensors",
from_single_file=True,
torch_dtype=torch.float16
)
"""
# Resolve the pipeline class
pipeline_class = resolve_pipeline_class(
class_name=class_name,
task=task,
model_id=model_id
)
# If no model_id provided but we have a class, we can't load
if model_id is None:
raise ValueError("model_id is required to load a pipeline")
# Load the pipeline
try:
if from_single_file:
# Check if the class has from_single_file method
if hasattr(pipeline_class, 'from_single_file'):
return pipeline_class.from_single_file(model_id, **kwargs)
else:
raise ValueError(
f"Pipeline class {pipeline_class.__name__} does not support from_single_file(). "
f"Use from_pretrained() instead."
)
else:
return pipeline_class.from_pretrained(model_id, **kwargs)
except Exception as e:
# Provide helpful error message
available = get_available_pipelines()
raise RuntimeError(
f"Failed to load pipeline '{pipeline_class.__name__}' from '{model_id}': {e}\n"
f"Available pipelines: {', '.join(available[:20])}..."
) from e
def get_pipeline_info(class_name: str) -> Dict[str, Any]:
"""
Get information about a specific pipeline class.
Args:
class_name: The pipeline class name
Returns:
Dictionary with pipeline information including:
- name: Class name
- aliases: List of task aliases
- supports_single_file: Whether from_single_file() is available
- docstring: Class docstring (if available)
"""
registry = get_pipeline_registry()
aliases = get_task_aliases()
if class_name not in registry:
raise ValueError(f"Unknown pipeline: {class_name}")
cls = registry[class_name]
# Find all aliases for this pipeline
pipeline_aliases = []
for alias, classes in aliases.items():
if class_name in classes:
pipeline_aliases.append(alias)
return {
'name': class_name,
'aliases': pipeline_aliases,
'supports_single_file': hasattr(cls, 'from_single_file'),
'docstring': cls.__doc__[:200] if cls.__doc__ else None
}

View File

@@ -16,11 +16,4 @@ if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi
# Use python 3.12 for l4t
if [ "x${BUILD_PROFILE}" == "xl4t13" ]; then
PYTHON_VERSION="3.12"
PYTHON_PATCH="12"
PY_STANDALONE_TAG="20251120"
fi
installRequirements

View File

@@ -1,12 +1,12 @@
--extra-index-url https://download.pytorch.org/whl/cu130
--extra-index-url https://download.pytorch.org/whl/cu118
git+https://github.com/huggingface/diffusers
opencv-python
transformers
torchvision
torchvision==0.22.1
accelerate
compel
peft
sentencepiece
torch
ftfy
torch==2.7.1
optimum-quanto
ftfy

View File

@@ -1,6 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
torch==2.8.0+rocm6.4
torchvision==0.23.0+rocm6.4
--extra-index-url https://download.pytorch.org/whl/rocm6.3
torch==2.7.1+rocm6.3
torchvision==0.22.1+rocm6.3
git+https://github.com/huggingface/diffusers
opencv-python
transformers

View File

@@ -0,0 +1,12 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
torch
diffusers
transformers
accelerate
compel
peft
optimum-quanto
numpy<2
sentencepiece
torchvision
ftfy

View File

@@ -1,12 +0,0 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu129/
torch
git+https://github.com/huggingface/diffusers
transformers
accelerate
compel
peft
optimum-quanto
numpy<2
sentencepiece
torchvision
ftfy

View File

@@ -1,13 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
git+https://github.com/huggingface/diffusers
transformers
accelerate
compel
peft
optimum-quanto
numpy<2
sentencepiece
torchvision
ftfy
chardet

View File

@@ -1,26 +1,15 @@
"""
A test script to test the gRPC service and dynamic loader
A test script to test the gRPC service
"""
import unittest
import subprocess
import time
from unittest.mock import patch, MagicMock
import backend_pb2
import backend_pb2_grpc
# Import dynamic loader for testing (these don't need gRPC)
import diffusers_dynamic_loader as loader
from diffusers import DiffusionPipeline, StableDiffusionPipeline
# Try to import gRPC modules - may not be available during unit testing
try:
import grpc
import backend_pb2
import backend_pb2_grpc
GRPC_AVAILABLE = True
except ImportError:
GRPC_AVAILABLE = False
import grpc
@unittest.skipUnless(GRPC_AVAILABLE, "gRPC modules not available")
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service
@@ -93,222 +82,3 @@ class TestBackendServicer(unittest.TestCase):
self.fail("Image gen service failed")
finally:
self.tearDown()
class TestDiffusersDynamicLoader(unittest.TestCase):
"""Test cases for the diffusers dynamic loader functionality."""
@classmethod
def setUpClass(cls):
"""Set up test fixtures - clear caches to ensure fresh discovery."""
# Reset the caches to ensure fresh discovery
loader._pipeline_registry = None
loader._task_aliases = None
def test_camel_to_kebab_conversion(self):
"""Test CamelCase to kebab-case conversion."""
test_cases = [
("StableDiffusionPipeline", "stable-diffusion-pipeline"),
("StableDiffusionXLPipeline", "stable-diffusion-xl-pipeline"),
("FluxPipeline", "flux-pipeline"),
("DiffusionPipeline", "diffusion-pipeline"),
]
for input_val, expected in test_cases:
with self.subTest(input=input_val):
result = loader._camel_to_kebab(input_val)
self.assertEqual(result, expected)
def test_extract_task_keywords(self):
"""Test task keyword extraction from class names."""
# Test text-to-image detection
aliases = loader._extract_task_keywords("StableDiffusionPipeline")
self.assertIn("stable-diffusion", aliases)
# Test img2img detection
aliases = loader._extract_task_keywords("StableDiffusionImg2ImgPipeline")
self.assertIn("image-to-image", aliases)
self.assertIn("img2img", aliases)
# Test inpainting detection
aliases = loader._extract_task_keywords("StableDiffusionInpaintPipeline")
self.assertIn("inpainting", aliases)
self.assertIn("inpaint", aliases)
# Test depth2img detection
aliases = loader._extract_task_keywords("StableDiffusionDepth2ImgPipeline")
self.assertIn("depth-to-image", aliases)
def test_discover_pipelines_finds_known_classes(self):
"""Test that pipeline discovery finds at least one known pipeline class."""
registry = loader.get_pipeline_registry()
# Check that the registry is not empty
self.assertGreater(len(registry), 0, "Pipeline registry should not be empty")
# Check for known pipeline classes
known_pipelines = [
"StableDiffusionPipeline",
"DiffusionPipeline",
]
for pipeline_name in known_pipelines:
with self.subTest(pipeline=pipeline_name):
self.assertIn(
pipeline_name,
registry,
f"Expected to find {pipeline_name} in registry"
)
def test_discover_pipelines_caches_results(self):
"""Test that pipeline discovery results are cached."""
# Get registry twice
registry1 = loader.get_pipeline_registry()
registry2 = loader.get_pipeline_registry()
# Should be the same object (cached)
self.assertIs(registry1, registry2, "Registry should be cached")
def test_get_available_pipelines(self):
"""Test getting list of available pipelines."""
available = loader.get_available_pipelines()
# Should return a list
self.assertIsInstance(available, list)
# Should contain known pipelines
self.assertIn("StableDiffusionPipeline", available)
self.assertIn("DiffusionPipeline", available)
# Should be sorted
self.assertEqual(available, sorted(available))
def test_get_available_tasks(self):
"""Test getting list of available task aliases."""
tasks = loader.get_available_tasks()
# Should return a list
self.assertIsInstance(tasks, list)
# Should be sorted
self.assertEqual(tasks, sorted(tasks))
def test_resolve_pipeline_class_by_name(self):
"""Test resolving pipeline class by exact name."""
cls = loader.resolve_pipeline_class(class_name="StableDiffusionPipeline")
self.assertEqual(cls, StableDiffusionPipeline)
def test_resolve_pipeline_class_by_name_case_insensitive(self):
"""Test that class name resolution is case-insensitive."""
cls1 = loader.resolve_pipeline_class(class_name="StableDiffusionPipeline")
cls2 = loader.resolve_pipeline_class(class_name="stablediffusionpipeline")
self.assertEqual(cls1, cls2)
def test_resolve_pipeline_class_by_task(self):
"""Test resolving pipeline class by task alias."""
# Get the registry to find available tasks
aliases = loader.get_task_aliases()
# Test with a common task that should be available
if "stable-diffusion" in aliases:
cls = loader.resolve_pipeline_class(task="stable-diffusion")
self.assertIsNotNone(cls)
def test_resolve_pipeline_class_unknown_name_raises(self):
"""Test that resolving unknown class name raises ValueError with helpful message."""
with self.assertRaises(ValueError) as ctx:
loader.resolve_pipeline_class(class_name="NonExistentPipeline")
# Check that error message includes available pipelines
error_msg = str(ctx.exception)
self.assertIn("Unknown pipeline class", error_msg)
self.assertIn("Available pipelines", error_msg)
def test_resolve_pipeline_class_unknown_task_raises(self):
"""Test that resolving unknown task raises ValueError with helpful message."""
with self.assertRaises(ValueError) as ctx:
loader.resolve_pipeline_class(task="nonexistent-task-xyz")
# Check that error message includes available tasks
error_msg = str(ctx.exception)
self.assertIn("Unknown task", error_msg)
self.assertIn("Available tasks", error_msg)
def test_resolve_pipeline_class_no_params_raises(self):
"""Test that calling with no parameters raises helpful ValueError."""
with self.assertRaises(ValueError) as ctx:
loader.resolve_pipeline_class()
error_msg = str(ctx.exception)
self.assertIn("Must provide at least one of", error_msg)
def test_get_pipeline_info(self):
"""Test getting pipeline information."""
info = loader.get_pipeline_info("StableDiffusionPipeline")
self.assertEqual(info['name'], "StableDiffusionPipeline")
self.assertIsInstance(info['aliases'], list)
self.assertIsInstance(info['supports_single_file'], bool)
def test_get_pipeline_info_unknown_raises(self):
"""Test that getting info for unknown pipeline raises ValueError."""
with self.assertRaises(ValueError) as ctx:
loader.get_pipeline_info("NonExistentPipeline")
self.assertIn("Unknown pipeline", str(ctx.exception))
def test_discover_diffusers_classes_pipelines(self):
"""Test generic class discovery for DiffusionPipeline."""
classes = loader.discover_diffusers_classes("DiffusionPipeline")
# Should return a dict
self.assertIsInstance(classes, dict)
# Should contain known pipeline classes
self.assertIn("DiffusionPipeline", classes)
self.assertIn("StableDiffusionPipeline", classes)
def test_discover_diffusers_classes_caches_results(self):
"""Test that class discovery results are cached."""
classes1 = loader.discover_diffusers_classes("DiffusionPipeline")
classes2 = loader.discover_diffusers_classes("DiffusionPipeline")
# Should be the same object (cached)
self.assertIs(classes1, classes2)
def test_discover_diffusers_classes_exclude_base(self):
"""Test discovering classes without base class."""
classes = loader.discover_diffusers_classes("DiffusionPipeline", include_base=False)
# Should still contain subclasses
self.assertIn("StableDiffusionPipeline", classes)
def test_get_available_classes(self):
"""Test getting list of available classes for a base class."""
classes = loader.get_available_classes("DiffusionPipeline")
# Should return a sorted list
self.assertIsInstance(classes, list)
self.assertEqual(classes, sorted(classes))
# Should contain known classes
self.assertIn("StableDiffusionPipeline", classes)
class TestDiffusersDynamicLoaderWithMocks(unittest.TestCase):
"""Test cases using mocks to test edge cases."""
def test_load_pipeline_requires_model_id(self):
"""Test that load_diffusers_pipeline requires model_id."""
with self.assertRaises(ValueError) as ctx:
loader.load_diffusers_pipeline(class_name="StableDiffusionPipeline")
self.assertIn("model_id is required", str(ctx.exception))
def test_resolve_with_model_id_uses_diffusion_pipeline_fallback(self):
"""Test that resolving with only model_id falls back to DiffusionPipeline."""
# When model_id is provided, if hub lookup is not successful,
# should fall back to DiffusionPipeline.
# This tests the fallback behavior - the actual hub lookup may succeed
# or fail depending on network, but the fallback path should work.
cls = loader.resolve_pipeline_class(model_id="some/nonexistent/model")
self.assertEqual(cls, DiffusionPipeline)

View File

@@ -0,0 +1,4 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.4.1+cu118
transformers
accelerate

View File

@@ -0,0 +1,9 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.4.1+cu118
faster-whisper
opencv-python
accelerate
compel
peft
sentencepiece
optimum-quanto

View File

@@ -1,9 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch==2.9.1
faster-whisper
opencv-python
accelerate
compel
peft
sentencepiece
optimum-quanto

View File

@@ -1,3 +1,3 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
--extra-index-url https://download.pytorch.org/whl/rocm6.0
torch
faster-whisper

View File

@@ -0,0 +1,7 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.7.1+cu118
torchaudio==2.7.1+cu118
transformers
accelerate
kokoro
soundfile

View File

@@ -1,7 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch==2.9.1
torchaudio==2.9.1
transformers
accelerate
kokoro
soundfile

View File

@@ -1,6 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/rocm6.4
torch==2.8.0+rocm6.4
torchaudio==2.8.0+rocm6.4
--extra-index-url https://download.pytorch.org/whl/rocm6.3
torch==2.7.1+rocm6.3
torchaudio==2.7.1+rocm6.3
transformers
accelerate
kokoro

View File

@@ -14,13 +14,11 @@ import backend_pb2_grpc
import grpc
from mlx_lm import load, generate, stream_generate
from mlx_lm.sample_utils import make_sampler
from mlx_lm.models.cache import make_prompt_cache, can_trim_prompt_cache, trim_prompt_cache
from mlx_lm.models.cache import make_prompt_cache
import mlx.core as mx
import base64
import io
from mlx_cache import ThreadSafeLRUPromptCache
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If MAX_WORKERS is specified in the environment, use it; otherwise default to 1
@@ -120,16 +118,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
self.model, self.tokenizer = load(request.Model, tokenizer_config=tokenizer_config)
else:
self.model, self.tokenizer = load(request.Model)
# Initialize thread-safe LRU prompt cache for efficient generation
max_cache_entries = self.options.get("max_cache_entries", 10)
self.max_kv_size = self.options.get("max_kv_size", None)
self.model_key = request.Model
self.lru_cache = ThreadSafeLRUPromptCache(
max_size=max_cache_entries,
can_trim_fn=can_trim_prompt_cache,
trim_fn=trim_prompt_cache,
)
# Initialize prompt cache for efficient generation
max_kv_size = self.options.get("max_kv_size", None)
self.prompt_cache = make_prompt_cache(self.model, max_kv_size)
except Exception as err:
print(f"Error loading MLX model {err=}, {type(err)=}", file=sys.stderr)
@@ -142,8 +134,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
Generates text based on the given prompt and sampling parameters using MLX.
Uses thread-safe LRU prompt cache for efficient prefix reuse across requests.
Args:
request: The predict request.
context: The gRPC context.
@@ -151,48 +141,31 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
Returns:
backend_pb2.Reply: The predict result.
"""
prompt_cache = None
cache_key = None
try:
# Prepare the prompt and tokenize for cache key
prompt_text = self._prepare_prompt(request)
cache_key = self._get_tokens_from_prompt(prompt_text)
# Fetch nearest cache (exact, shorter prefix, or create new)
prompt_cache, remaining_tokens = self.lru_cache.fetch_nearest_cache(
self.model_key, cache_key
)
if prompt_cache is None:
prompt_cache = make_prompt_cache(self.model, self.max_kv_size)
remaining_tokens = cache_key
# Prepare the prompt
prompt = self._prepare_prompt(request)
# Build generation parameters using request attributes and options
max_tokens, sampler_params = self._build_generation_params(request)
print(f"Generating text with MLX - max_tokens: {max_tokens}, cache_hit: {len(remaining_tokens) < len(cache_key)}", file=sys.stderr)
print(f"Generating text with MLX - max_tokens: {max_tokens}, sampler_params: {sampler_params}", file=sys.stderr)
# Create sampler with parameters
sampler = make_sampler(**sampler_params)
# Use stream_generate to track generated tokens for cache key
generated_text = []
for response in stream_generate(
# Generate text using MLX with proper parameters
response = generate(
self.model,
self.tokenizer,
prompt=remaining_tokens if remaining_tokens else cache_key,
prompt=prompt,
max_tokens=max_tokens,
sampler=sampler,
prompt_cache=prompt_cache,
):
generated_text.append(response.text)
cache_key.append(response.token)
# Insert completed cache
self.lru_cache.insert_cache(self.model_key, cache_key, prompt_cache)
return backend_pb2.Reply(message=bytes(''.join(generated_text), encoding='utf-8'))
prompt_cache=self.prompt_cache,
verbose=False
)
return backend_pb2.Reply(message=bytes(response, encoding='utf-8'))
except Exception as e:
print(f"Error in MLX Predict: {e}", file=sys.stderr)
context.set_code(grpc.StatusCode.INTERNAL)
@@ -221,8 +194,6 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
Generates text based on the given prompt and sampling parameters, and streams the results using MLX.
Uses thread-safe LRU prompt cache for efficient prefix reuse across requests.
Args:
request: The predict stream request.
context: The gRPC context.
@@ -230,56 +201,35 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
Yields:
backend_pb2.Reply: Streaming predict results.
"""
prompt_cache = None
cache_key = None
try:
# Prepare the prompt and tokenize for cache key
prompt_text = self._prepare_prompt(request)
cache_key = self._get_tokens_from_prompt(prompt_text)
# Fetch nearest cache (exact, shorter prefix, or create new)
prompt_cache, remaining_tokens = self.lru_cache.fetch_nearest_cache(
self.model_key, cache_key
)
if prompt_cache is None:
prompt_cache = make_prompt_cache(self.model, self.max_kv_size)
remaining_tokens = cache_key
# Prepare the prompt
prompt = self._prepare_prompt(request)
# Build generation parameters using request attributes and options
max_tokens, sampler_params = self._build_generation_params(request, default_max_tokens=512)
print(f"Streaming text with MLX - max_tokens: {max_tokens}, cache_hit: {len(remaining_tokens) < len(cache_key)}", file=sys.stderr)
print(f"Streaming text with MLX - max_tokens: {max_tokens}, sampler_params: {sampler_params}", file=sys.stderr)
# Create sampler with parameters
sampler = make_sampler(**sampler_params)
# Stream text generation using MLX with proper parameters
for response in stream_generate(
self.model,
self.tokenizer,
prompt=remaining_tokens if remaining_tokens else cache_key,
prompt=prompt,
max_tokens=max_tokens,
sampler=sampler,
prompt_cache=prompt_cache,
prompt_cache=self.prompt_cache,
):
cache_key.append(response.token)
yield backend_pb2.Reply(message=bytes(response.text, encoding='utf-8'))
except Exception as e:
print(f"Error in MLX PredictStream: {e}", file=sys.stderr)
context.set_code(grpc.StatusCode.INTERNAL)
context.set_details(f"Streaming generation failed: {str(e)}")
yield backend_pb2.Reply(message=bytes("", encoding='utf-8'))
finally:
# Always insert cache, even on interruption
if prompt_cache is not None and cache_key is not None:
try:
self.lru_cache.insert_cache(self.model_key, cache_key, prompt_cache)
except Exception as e:
print(f"Error inserting cache: {e}", file=sys.stderr)
def _prepare_prompt(self, request):
"""
Prepare the prompt for MLX generation, handling chat templates if needed.
@@ -296,31 +246,16 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
messages = []
for msg in request.Messages:
messages.append({"role": msg.role, "content": msg.content})
prompt = self.tokenizer.apply_chat_template(
messages,
tokenize=False,
messages,
tokenize=False,
add_generation_prompt=True
)
return prompt
else:
return request.Prompt
def _get_tokens_from_prompt(self, prompt_text: str) -> List[int]:
"""
Tokenize prompt text for cache key generation.
Args:
prompt_text: The prompt string to tokenize.
Returns:
List[int]: List of token IDs.
"""
tokens = self.tokenizer.encode(prompt_text)
if hasattr(tokens, 'tolist'):
return tokens.tolist()
return list(tokens)
@@ -349,19 +284,11 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
top_p = getattr(request, 'TopP', 0.0)
if top_p == 0.0:
top_p = 1.0 # Default top_p
min_p = getattr(request, 'MinP', 0.0)
# min_p default of 0.0 means disabled (no filtering)
top_k = getattr(request, 'TopK', 0)
# top_k default of 0 means disabled (no filtering)
# Initialize sampler parameters
sampler_params = {
'temp': temp,
'top_p': top_p,
'min_p': min_p,
'top_k': top_k,
'xtc_threshold': 0.0,
'xtc_probability': 0.0,
}
@@ -381,9 +308,7 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
sampler_option_mapping = {
'temp': 'temp',
'temperature': 'temp', # alias
'top_p': 'top_p',
'min_p': 'min_p',
'top_k': 'top_k',
'top_p': 'top_p',
'xtc_threshold': 'xtc_threshold',
'xtc_probability': 'xtc_probability',
}

View File

@@ -1,266 +0,0 @@
"""
Thread-safe LRU prompt cache for MLX-based backends.
Ported from mlx_lm/server.py (MIT License, Copyright 2023-2024 Apple Inc.)
with thread-safety additions for LocalAI's gRPC backend.
Usage:
from mlx_cache import ThreadSafeLRUPromptCache
# In LoadModel:
self.lru_cache = ThreadSafeLRUPromptCache(max_size=10)
# In Predict/PredictStream:
prompt_cache, remaining_tokens = self.lru_cache.fetch_nearest_cache(model_key, tokens)
# ... generate ...
self.lru_cache.insert_cache(model_key, tokens, prompt_cache)
"""
import copy
import threading
from collections import deque
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple
@dataclass
class CacheEntry:
"""A cache entry with reference counting."""
prompt_cache: List[Any]
count: int
@dataclass
class SearchResult:
"""Result of searching the cache trie."""
model: Any
exact: Optional[List[int]]
shorter: Optional[List[int]]
longer: Optional[List[int]]
common_prefix: int
class ThreadSafeLRUPromptCache:
"""
Thread-safe LRU cache with prefix matching for prompt KV caches.
This cache stores KV caches keyed by token sequences and supports:
- Exact match: Return the cache for the exact token sequence
- Shorter prefix match: Return a cache for a prefix of the tokens
- Longer prefix match: If a longer sequence is cached and can be trimmed
- LRU eviction: When max_size is exceeded, evict least recently used
Thread safety is provided via a threading.Lock that protects all
cache operations.
Args:
max_size: Maximum number of cache entries (default: 10)
can_trim_fn: Optional function to check if a cache can be trimmed
trim_fn: Optional function to trim a cache
"""
def __init__(
self,
max_size: int = 10,
can_trim_fn: Optional[Any] = None,
trim_fn: Optional[Any] = None,
):
self.max_size = max_size
self._cache = {}
self._lru = deque()
self._lock = threading.Lock()
# Optional trim functions (for longer prefix reuse)
self._can_trim_fn = can_trim_fn
self._trim_fn = trim_fn
def _search(self, model, tokens: List[int]) -> SearchResult:
"""
Search the cache for a prompt cache. Return exact or close match.
The cache is organized as a trie where each node is keyed by a token.
This allows efficient prefix matching.
"""
if model not in self._cache:
return SearchResult(model, None, None, None, 0)
current = self._cache[model]
last_cache_index = -1
index = 0
# Traverse the trie following the token sequence
while index < len(tokens) and tokens[index] in current:
current = current[tokens[index]]
if "cache" in current:
last_cache_index = index
index += 1
# Exact match - no need to search for longer or shorter caches
if last_cache_index == len(tokens) - 1:
return SearchResult(model, tuple(tokens), None, None, 0)
# Find the shorter cache (a prefix that has a cache)
# Note: Uses > 0 (not >= 0) to match upstream mlx_lm/server.py behavior.
# Single-token prefixes are not matched, which allows longer cached
# sequences to be preferred for trimming. This is acceptable because
# real prompts with chat templates are always many tokens.
shorter = None
if last_cache_index > 0:
shorter = tuple(tokens[: last_cache_index + 1])
# Check for caches that are longer than our token sequence
longer = None
common_prefix = index
if index > 0 and last_cache_index <= 0:
best = None
stack = [(current, [])]
while stack:
current, extra = stack.pop()
if "cache" in current:
if best is None or len(extra) < len(best):
best = extra
else:
for tok in current:
stack.append((current[tok], extra + [tok]))
if best is not None:
longer = tuple(tokens[:index] + best)
return SearchResult(model, None, shorter, longer, common_prefix)
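# Worked example (illustrative token ids): if only the sequence
# [1, 2, 3, 4, 5] is cached, searching for [1, 2, 3, 4] yields no exact or
# shorter match, but returns longer=(1, 2, 3, 4, 5) with common_prefix=4;
# fetch_nearest_cache() can then trim the longer cache and re-process only
# the remaining tail tokens.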
def _get(self, model, tokens: Tuple[int, ...]) -> CacheEntry:
"""Get a cache entry by traversing the trie."""
current = self._cache[model]
for tok in tokens:
current = current[tok]
return current["cache"]
def _delete(self, model, tokens: Tuple[int, ...]) -> None:
"""Delete a cache entry and clean up empty trie nodes."""
path = [self._cache[model]]
for tok in tokens:
path.append(path[-1][tok])
del path[-1]["cache"]
# Clean up empty nodes bottom-up
for i in reversed(range(len(tokens))):
d_prev, d, t = path[i], path[i + 1], tokens[i]
if len(d) > 0:
break
del d_prev[t]
def _extract(self, model, tokens: Tuple[int, ...]) -> CacheEntry:
"""
Extract a cache entry for exclusive use.
If the entry has count > 1, deep copy and decrement.
If count == 1, remove from cache entirely.
"""
cache_entry = self._get(model, tokens)
if cache_entry.count == 1:
self._delete(model, tokens)
self._lru.remove((model, tokens))
return cache_entry
cache_entry.count -= 1
return CacheEntry(
copy.deepcopy(cache_entry.prompt_cache),
1,
)
def fetch_nearest_cache(
self, model, tokens: List[int]
) -> Tuple[Optional[List[Any]], List[int]]:
"""
Fetch the nearest cache for the given token sequence.
Thread-safe. Returns (cache, remaining_tokens) where:
- cache: The KV cache to use (or None if no cache found)
- remaining_tokens: Tokens that still need to be processed
Args:
model: Model identifier (used to namespace caches)
tokens: The full token sequence for the prompt
Returns:
Tuple of (prompt_cache, remaining_tokens)
"""
with self._lock:
result = self._search(model, tokens)
# Exact match - extract and return
if result.exact is not None:
cache_entry = self._extract(result.model, result.exact)
return cache_entry.prompt_cache, []
# Shorter prefix match - extract and return remaining
if result.shorter is not None:
cache_entry = self._extract(result.model, result.shorter)
prefix_len = len(result.shorter)
return cache_entry.prompt_cache, list(tokens[prefix_len:])
# Longer prefix match - try to trim if possible
if result.longer is not None and self._can_trim_fn is not None:
cache_entry = self._get(result.model, result.longer)
if self._can_trim_fn(cache_entry.prompt_cache):
# Deep copy and trim
trimmed_cache = copy.deepcopy(cache_entry.prompt_cache)
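                    # Keep at least one token to re-process so generation
                    # still has a non-empty prompt after trimming.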
prefix = min(len(tokens) - 1, result.common_prefix)
num_to_trim = len(result.longer) - prefix
if self._trim_fn is not None:
self._trim_fn(trimmed_cache, num_to_trim)
return trimmed_cache, list(tokens[prefix:])
# No match found
return None, list(tokens)
def insert_cache(
self, model, tokens: List[int], prompt_cache: List[Any]
) -> None:
"""
Insert a cache entry after generation completes.
Thread-safe. Handles LRU eviction if max_size is exceeded.
Args:
model: Model identifier (used to namespace caches)
tokens: The full token sequence (prompt + generated)
prompt_cache: The KV cache to store
"""
with self._lock:
tokens_tuple = tuple(tokens)
if model not in self._cache:
self._cache[model] = {}
current = self._cache[model]
# Build trie path
for tok in tokens_tuple:
if tok not in current:
current[tok] = {}
current = current[tok]
# Update or create entry
if "cache" in current:
current["cache"].count += 1
self._lru.remove((model, tokens_tuple))
else:
current["cache"] = CacheEntry(prompt_cache, 1)
# Update LRU order
self._lru.append((model, tokens_tuple))
# Evict if over capacity
if len(self._lru) > self.max_size:
evict_model, evict_tokens = self._lru.popleft()
self._delete(evict_model, evict_tokens)
def clear(self) -> None:
"""Clear all cache entries. Thread-safe."""
with self._lock:
self._cache.clear()
self._lru.clear()
def __len__(self) -> int:
"""Return the number of cache entries. Thread-safe."""
with self._lock:
return len(self._lru)
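
By way of illustration, a minimal usage sketch of the cache above. The plain list stands in for a real mlx_lm KV cache, the toy trim hooks only demonstrate the expected signatures, and the mlx_cache module name is taken from the unit tests below:

from mlx_cache import ThreadSafeLRUPromptCache

# Trim hooks are optional; these toy versions assume the "cache" is a list.
cache = ThreadSafeLRUPromptCache(
    max_size=10,
    can_trim_fn=lambda c: True,
    trim_fn=lambda c, n: c.append(f"trimmed_{n}"),
)

# After generation completes, store the KV cache under the full token sequence.
cache.insert_cache("model1", [1, 2, 3], ["kv_state"])

# A longer prompt sharing the cached prefix reuses it; only the suffix
# still needs to be processed.
kv, remaining = cache.fetch_nearest_cache("model1", [1, 2, 3, 4, 5])
assert kv == ["kv_state"] and remaining == [4, 5]

# Fetching extracts the entry (its count was 1), so a second lookup misses.
kv2, remaining2 = cache.fetch_nearest_cache("model1", [1, 2, 3])
assert kv2 is None and remaining2 == [1, 2, 3]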

View File

@@ -1,11 +1,18 @@
import unittest
import subprocess
import time
import grpc
import backend_pb2
import backend_pb2_grpc
class TestBackendServicer(unittest.TestCase):
"""
TestBackendServicer is the class that tests the gRPC service.
@@ -40,9 +47,9 @@ class TestBackendServicer(unittest.TestCase):
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="mlx-community/Llama-3.2-1B-Instruct-4bit"))
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
self.assertEqual(response.message, "MLX model loaded successfully")
self.assertEqual(response.message, "Model loaded successfully")
except Exception as err:
print(err)
self.fail("LoadModel service failed")
@@ -57,7 +64,7 @@ class TestBackendServicer(unittest.TestCase):
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="mlx-community/Llama-3.2-1B-Instruct-4bit"))
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
req = backend_pb2.PredictOptions(Prompt="The capital of France is")
resp = stub.Predict(req)
@@ -77,7 +84,7 @@ class TestBackendServicer(unittest.TestCase):
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="mlx-community/Llama-3.2-1B-Instruct-4bit"))
response = stub.LoadModel(backend_pb2.ModelOptions(Model="facebook/opt-125m"))
self.assertTrue(response.success)
req = backend_pb2.PredictOptions(
@@ -88,13 +95,26 @@ class TestBackendServicer(unittest.TestCase):
TopK=40,
PresencePenalty=0.1,
FrequencyPenalty=0.2,
RepetitionPenalty=1.1,
MinP=0.05,
Seed=42,
StopPrompts=["\n"],
StopTokenIds=[50256],
BadWords=["badword"],
IncludeStopStrInOutput=True,
IgnoreEOS=True,
MinTokens=5,
Logprobs=5,
PromptLogprobs=5,
SkipSpecialTokens=True,
SpacesBetweenSpecialTokens=True,
TruncatePromptTokens=10,
GuidedDecoding=True,
N=2,
)
resp = stub.Predict(req)
self.assertIsNotNone(resp.message)
self.assertIsNotNone(resp.logprobs)
except Exception as err:
print(err)
self.fail("sampling params service failed")
@@ -123,112 +143,4 @@ class TestBackendServicer(unittest.TestCase):
print(err)
self.fail("Embedding service failed")
finally:
self.tearDown()
def test_concurrent_requests(self):
"""
This method tests that concurrent requests don't corrupt each other's cache state.
This is a regression test for the race condition in the original implementation.
"""
import concurrent.futures
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="mlx-community/Llama-3.2-1B-Instruct-4bit"))
self.assertTrue(response.success)
def make_request(prompt):
req = backend_pb2.PredictOptions(Prompt=prompt, Tokens=20)
return stub.Predict(req)
# Run 5 concurrent requests with different prompts
prompts = [
"The capital of France is",
"The capital of Germany is",
"The capital of Italy is",
"The capital of Spain is",
"The capital of Portugal is",
]
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
futures = [executor.submit(make_request, p) for p in prompts]
results = [f.result() for f in concurrent.futures.as_completed(futures)]
# All results should be non-empty
messages = [r.message for r in results]
self.assertTrue(all(len(m) > 0 for m in messages), "All requests should return non-empty responses")
print(f"Concurrent test passed: {len(messages)} responses received")
except Exception as err:
print(err)
self.fail("Concurrent requests test failed")
finally:
self.tearDown()
def test_cache_reuse(self):
"""
This method tests that repeated prompts reuse cached KV states.
The second request should benefit from the cached prompt processing.
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="mlx-community/Llama-3.2-1B-Instruct-4bit"))
self.assertTrue(response.success)
prompt = "The quick brown fox jumps over the lazy dog. "
# First request - populates cache
req1 = backend_pb2.PredictOptions(Prompt=prompt, Tokens=10)
resp1 = stub.Predict(req1)
self.assertIsNotNone(resp1.message)
# Second request with same prompt - should reuse cache
req2 = backend_pb2.PredictOptions(Prompt=prompt, Tokens=10)
resp2 = stub.Predict(req2)
self.assertIsNotNone(resp2.message)
print(f"Cache reuse test passed: first={len(resp1.message)} bytes, second={len(resp2.message)} bytes")
except Exception as err:
print(err)
self.fail("Cache reuse test failed")
finally:
self.tearDown()
def test_prefix_cache_reuse(self):
"""
This method tests that prompts sharing a common prefix benefit from cached KV states.
"""
try:
self.setUp()
with grpc.insecure_channel("localhost:50051") as channel:
stub = backend_pb2_grpc.BackendStub(channel)
response = stub.LoadModel(backend_pb2.ModelOptions(Model="mlx-community/Llama-3.2-1B-Instruct-4bit"))
self.assertTrue(response.success)
# First request with base prompt
prompt_base = "Once upon a time in a land far away, "
req1 = backend_pb2.PredictOptions(Prompt=prompt_base, Tokens=10)
resp1 = stub.Predict(req1)
self.assertIsNotNone(resp1.message)
# Second request with extended prompt (same prefix)
prompt_extended = prompt_base + "there lived a brave knight who "
req2 = backend_pb2.PredictOptions(Prompt=prompt_extended, Tokens=10)
resp2 = stub.Predict(req2)
self.assertIsNotNone(resp2.message)
print(f"Prefix cache test passed: base={len(resp1.message)} bytes, extended={len(resp2.message)} bytes")
except Exception as err:
print(err)
self.fail("Prefix cache reuse test failed")
finally:
self.tearDown()
# Unit tests for ThreadSafeLRUPromptCache are in test_mlx_cache.py
self.tearDown()

View File

@@ -1,480 +0,0 @@
"""
Comprehensive unit tests for ThreadSafeLRUPromptCache.
Tests all cache operation modes:
- Exact match
- Shorter prefix match
- Longer prefix match (with trimming)
- No match
- LRU eviction
- Reference counting
- Multi-model namespacing
- Thread safety with data integrity verification
"""
import unittest
import concurrent.futures
import threading
import copy
from mlx_cache import ThreadSafeLRUPromptCache
class TestCacheExactMatch(unittest.TestCase):
"""Tests for exact match cache behavior."""
def setUp(self):
self.cache = ThreadSafeLRUPromptCache(max_size=10)
def test_exact_match_returns_cache_and_empty_remaining(self):
"""Exact match should return the cache with no remaining tokens."""
tokens = [1, 2, 3, 4, 5]
mock_cache = ["kv_cache_data"]
self.cache.insert_cache("model1", tokens, mock_cache)
result_cache, remaining = self.cache.fetch_nearest_cache("model1", tokens)
self.assertEqual(result_cache, mock_cache)
self.assertEqual(remaining, [])
def test_exact_match_extracts_and_removes_from_cache(self):
"""Fetching exact match with count=1 should remove entry from cache."""
tokens = [1, 2, 3]
self.cache.insert_cache("model1", tokens, ["cache"])
self.assertEqual(len(self.cache), 1)
# First fetch extracts the entry
self.cache.fetch_nearest_cache("model1", tokens)
# Cache should now be empty
self.assertEqual(len(self.cache), 0)
# Second fetch should return None (no match)
result_cache, remaining = self.cache.fetch_nearest_cache("model1", tokens)
self.assertIsNone(result_cache)
self.assertEqual(remaining, tokens)
class TestCacheShorterPrefix(unittest.TestCase):
"""Tests for shorter prefix match behavior."""
def setUp(self):
self.cache = ThreadSafeLRUPromptCache(max_size=10)
def test_shorter_prefix_returns_cache_with_remaining_tokens(self):
"""When cached prefix is shorter, return cache and remaining suffix."""
short_tokens = [1, 2, 3]
long_tokens = [1, 2, 3, 4, 5, 6]
mock_cache = ["prefix_cache"]
self.cache.insert_cache("model1", short_tokens, mock_cache)
result_cache, remaining = self.cache.fetch_nearest_cache("model1", long_tokens)
self.assertEqual(result_cache, mock_cache)
self.assertEqual(remaining, [4, 5, 6])
def test_shorter_prefix_correct_remaining_calculation(self):
"""Verify remaining tokens are calculated correctly for various prefix lengths."""
# Note: Single-token prefixes ([1] -> [1,2,3]) are deliberately not matched
# to allow longer cached sequences to be preferred for trimming.
# This matches upstream mlx_lm/server.py behavior.
test_cases = [
# (cached_tokens, requested_tokens, expected_remaining)
([1, 2], [1, 2, 3, 4, 5], [3, 4, 5]),
([10, 20, 30, 40], [10, 20, 30, 40, 50], [50]),
]
for cached, requested, expected_remaining in test_cases:
with self.subTest(cached=cached, requested=requested):
cache = ThreadSafeLRUPromptCache(max_size=10)
cache.insert_cache("model", cached, ["cache"])
result_cache, remaining = cache.fetch_nearest_cache("model", requested)
self.assertIsNotNone(result_cache)
self.assertEqual(remaining, expected_remaining)
def test_single_token_prefix_not_matched(self):
"""Single-token prefixes are not matched (by design, matches upstream).
This allows longer cached sequences to be preferred for trimming,
which provides better KV cache reuse. Single-token caches are rare
in practice since real prompts with chat templates are many tokens.
"""
cache = ThreadSafeLRUPromptCache(max_size=10)
cache.insert_cache("model", [1], ["cache"])
result_cache, remaining = cache.fetch_nearest_cache("model", [1, 2, 3])
# Single-token prefix is NOT matched
self.assertIsNone(result_cache)
self.assertEqual(remaining, [1, 2, 3])
class TestCacheLongerPrefix(unittest.TestCase):
"""Tests for longer prefix match behavior (trimming)."""
def setUp(self):
# Track trim calls for verification
self.trim_calls = []
def mock_can_trim(cache):
return True
def mock_trim(cache, num_to_trim):
self.trim_calls.append(num_to_trim)
# Simulate trimming by modifying the cache
cache.append(f"trimmed_{num_to_trim}")
self.cache = ThreadSafeLRUPromptCache(
max_size=10,
can_trim_fn=mock_can_trim,
trim_fn=mock_trim,
)
def test_longer_prefix_triggers_trim(self):
"""When cached sequence is longer, should trim to match requested prefix."""
long_tokens = [1, 2, 3, 4, 5]
short_tokens = [1, 2, 3]
self.cache.insert_cache("model1", long_tokens, ["original_cache"])
result_cache, remaining = self.cache.fetch_nearest_cache("model1", short_tokens)
# Should have called trim
self.assertTrue(len(self.trim_calls) > 0, "trim_fn should have been called")
# Result should be a trimmed copy, not the original
self.assertIn("trimmed_", str(result_cache))
def test_longer_prefix_without_trim_fn_returns_no_match(self):
"""Without trim functions, longer prefix should not match."""
cache_no_trim = ThreadSafeLRUPromptCache(max_size=10)
long_tokens = [1, 2, 3, 4, 5]
short_tokens = [1, 2, 3]
cache_no_trim.insert_cache("model1", long_tokens, ["cache"])
result_cache, remaining = cache_no_trim.fetch_nearest_cache("model1", short_tokens)
# Without trim_fn, should return no match
self.assertIsNone(result_cache)
self.assertEqual(remaining, short_tokens)
def test_longer_prefix_can_trim_false_returns_no_match(self):
"""When can_trim_fn returns False, should not attempt trim."""
cache = ThreadSafeLRUPromptCache(
max_size=10,
can_trim_fn=lambda c: False,
trim_fn=lambda c, n: None,
)
cache.insert_cache("model1", [1, 2, 3, 4, 5], ["cache"])
result_cache, remaining = cache.fetch_nearest_cache("model1", [1, 2, 3])
self.assertIsNone(result_cache)
self.assertEqual(remaining, [1, 2, 3])
class TestCacheNoMatch(unittest.TestCase):
"""Tests for no match behavior."""
def setUp(self):
self.cache = ThreadSafeLRUPromptCache(max_size=10)
def test_empty_cache_returns_none(self):
"""Empty cache should return None and all tokens as remaining."""
tokens = [1, 2, 3]
result_cache, remaining = self.cache.fetch_nearest_cache("model1", tokens)
self.assertIsNone(result_cache)
self.assertEqual(remaining, tokens)
def test_different_prefix_returns_none(self):
"""Tokens with different prefix should not match."""
self.cache.insert_cache("model1", [1, 2, 3], ["cache"])
# Completely different tokens
result_cache, remaining = self.cache.fetch_nearest_cache("model1", [4, 5, 6])
self.assertIsNone(result_cache)
self.assertEqual(remaining, [4, 5, 6])
def test_partial_prefix_mismatch_returns_none(self):
"""Tokens that diverge mid-sequence should not match."""
self.cache.insert_cache("model1", [1, 2, 3], ["cache"])
# Same start but diverges
result_cache, remaining = self.cache.fetch_nearest_cache("model1", [1, 2, 99])
self.assertIsNone(result_cache)
self.assertEqual(remaining, [1, 2, 99])
def test_wrong_model_returns_none(self):
"""Different model key should not match."""
self.cache.insert_cache("model1", [1, 2, 3], ["cache"])
result_cache, remaining = self.cache.fetch_nearest_cache("model2", [1, 2, 3])
self.assertIsNone(result_cache)
self.assertEqual(remaining, [1, 2, 3])
class TestCacheLRUEviction(unittest.TestCase):
"""Tests for LRU eviction behavior."""
def setUp(self):
self.cache = ThreadSafeLRUPromptCache(max_size=3)
def test_evicts_oldest_when_full(self):
"""Should evict least recently used entry when capacity exceeded."""
self.cache.insert_cache("model", [1], ["cache1"])
self.cache.insert_cache("model", [2], ["cache2"])
self.cache.insert_cache("model", [3], ["cache3"])
self.assertEqual(len(self.cache), 3)
# Insert 4th entry - should evict [1]
self.cache.insert_cache("model", [4], ["cache4"])
self.assertEqual(len(self.cache), 3)
# [1] should be evicted
result, _ = self.cache.fetch_nearest_cache("model", [1])
self.assertIsNone(result)
# [2], [3], [4] should still exist
for tokens in [[2], [3], [4]]:
# Re-insert since fetch extracts
self.cache.insert_cache("model", tokens, [f"cache{tokens[0]}"])
result2, _ = self.cache.fetch_nearest_cache("model", [2])
self.assertIsNotNone(result2)
def test_access_updates_lru_order(self):
"""Accessing an entry should move it to most recently used."""
self.cache.insert_cache("model", [1], ["cache1"])
self.cache.insert_cache("model", [2], ["cache2"])
self.cache.insert_cache("model", [3], ["cache3"])
# Access [1] to make it most recently used
cache1, _ = self.cache.fetch_nearest_cache("model", [1])
# Re-insert it (simulating normal usage pattern)
self.cache.insert_cache("model", [1], cache1)
# Now insert two more entries - should evict [2] then [3], not [1]
self.cache.insert_cache("model", [4], ["cache4"])
self.cache.insert_cache("model", [5], ["cache5"])
# [1] should still exist (was accessed, so not evicted)
result1, _ = self.cache.fetch_nearest_cache("model", [1])
self.assertIsNotNone(result1)
# [2] should be evicted (was oldest after [1] was accessed)
result2, _ = self.cache.fetch_nearest_cache("model", [2])
self.assertIsNone(result2)
class TestCacheReferenceCount(unittest.TestCase):
"""Tests for reference counting behavior."""
def setUp(self):
self.cache = ThreadSafeLRUPromptCache(max_size=10)
def test_multiple_inserts_increment_count(self):
"""Inserting same tokens multiple times should increment count."""
tokens = [1, 2, 3]
self.cache.insert_cache("model", tokens, ["cache"])
self.cache.insert_cache("model", tokens, ["cache"])
self.cache.insert_cache("model", tokens, ["cache"])
# Should still be one entry (with count=3 internally)
self.assertEqual(len(self.cache), 1)
# First two fetches should return copies (count decremented)
result1, _ = self.cache.fetch_nearest_cache("model", tokens)
self.assertIsNotNone(result1)
result2, _ = self.cache.fetch_nearest_cache("model", tokens)
self.assertIsNotNone(result2)
# Third fetch extracts the last reference
result3, _ = self.cache.fetch_nearest_cache("model", tokens)
self.assertIsNotNone(result3)
# Fourth fetch should return None (entry fully extracted)
result4, _ = self.cache.fetch_nearest_cache("model", tokens)
self.assertIsNone(result4)
def test_extract_with_high_count_returns_deep_copy(self):
"""When count > 1, extract should return a deep copy."""
tokens = [1, 2, 3]
original_cache = [{"nested": "data"}]
self.cache.insert_cache("model", tokens, original_cache)
self.cache.insert_cache("model", tokens, original_cache) # count=2
result1, _ = self.cache.fetch_nearest_cache("model", tokens)
# Modify the returned cache
result1[0]["nested"] = "modified"
# Second fetch should get unmodified copy
result2, _ = self.cache.fetch_nearest_cache("model", tokens)
self.assertEqual(result2[0]["nested"], "data")
class TestCacheMultiModel(unittest.TestCase):
"""Tests for multi-model namespacing."""
def setUp(self):
self.cache = ThreadSafeLRUPromptCache(max_size=10)
def test_same_tokens_different_models_are_separate(self):
"""Same token sequence under different models should be independent."""
tokens = [1, 2, 3]
self.cache.insert_cache("model_a", tokens, ["cache_a"])
self.cache.insert_cache("model_b", tokens, ["cache_b"])
self.assertEqual(len(self.cache), 2)
result_a, _ = self.cache.fetch_nearest_cache("model_a", tokens)
result_b, _ = self.cache.fetch_nearest_cache("model_b", tokens)
self.assertEqual(result_a, ["cache_a"])
self.assertEqual(result_b, ["cache_b"])
def test_eviction_across_models(self):
"""LRU eviction should work across different models."""
cache = ThreadSafeLRUPromptCache(max_size=3)
cache.insert_cache("model_a", [1], ["a1"])
cache.insert_cache("model_b", [1], ["b1"])
cache.insert_cache("model_a", [2], ["a2"])
self.assertEqual(len(cache), 3)
# Insert 4th - should evict model_a:[1] (oldest)
cache.insert_cache("model_b", [2], ["b2"])
result, _ = cache.fetch_nearest_cache("model_a", [1])
self.assertIsNone(result)
class TestCacheThreadSafety(unittest.TestCase):
"""Tests for thread safety with data integrity verification."""
def test_concurrent_inserts_no_data_loss(self):
"""Concurrent inserts should not lose data."""
cache = ThreadSafeLRUPromptCache(max_size=100)
num_threads = 10
inserts_per_thread = 20
def insert_entries(thread_id):
for i in range(inserts_per_thread):
tokens = [thread_id, i]
cache.insert_cache("model", tokens, [f"cache_{thread_id}_{i}"])
with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
futures = [executor.submit(insert_entries, tid) for tid in range(num_threads)]
concurrent.futures.wait(futures)
        # 10 threads x 20 inserts = 200 unique entries; with max_size=100,
        # LRU eviction should leave exactly 100 entries behind.
self.assertEqual(len(cache), 100)
def test_concurrent_fetch_and_insert_no_corruption(self):
"""Concurrent fetches and inserts should not corrupt data."""
cache = ThreadSafeLRUPromptCache(max_size=50)
errors = []
lock = threading.Lock()
# Pre-populate with known data
for i in range(20):
cache.insert_cache("model", [i], [f"original_{i}"])
def fetch_and_verify(thread_id):
try:
for _ in range(50):
token_id = thread_id % 20
result, remaining = cache.fetch_nearest_cache("model", [token_id])
if result is not None:
# Verify data integrity
expected_prefix = f"original_{token_id}"
if not str(result[0]).startswith("original_"):
with lock:
errors.append(f"Corrupted data: {result}")
# Re-insert to keep cache populated
cache.insert_cache("model", [token_id], result)
except Exception as e:
with lock:
errors.append(str(e))
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = [executor.submit(fetch_and_verify, tid) for tid in range(10)]
concurrent.futures.wait(futures)
self.assertEqual(errors, [], f"Thread safety errors: {errors}")
def test_concurrent_operations_maintain_cache_bounds(self):
"""Cache size should never exceed max_size under concurrent operations."""
max_size = 10
cache = ThreadSafeLRUPromptCache(max_size=max_size)
size_violations = []
lock = threading.Lock()
def random_operations(thread_id):
import random
for i in range(100):
tokens = [random.randint(0, 50)]
if random.random() < 0.7:
cache.insert_cache("model", tokens, [f"cache_{thread_id}_{i}"])
else:
cache.fetch_nearest_cache("model", tokens)
current_size = len(cache)
if current_size > max_size:
with lock:
size_violations.append(current_size)
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
futures = [executor.submit(random_operations, tid) for tid in range(10)]
concurrent.futures.wait(futures)
self.assertEqual(size_violations, [], f"Size exceeded max: {size_violations}")
self.assertLessEqual(len(cache), max_size)
class TestCacheClear(unittest.TestCase):
"""Tests for cache clear operation."""
def setUp(self):
self.cache = ThreadSafeLRUPromptCache(max_size=10)
def test_clear_removes_all_entries(self):
"""Clear should remove all entries."""
self.cache.insert_cache("model1", [1, 2], ["cache1"])
self.cache.insert_cache("model2", [3, 4], ["cache2"])
self.cache.insert_cache("model1", [5, 6], ["cache3"])
self.assertEqual(len(self.cache), 3)
self.cache.clear()
self.assertEqual(len(self.cache), 0)
def test_clear_allows_new_inserts(self):
"""After clear, new inserts should work normally."""
self.cache.insert_cache("model", [1], ["cache1"])
self.cache.clear()
self.cache.insert_cache("model", [2], ["cache2"])
self.assertEqual(len(self.cache), 1)
result, _ = self.cache.fetch_nearest_cache("model", [2])
self.assertEqual(result, ["cache2"])
if __name__ == "__main__":
unittest.main()

View File

@@ -1,16 +0,0 @@
.DEFAULT_GOAL := install
.PHONY: install
install:
bash install.sh
.PHONY: protogen-clean
protogen-clean:
$(RM) backend_pb2_grpc.py backend_pb2.py
.PHONY: clean
clean: protogen-clean
rm -rf venv __pycache__
.PHONY: test
test: install
bash test.sh

View File

@@ -1,113 +0,0 @@
#!/usr/bin/env python3
"""
This is an extra gRPC server of LocalAI for Moonshine transcription
"""
from concurrent import futures
import time
import argparse
import signal
import sys
import os
import backend_pb2
import backend_pb2_grpc
import moonshine_onnx
import grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# If PYTHON_GRPC_MAX_WORKERS is set in the environment, use it; otherwise default to 1
MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
# Implement the BackendServicer class with the service methods
class BackendServicer(backend_pb2_grpc.BackendServicer):
"""
BackendServicer is the class that implements the gRPC service
"""
def Health(self, request, context):
return backend_pb2.Reply(message=bytes("OK", 'utf-8'))
def LoadModel(self, request, context):
try:
print("Preparing models, please wait", file=sys.stderr)
# Store the model name for use in transcription
# Model name format: e.g., "moonshine/tiny"
self.model_name = request.Model
print(f"Model name set to: {self.model_name}", file=sys.stderr)
except Exception as err:
return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
return backend_pb2.Result(message="Model loaded successfully", success=True)
def AudioTranscription(self, request, context):
resultSegments = []
text = ""
try:
# moonshine_onnx.transcribe returns a list of strings
transcriptions = moonshine_onnx.transcribe(request.dst, self.model_name)
# Combine all transcriptions into a single text
if isinstance(transcriptions, list):
text = " ".join(transcriptions)
# Create segments for each transcription in the list
                for idx, trans in enumerate(transcriptions):
                    # Moonshine doesn't provide timing info, so emit one
                    # segment per transcription with zeroed timestamps.
                    resultSegments.append(backend_pb2.TranscriptSegment(
                        id=idx,
                        start=0,
                        end=0,
                        text=trans
))
else:
# Handle case where it's not a list (shouldn't happen, but be safe)
text = str(transcriptions)
resultSegments.append(backend_pb2.TranscriptSegment(
id=0,
start=0,
end=0,
text=text
))
except Exception as err:
print(f"Unexpected {err=}, {type(err)=}", file=sys.stderr)
return backend_pb2.TranscriptResult(segments=[], text="")
return backend_pb2.TranscriptResult(segments=resultSegments, text=text)
def serve(address):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=MAX_WORKERS),
options=[
('grpc.max_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_send_message_length', 50 * 1024 * 1024), # 50MB
('grpc.max_receive_message_length', 50 * 1024 * 1024), # 50MB
])
backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
server.add_insecure_port(address)
server.start()
print("Server started. Listening on: " + address, file=sys.stderr)
# Define the signal handler function
def signal_handler(sig, frame):
print("Received termination signal. Shutting down...")
server.stop(0)
sys.exit(0)
# Set the signal handlers for SIGINT and SIGTERM
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the gRPC server.")
parser.add_argument(
"--addr", default="localhost:50051", help="The address to bind the server to."
)
args = parser.parse_args()
serve(args.addr)
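
For context, a minimal client sketch against this backend. The TranscriptRequest message name is an assumption about LocalAI's generated stubs; the dst field and the "moonshine/tiny" model-name format come from the handler above:

import grpc
import backend_pb2
import backend_pb2_grpc

# Connect to the server started above (default --addr localhost:50051).
with grpc.insecure_channel("localhost:50051") as channel:
    stub = backend_pb2_grpc.BackendStub(channel)
    load = stub.LoadModel(backend_pb2.ModelOptions(Model="moonshine/tiny"))
    assert load.success, load.message
    # The handler reads the audio file path from the request's dst field.
    result = stub.AudioTranscription(backend_pb2.TranscriptRequest(dst="/path/to/audio.wav"))
    print(result.text)
    for seg in result.segments:
        print(seg.id, seg.text)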

View File

@@ -1,12 +0,0 @@
#!/bin/bash
set -e
backend_dir=$(dirname "$0")
if [ -d "$backend_dir/common" ]; then
    source "$backend_dir/common/libbackend.sh"
else
    source "$backend_dir/../common/libbackend.sh"
fi
installRequirements

Some files were not shown because too many files have changed in this diff.