Compare commits


1 Commit

Author               SHA1        Message   Date
Ettore Di Giacinto   3f52776a1c  WIP       2025-07-23 21:18:47 +02:00
644 changed files with 26414 additions and 78634 deletions

View File

@@ -1,8 +0,0 @@
# .air.toml
[build]
cmd = "make build"
bin = "./local-ai"
args_bin = [ "--debug" ]
include_ext = ["go", "html", "yaml", "toml", "json", "txt", "md"]
exclude_dir = ["pkg/grpc/proto"]
delay = 1000

View File

@@ -6,10 +6,6 @@ models
 backends
 examples/chatbot-ui/models
 backend/go/image/stablediffusion-ggml/build/
-backend/go/*/build
-backend/go/*/.cache
-backend/go/*/sources
-backend/go/*/package
 examples/rwkv/models
 examples/**/models
 Dockerfile*

.env
View File

@@ -32,6 +32,15 @@
 # Forces shutdown of the backends if busy (only if LOCALAI_SINGLE_ACTIVE_BACKEND is set)
 # LOCALAI_FORCE_BACKEND_SHUTDOWN=true
+## Specify a build type. Available: cublas, openblas, clblas.
+## cuBLAS: This is a GPU-accelerated version of the complete standard BLAS (Basic Linear Algebra Subprograms) library. It's provided by Nvidia and is part of their CUDA toolkit.
+## OpenBLAS: This is an open-source implementation of the BLAS library that aims to provide highly optimized code for various platforms. It includes support for multi-threading and can be compiled to use hardware-specific features for additional performance. OpenBLAS can run on many kinds of hardware, including CPUs from Intel, AMD, and ARM.
+## clBLAS: This is an open-source implementation of the BLAS library that uses OpenCL, a framework for writing programs that execute across heterogeneous platforms consisting of CPUs, GPUs, and other processors. clBLAS is designed to take advantage of the parallel computing power of GPUs but can also run on any hardware that supports OpenCL. This includes hardware from different vendors like Nvidia, AMD, and Intel.
+# BUILD_TYPE=openblas
+## Uncomment and set to true to enable rebuilding from source
+# REBUILD=true
 ## Path where to store generated images
 # LOCALAI_IMAGE_PATH=/tmp/generated/images

View File

@@ -1,445 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"regexp"
"slices"
"strings"
"github.com/ghodss/yaml"
hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
cogito "github.com/mudler/cogito"
"github.com/mudler/cogito/structures"
"github.com/sashabaranov/go-openai/jsonschema"
)
var (
openAIModel = os.Getenv("OPENAI_MODEL")
openAIKey = os.Getenv("OPENAI_KEY")
openAIBaseURL = os.Getenv("OPENAI_BASE_URL")
galleryIndexPath = os.Getenv("GALLERY_INDEX_PATH")
// default LLM client
llm = cogito.NewOpenAILLM(openAIModel, openAIKey, openAIBaseURL)
)
// cleanTextContent removes trailing spaces, tabs, and normalizes line endings
// to prevent YAML linting issues like trailing spaces and multiple empty lines
func cleanTextContent(text string) string {
lines := strings.Split(text, "\n")
var cleanedLines []string
var prevEmpty bool
for _, line := range lines {
// Remove all trailing whitespace (spaces, tabs, etc.)
trimmed := strings.TrimRight(line, " \t\r")
// Avoid multiple consecutive empty lines
if trimmed == "" {
if !prevEmpty {
cleanedLines = append(cleanedLines, "")
}
prevEmpty = true
} else {
cleanedLines = append(cleanedLines, trimmed)
prevEmpty = false
}
}
// Remove trailing empty lines from the result
result := strings.Join(cleanedLines, "\n")
return stripThinkingTags(strings.TrimRight(result, "\n"))
}
type galleryModel struct {
Name string `yaml:"name"`
Urls []string `yaml:"urls"`
}
// isModelExisting checks if a specific model ID exists in the gallery using text search
func isModelExisting(modelID string) (bool, error) {
indexPath := getGalleryIndexPath()
content, err := os.ReadFile(indexPath)
if err != nil {
return false, fmt.Errorf("failed to read %s: %w", indexPath, err)
}
var galleryModels []galleryModel
err = yaml.Unmarshal(content, &galleryModels)
if err != nil {
return false, fmt.Errorf("failed to unmarshal %s: %w", indexPath, err)
}
for _, galleryModel := range galleryModels {
if slices.Contains(galleryModel.Urls, modelID) {
return true, nil
}
}
return false, nil
}
// filterExistingModels removes models that already exist in the gallery
func filterExistingModels(models []ProcessedModel) ([]ProcessedModel, error) {
var filteredModels []ProcessedModel
for _, model := range models {
exists, err := isModelExisting(model.ModelID)
if err != nil {
fmt.Printf("Error checking if model %s exists: %v, skipping\n", model.ModelID, err)
continue
}
if !exists {
filteredModels = append(filteredModels, model)
} else {
fmt.Printf("Skipping existing model: %s\n", model.ModelID)
}
}
fmt.Printf("Filtered out %d existing models, %d new models remaining\n",
len(models)-len(filteredModels), len(filteredModels))
return filteredModels, nil
}
// getGalleryIndexPath returns the gallery index file path, with a default fallback
func getGalleryIndexPath() string {
if galleryIndexPath != "" {
return galleryIndexPath
}
return "gallery/index.yaml"
}
func stripThinkingTags(content string) string {
// Remove content between <thinking> and </thinking> (including multi-line)
content = regexp.MustCompile(`(?s)<thinking>.*?</thinking>`).ReplaceAllString(content, "")
// Remove content between <think> and </think> (including multi-line)
content = regexp.MustCompile(`(?s)<think>.*?</think>`).ReplaceAllString(content, "")
// Clean up any extra whitespace
content = strings.TrimSpace(content)
return content
}
func getRealReadme(ctx context.Context, repository string) (string, error) {
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
`Your task is to get a clear description of a large language model from huggingface by using the provided tool. I will share with you a repository that might be quantized, and as such probably not by the original model author. We need to get the real description of the model, and not the one that might be quantized. You will have to call the tool to get the readme more than once by figuring out from the quantized readme which is the base model readme. This is the repository: `+repository)
// Execute with tools
result, err := cogito.ExecuteTools(llm, fragment,
cogito.WithIterations(3),
cogito.WithMaxAttempts(3),
cogito.WithTools(&HFReadmeTool{client: hfapi.NewClient()}))
if err != nil {
return "", err
}
result = result.AddMessage("user", "Describe the model in a clear and concise way that can be shared in a model gallery.")
// Get a response
newFragment, err := llm.Ask(ctx, result)
if err != nil {
return "", err
}
content := newFragment.LastMessage().Content
return cleanTextContent(content), nil
}
func selectMostInterestingModels(ctx context.Context, searchResult *SearchResult) ([]ProcessedModel, error) {
if len(searchResult.Models) == 1 {
return searchResult.Models, nil
}
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
`Your task is to analyze a list of AI models and select the most interesting ones for a model gallery. You will be given detailed information about multiple models including their metadata, file information, and README content.
Consider the following criteria when selecting models:
1. Model popularity (download count)
2. Model recency (last modified date)
3. Model completeness (has preferred model file, README, etc.)
4. Model uniqueness (not duplicates or very similar models)
5. Model quality (based on README content and description)
6. Model utility (practical applications)
You should select models that would be most valuable for users browsing a model gallery. Prioritize models that are:
- Well-documented with clear READMEs
- Recently updated
- Popular (high download count)
- Have the preferred quantization format available
- Offer unique capabilities or are from reputable authors
Return your analysis and selection reasoning.`)
// Add the search results as context
modelsInfo := fmt.Sprintf("Found %d models matching '%s' with quantization preference '%s':\n\n",
searchResult.TotalModelsFound, searchResult.SearchTerm, searchResult.Quantization)
for i, model := range searchResult.Models {
modelsInfo += fmt.Sprintf("Model %d:\n", i+1)
modelsInfo += fmt.Sprintf(" ID: %s\n", model.ModelID)
modelsInfo += fmt.Sprintf(" Author: %s\n", model.Author)
modelsInfo += fmt.Sprintf(" Downloads: %d\n", model.Downloads)
modelsInfo += fmt.Sprintf(" Last Modified: %s\n", model.LastModified)
modelsInfo += fmt.Sprintf(" Files: %d files\n", len(model.Files))
if model.PreferredModelFile != nil {
modelsInfo += fmt.Sprintf(" Preferred Model File: %s (%d bytes)\n",
model.PreferredModelFile.Path, model.PreferredModelFile.Size)
} else {
modelsInfo += " No preferred model file found\n"
}
if model.ReadmeContent != "" {
modelsInfo += fmt.Sprintf(" README: %s\n", model.ReadmeContent)
}
if model.ProcessingError != "" {
modelsInfo += fmt.Sprintf(" Processing Error: %s\n", model.ProcessingError)
}
modelsInfo += "\n"
}
fragment = fragment.AddMessage("user", modelsInfo)
fragment = fragment.AddMessage("user", "Based on your analysis, select the top 5 most interesting models and provide a brief explanation for each selection. Also, create a filtered SearchResult with only the selected models. Return just a list of repositories IDs, you will later be asked to output it as a JSON array with the json tool.")
// Get a response
newFragment, err := llm.Ask(ctx, fragment)
if err != nil {
return nil, err
}
fmt.Println(newFragment.LastMessage().Content)
repositories := struct {
Repositories []string `json:"repositories"`
}{}
s := structures.Structure{
Schema: jsonschema.Definition{
Type: jsonschema.Object,
AdditionalProperties: false,
Properties: map[string]jsonschema.Definition{
"repositories": {
Type: jsonschema.Array,
Items: &jsonschema.Definition{Type: jsonschema.String},
Description: "The trending repositories IDs",
},
},
Required: []string{"repositories"},
},
Object: &repositories,
}
err = newFragment.ExtractStructure(ctx, llm, s)
if err != nil {
return nil, err
}
filteredModels := []ProcessedModel{}
for _, m := range searchResult.Models {
if slices.Contains(repositories.Repositories, m.ModelID) {
filteredModels = append(filteredModels, m)
}
}
return filteredModels, nil
}
// ModelMetadata represents extracted metadata from a model
type ModelMetadata struct {
Tags []string `json:"tags"`
License string `json:"license"`
}
// extractModelMetadata extracts tags and license from model README and documentation
func extractModelMetadata(ctx context.Context, model ProcessedModel) ([]string, string, error) {
// Create a conversation fragment
fragment := cogito.NewEmptyFragment().
AddMessage("user",
`Your task is to extract metadata from an AI model's README and documentation. You will be provided with:
1. Model information (ID, author, description)
2. README content
You need to extract:
1. **Tags**: An array of relevant tags that describe the model. Use common tags from the gallery such as:
- llm, gguf, gpu, cpu, multimodal, image-to-text, text-to-text, text-to-speech, tts
- thinking, reasoning, chat, instruction-tuned, code, vision
- Model family names (e.g., llama, qwen, mistral, gemma) if applicable
- Any other relevant descriptive tags
Select 3-8 most relevant tags.
2. **License**: The license identifier (e.g., "apache-2.0", "mit", "llama2", "gpl-3.0", "bsd", "cc-by-4.0").
If no license is found, return an empty string.
Return the extracted metadata in a structured format.`)
// Add model information
modelInfo := "Model Information:\n"
modelInfo += fmt.Sprintf(" ID: %s\n", model.ModelID)
modelInfo += fmt.Sprintf(" Author: %s\n", model.Author)
modelInfo += fmt.Sprintf(" Downloads: %d\n", model.Downloads)
if model.ReadmeContent != "" {
modelInfo += fmt.Sprintf(" README Content:\n%s\n", model.ReadmeContent)
} else if model.ReadmeContentPreview != "" {
modelInfo += fmt.Sprintf(" README Preview: %s\n", model.ReadmeContentPreview)
}
fragment = fragment.AddMessage("user", modelInfo)
fragment = fragment.AddMessage("user", "Extract the tags and license from the model information. Return the metadata as a JSON object with 'tags' (array of strings) and 'license' (string).")
// Get a response
newFragment, err := llm.Ask(ctx, fragment)
if err != nil {
return nil, "", err
}
// Extract structured metadata
metadata := ModelMetadata{}
s := structures.Structure{
Schema: jsonschema.Definition{
Type: jsonschema.Object,
AdditionalProperties: false,
Properties: map[string]jsonschema.Definition{
"tags": {
Type: jsonschema.Array,
Items: &jsonschema.Definition{Type: jsonschema.String},
Description: "Array of relevant tags describing the model",
},
"license": {
Type: jsonschema.String,
Description: "License identifier (e.g., apache-2.0, mit, llama2). Empty string if not found.",
},
},
Required: []string{"tags", "license"},
},
Object: &metadata,
}
err = newFragment.ExtractStructure(ctx, llm, s)
if err != nil {
return nil, "", err
}
return metadata.Tags, metadata.License, nil
}
// extractIconFromReadme scans the README content for image URLs and returns the first suitable icon URL found
func extractIconFromReadme(readmeContent string) string {
if readmeContent == "" {
return ""
}
// Regular expressions to match image URLs in various formats (case-insensitive)
// Match markdown image syntax: ![alt](url) - case insensitive extensions
markdownImageRegex := regexp.MustCompile(`(?i)!\[[^\]]*\]\(([^)]+\.(png|jpg|jpeg|svg|webp|gif))\)`)
// Match HTML img tags: <img src="url">
htmlImageRegex := regexp.MustCompile(`(?i)<img[^>]+src=["']([^"']+\.(png|jpg|jpeg|svg|webp|gif))["']`)
// Match plain URLs ending with image extensions
plainImageRegex := regexp.MustCompile(`(?i)https?://[^\s<>"']+\.(png|jpg|jpeg|svg|webp|gif)`)
// Try markdown format first
matches := markdownImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 1 && matches[1] != "" {
url := strings.TrimSpace(matches[1])
// Prefer HuggingFace CDN URLs or absolute URLs
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
}
}
// Try HTML img tags
matches = htmlImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 1 && matches[1] != "" {
url := strings.TrimSpace(matches[1])
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
}
}
// Try plain URLs
matches = plainImageRegex.FindStringSubmatch(readmeContent)
if len(matches) > 0 {
url := strings.TrimSpace(matches[0])
if strings.HasPrefix(strings.ToLower(url), "http") {
return url
}
}
return ""
}
// getHuggingFaceAvatarURL attempts to get the HuggingFace avatar URL for a user
func getHuggingFaceAvatarURL(author string) string {
if author == "" {
return ""
}
// Try to fetch user info from HuggingFace API
// HuggingFace API endpoint: https://huggingface.co/api/users/{username}
baseURL := "https://huggingface.co"
userURL := fmt.Sprintf("%s/api/users/%s", baseURL, author)
req, err := http.NewRequest("GET", userURL, nil)
if err != nil {
return ""
}
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return ""
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return ""
}
// Parse the response to get avatar URL
var userInfo map[string]interface{}
body, err := io.ReadAll(resp.Body)
if err != nil {
return ""
}
if err := json.Unmarshal(body, &userInfo); err != nil {
return ""
}
// Try to extract avatar URL from response
if avatar, ok := userInfo["avatarUrl"].(string); ok && avatar != "" {
return avatar
}
if avatar, ok := userInfo["avatar"].(string); ok && avatar != "" {
return avatar
}
return ""
}
// extractModelIcon extracts icon URL from README or falls back to HuggingFace avatar
func extractModelIcon(model ProcessedModel) string {
// First, try to extract icon from README
if icon := extractIconFromReadme(model.ReadmeContent); icon != "" {
return icon
}
// Fallback: Try to get HuggingFace user avatar
if model.Author != "" {
if avatar := getHuggingFaceAvatarURL(model.Author); avatar != "" {
return avatar
}
}
return ""
}

View File

@@ -1,200 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/ghodss/yaml"
"github.com/mudler/LocalAI/core/gallery/importers"
)
func formatTextContent(text string) string {
return formatTextContentWithIndent(text, 4, 6)
}
// formatTextContentWithIndent formats text content with specified base and list item indentation
func formatTextContentWithIndent(text string, baseIndent int, listItemIndent int) string {
var formattedLines []string
lines := strings.Split(text, "\n")
for _, line := range lines {
trimmed := strings.TrimRight(line, " \t\r")
if trimmed == "" {
// Keep empty lines as empty (no indentation)
formattedLines = append(formattedLines, "")
} else {
// Preserve relative indentation from yaml.Marshal output
// Count existing leading spaces to preserve relative structure
leadingSpaces := len(trimmed) - len(strings.TrimLeft(trimmed, " \t"))
trimmedStripped := strings.TrimLeft(trimmed, " \t")
var totalIndent int
if strings.HasPrefix(trimmedStripped, "-") {
// List items: use listItemIndent (ignore existing leading spaces)
totalIndent = listItemIndent
} else {
// Regular lines: use baseIndent + preserve relative indentation
// This handles both top-level keys (leadingSpaces=0) and nested properties (leadingSpaces>0)
totalIndent = baseIndent + leadingSpaces
}
indentStr := strings.Repeat(" ", totalIndent)
formattedLines = append(formattedLines, indentStr+trimmedStripped)
}
}
formattedText := strings.Join(formattedLines, "\n")
// Remove any trailing spaces from the formatted description
formattedText = strings.TrimRight(formattedText, " \t")
return formattedText
}
// generateYAMLEntry generates a YAML entry for a model using the specified anchor
func generateYAMLEntry(model ProcessedModel, quantization string) string {
modelConfig, err := importers.DiscoverModelConfig("https://huggingface.co/"+model.ModelID, json.RawMessage(`{ "quantization": "`+quantization+`"}`))
if err != nil {
panic(err)
}
// Extract model name from ModelID
parts := strings.Split(model.ModelID, "/")
modelName := model.ModelID
if len(parts) > 0 {
modelName = strings.ToLower(parts[len(parts)-1])
}
// Remove common suffixes
modelName = strings.ReplaceAll(modelName, "-gguf", "")
modelName = strings.ReplaceAll(modelName, "-q4_k_m", "")
modelName = strings.ReplaceAll(modelName, "-q4_k_s", "")
modelName = strings.ReplaceAll(modelName, "-q3_k_m", "")
modelName = strings.ReplaceAll(modelName, "-q2_k", "")
description := model.ReadmeContent
if description == "" {
description = fmt.Sprintf("AI model: %s", modelName)
}
// Clean up description to prevent YAML linting issues
description = cleanTextContent(description)
formattedDescription := formatTextContent(description)
configFile := formatTextContent(modelConfig.ConfigFile)
filesYAML, _ := yaml.Marshal(modelConfig.Files)
// Files section: list items need 4 spaces (not 6), since files: is at 2 spaces
files := formatTextContentWithIndent(string(filesYAML), 4, 4)
// Build metadata sections
var metadataSections []string
// Add license if present
if model.License != "" {
metadataSections = append(metadataSections, fmt.Sprintf(` license: "%s"`, model.License))
}
// Add tags if present
if len(model.Tags) > 0 {
tagsYAML, _ := yaml.Marshal(model.Tags)
tagsFormatted := formatTextContentWithIndent(string(tagsYAML), 4, 4)
tagsFormatted = strings.TrimRight(tagsFormatted, "\n")
metadataSections = append(metadataSections, fmt.Sprintf(" tags:\n%s", tagsFormatted))
}
// Add icon if present
if model.Icon != "" {
metadataSections = append(metadataSections, fmt.Sprintf(` icon: %s`, model.Icon))
}
// Build the metadata block
metadataBlock := ""
if len(metadataSections) > 0 {
metadataBlock = strings.Join(metadataSections, "\n") + "\n"
}
yamlTemplate := ""
yamlTemplate = `- name: "%s"
url: "github:mudler/LocalAI/gallery/virtual.yaml@master"
urls:
- https://huggingface.co/%s
description: |
%s%s
overrides:
%s
files:
%s`
// Trim trailing newlines from formatted sections to prevent extra blank lines
formattedDescription = strings.TrimRight(formattedDescription, "\n")
configFile = strings.TrimRight(configFile, "\n")
files = strings.TrimRight(files, "\n")
// Add newline before metadata block if present
if metadataBlock != "" {
metadataBlock = "\n" + strings.TrimRight(metadataBlock, "\n")
}
return fmt.Sprintf(yamlTemplate,
modelName,
model.ModelID,
formattedDescription,
metadataBlock,
configFile,
files,
)
}
// generateYAMLForModels generates YAML entries for selected models and appends to index.yaml
func generateYAMLForModels(ctx context.Context, models []ProcessedModel, quantization string) error {
// Generate YAML entries for each model
var yamlEntries []string
for _, model := range models {
fmt.Printf("Generating YAML entry for model: %s\n", model.ModelID)
// Generate YAML entry
yamlEntry := generateYAMLEntry(model, quantization)
yamlEntries = append(yamlEntries, yamlEntry)
}
// Prepend to index.yaml (write at the top)
if len(yamlEntries) > 0 {
indexPath := getGalleryIndexPath()
fmt.Printf("Prepending YAML entries to %s...\n", indexPath)
// Read current content
content, err := os.ReadFile(indexPath)
if err != nil {
return fmt.Errorf("failed to read %s: %w", indexPath, err)
}
existingContent := string(content)
yamlBlock := strings.Join(yamlEntries, "\n")
// Check if file starts with "---"
var newContent string
if strings.HasPrefix(existingContent, "---\n") {
// File starts with "---", prepend new entries after it
restOfContent := strings.TrimPrefix(existingContent, "---\n")
// Ensure proper spacing: "---\n" + new entries + "\n" + rest of content
newContent = "---\n" + yamlBlock + "\n" + restOfContent
} else if strings.HasPrefix(existingContent, "---") {
// File starts with "---" but no newline after
restOfContent := strings.TrimPrefix(existingContent, "---")
newContent = "---\n" + yamlBlock + "\n" + strings.TrimPrefix(restOfContent, "\n")
} else {
// No "---" at start, prepend new entries at the very beginning
// Trim leading whitespace from existing content
existingContent = strings.TrimLeft(existingContent, " \t\n\r")
newContent = yamlBlock + "\n" + existingContent
}
// Write back to file
err = os.WriteFile(indexPath, []byte(newContent), 0644)
if err != nil {
return fmt.Errorf("failed to write %s: %w", indexPath, err)
}
fmt.Printf("Successfully prepended %d models to %s\n", len(yamlEntries), indexPath)
}
return nil
}

View File

@@ -1,383 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"time"
hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
)
// ProcessedModelFile represents a processed model file with additional metadata
type ProcessedModelFile struct {
Path string `json:"path"`
Size int64 `json:"size"`
SHA256 string `json:"sha256"`
IsReadme bool `json:"is_readme"`
FileType string `json:"file_type"` // "model", "readme", "other"
}
// ProcessedModel represents a processed model with all gathered metadata
type ProcessedModel struct {
ModelID string `json:"model_id"`
Author string `json:"author"`
Downloads int `json:"downloads"`
LastModified string `json:"last_modified"`
Files []ProcessedModelFile `json:"files"`
PreferredModelFile *ProcessedModelFile `json:"preferred_model_file,omitempty"`
ReadmeFile *ProcessedModelFile `json:"readme_file,omitempty"`
ReadmeContent string `json:"readme_content,omitempty"`
ReadmeContentPreview string `json:"readme_content_preview,omitempty"`
QuantizationPreferences []string `json:"quantization_preferences"`
ProcessingError string `json:"processing_error,omitempty"`
Tags []string `json:"tags,omitempty"`
License string `json:"license,omitempty"`
Icon string `json:"icon,omitempty"`
}
// SearchResult represents the complete result of searching and processing models
type SearchResult struct {
SearchTerm string `json:"search_term"`
Limit int `json:"limit"`
Quantization string `json:"quantization"`
TotalModelsFound int `json:"total_models_found"`
Models []ProcessedModel `json:"models"`
FormattedOutput string `json:"formatted_output"`
}
// AddedModelSummary represents a summary of models added to the gallery
type AddedModelSummary struct {
SearchTerm string `json:"search_term"`
TotalFound int `json:"total_found"`
ModelsAdded int `json:"models_added"`
AddedModelIDs []string `json:"added_model_ids"`
AddedModelURLs []string `json:"added_model_urls"`
Quantization string `json:"quantization"`
ProcessingTime string `json:"processing_time"`
}
func main() {
startTime := time.Now()
// Check for synthetic mode
syntheticMode := os.Getenv("SYNTHETIC_MODE")
if syntheticMode == "true" || syntheticMode == "1" {
fmt.Println("Running in SYNTHETIC MODE - generating random test data")
err := runSyntheticMode()
if err != nil {
fmt.Fprintf(os.Stderr, "Error in synthetic mode: %v\n", err)
os.Exit(1)
}
return
}
// Get configuration from environment variables
searchTerm := os.Getenv("SEARCH_TERM")
if searchTerm == "" {
searchTerm = "GGUF"
}
limitStr := os.Getenv("LIMIT")
if limitStr == "" {
limitStr = "5"
}
limit, err := strconv.Atoi(limitStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing LIMIT: %v\n", err)
os.Exit(1)
}
quantization := os.Getenv("QUANTIZATION")
maxModels := os.Getenv("MAX_MODELS")
if maxModels == "" {
maxModels = "1"
}
maxModelsInt, err := strconv.Atoi(maxModels)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing MAX_MODELS: %v\n", err)
os.Exit(1)
}
// Print configuration
fmt.Printf("Gallery Agent Configuration:\n")
fmt.Printf(" Search Term: %s\n", searchTerm)
fmt.Printf(" Limit: %d\n", limit)
fmt.Printf(" Quantization: %s\n", quantization)
fmt.Printf(" Max Models to Add: %d\n", maxModelsInt)
fmt.Printf(" Gallery Index Path: %s\n", os.Getenv("GALLERY_INDEX_PATH"))
fmt.Println()
result, err := searchAndProcessModels(searchTerm, limit, quantization)
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
os.Exit(1)
}
fmt.Println(result.FormattedOutput)
var models []ProcessedModel
if len(result.Models) > 1 {
fmt.Println("More than one model found (", len(result.Models), "), using AI agent to select the most interesting models")
for _, model := range result.Models {
fmt.Println("Model: ", model.ModelID)
}
// Use AI agent to select the most interesting models
fmt.Println("Using AI agent to select the most interesting models...")
models, err = selectMostInterestingModels(context.Background(), result)
if err != nil {
fmt.Fprintf(os.Stderr, "Error in model selection: %v\n", err)
// Continue with original result if selection fails
models = result.Models
}
} else if len(result.Models) == 1 {
models = result.Models
fmt.Println("Only one model found, using it directly")
}
fmt.Print(models)
// Filter out models that already exist in the gallery
fmt.Println("Filtering out existing models...")
models, err = filterExistingModels(models)
if err != nil {
fmt.Fprintf(os.Stderr, "Error filtering existing models: %v\n", err)
os.Exit(1)
}
// Limit to maxModelsInt after filtering
if len(models) > maxModelsInt {
models = models[:maxModelsInt]
}
// Track added models for summary
var addedModelIDs []string
var addedModelURLs []string
// Generate YAML entries and append to gallery/index.yaml
if len(models) > 0 {
for _, model := range models {
addedModelIDs = append(addedModelIDs, model.ModelID)
// Generate Hugging Face URL for the model
modelURL := fmt.Sprintf("https://huggingface.co/%s", model.ModelID)
addedModelURLs = append(addedModelURLs, modelURL)
}
fmt.Println("Generating YAML entries for selected models...")
err = generateYAMLForModels(context.Background(), models, quantization)
if err != nil {
fmt.Fprintf(os.Stderr, "Error generating YAML entries: %v\n", err)
os.Exit(1)
}
} else {
fmt.Println("No new models to add to the gallery.")
}
// Create and write summary
processingTime := time.Since(startTime).String()
summary := AddedModelSummary{
SearchTerm: searchTerm,
TotalFound: result.TotalModelsFound,
ModelsAdded: len(addedModelIDs),
AddedModelIDs: addedModelIDs,
AddedModelURLs: addedModelURLs,
Quantization: quantization,
ProcessingTime: processingTime,
}
// Write summary to file
summaryData, err := json.MarshalIndent(summary, "", " ")
if err != nil {
fmt.Fprintf(os.Stderr, "Error marshaling summary: %v\n", err)
} else {
err = os.WriteFile("gallery-agent-summary.json", summaryData, 0644)
if err != nil {
fmt.Fprintf(os.Stderr, "Error writing summary file: %v\n", err)
} else {
fmt.Printf("Summary written to gallery-agent-summary.json\n")
}
}
}
func searchAndProcessModels(searchTerm string, limit int, quantization string) (*SearchResult, error) {
client := hfapi.NewClient()
var outputBuilder strings.Builder
fmt.Println("Searching for models...")
// Initialize the result struct
result := &SearchResult{
SearchTerm: searchTerm,
Limit: limit,
Quantization: quantization,
Models: []ProcessedModel{},
}
models, err := client.GetLatest(searchTerm, limit)
if err != nil {
return nil, fmt.Errorf("failed to fetch models: %w", err)
}
fmt.Println("Models found:", len(models))
result.TotalModelsFound = len(models)
if len(models) == 0 {
outputBuilder.WriteString("No models found.\n")
result.FormattedOutput = outputBuilder.String()
return result, nil
}
outputBuilder.WriteString(fmt.Sprintf("Found %d models matching '%s':\n\n", len(models), searchTerm))
// Process each model
for i, model := range models {
outputBuilder.WriteString(fmt.Sprintf("%d. Processing Model: %s\n", i+1, model.ModelID))
outputBuilder.WriteString(fmt.Sprintf(" Author: %s\n", model.Author))
outputBuilder.WriteString(fmt.Sprintf(" Downloads: %d\n", model.Downloads))
outputBuilder.WriteString(fmt.Sprintf(" Last Modified: %s\n", model.LastModified))
// Initialize processed model struct
processedModel := ProcessedModel{
ModelID: model.ModelID,
Author: model.Author,
Downloads: model.Downloads,
LastModified: model.LastModified,
QuantizationPreferences: []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
}
// Get detailed model information
details, err := client.GetModelDetails(model.ModelID)
if err != nil {
errorMsg := fmt.Sprintf(" Error getting model details: %v\n", err)
outputBuilder.WriteString(errorMsg)
processedModel.ProcessingError = err.Error()
result.Models = append(result.Models, processedModel)
continue
}
// Define quantization preferences (in order of preference)
quantizationPreferences := []string{quantization, "Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"}
// Find preferred model file
preferredModelFile := hfapi.FindPreferredModelFile(details.Files, quantizationPreferences)
// Process files
processedFiles := make([]ProcessedModelFile, len(details.Files))
for j, file := range details.Files {
fileType := "other"
if file.IsReadme {
fileType = "readme"
} else if preferredModelFile != nil && file.Path == preferredModelFile.Path {
fileType = "model"
}
processedFiles[j] = ProcessedModelFile{
Path: file.Path,
Size: file.Size,
SHA256: file.SHA256,
IsReadme: file.IsReadme,
FileType: fileType,
}
}
processedModel.Files = processedFiles
// Set preferred model file
if preferredModelFile != nil {
for _, file := range processedFiles {
if file.Path == preferredModelFile.Path {
processedModel.PreferredModelFile = &file
break
}
}
}
// Print file information
outputBuilder.WriteString(fmt.Sprintf(" Files found: %d\n", len(details.Files)))
if preferredModelFile != nil {
outputBuilder.WriteString(fmt.Sprintf(" Preferred Model File: %s (SHA256: %s)\n",
preferredModelFile.Path,
preferredModelFile.SHA256))
} else {
outputBuilder.WriteString(fmt.Sprintf(" No model file found with quantization preferences: %v\n", quantizationPreferences))
}
if details.ReadmeFile != nil {
outputBuilder.WriteString(fmt.Sprintf(" README File: %s\n", details.ReadmeFile.Path))
// Find and set readme file
for _, file := range processedFiles {
if file.IsReadme {
processedModel.ReadmeFile = &file
break
}
}
fmt.Println("Getting real readme for", model.ModelID, "waiting...")
// Use agent to get the real readme and prepare the model description
readmeContent, err := getRealReadme(context.Background(), model.ModelID)
if err == nil {
processedModel.ReadmeContent = readmeContent
processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
processedModel.ReadmeContentPreview))
} else {
fmt.Printf(" Warning: Failed to get real readme: %v\n", err)
}
fmt.Println("Real readme got", readmeContent)
// Extract metadata (tags, license) from README using LLM
fmt.Println("Extracting metadata for", model.ModelID, "waiting...")
tags, license, err := extractModelMetadata(context.Background(), processedModel)
if err == nil {
processedModel.Tags = tags
processedModel.License = license
outputBuilder.WriteString(fmt.Sprintf(" Tags: %v\n", tags))
outputBuilder.WriteString(fmt.Sprintf(" License: %s\n", license))
} else {
fmt.Printf(" Warning: Failed to extract metadata: %v\n", err)
}
// Extract icon from README or use HuggingFace avatar
icon := extractModelIcon(processedModel)
if icon != "" {
processedModel.Icon = icon
outputBuilder.WriteString(fmt.Sprintf(" Icon: %s\n", icon))
}
// Get README content
// readmeContent, err := client.GetReadmeContent(model.ModelID, details.ReadmeFile.Path)
// if err == nil {
// processedModel.ReadmeContent = readmeContent
// processedModel.ReadmeContentPreview = truncateString(readmeContent, 200)
// outputBuilder.WriteString(fmt.Sprintf(" README Content Preview: %s\n",
// processedModel.ReadmeContentPreview))
// }
}
// Print all files with their checksums
outputBuilder.WriteString(" All Files:\n")
for _, file := range processedFiles {
outputBuilder.WriteString(fmt.Sprintf(" - %s (%s, %d bytes", file.Path, file.FileType, file.Size))
if file.SHA256 != "" {
outputBuilder.WriteString(fmt.Sprintf(", SHA256: %s", file.SHA256))
}
outputBuilder.WriteString(")\n")
}
outputBuilder.WriteString("\n")
result.Models = append(result.Models, processedModel)
}
result.FormattedOutput = outputBuilder.String()
return result, nil
}
func truncateString(s string, maxLen int) string {
if len(s) <= maxLen {
return s
}
return s[:maxLen] + "..."
}

View File

@@ -1,224 +0,0 @@
package main
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
)
// runSyntheticMode generates synthetic test data and appends it to the gallery
func runSyntheticMode() error {
generator := NewSyntheticDataGenerator()
// Generate a random number of synthetic models (1-3)
numModels := generator.rand.Intn(3) + 1
fmt.Printf("Generating %d synthetic models for testing...\n", numModels)
var models []ProcessedModel
for i := 0; i < numModels; i++ {
model := generator.GenerateProcessedModel()
models = append(models, model)
fmt.Printf("Generated synthetic model: %s\n", model.ModelID)
}
// Generate YAML entries and append to gallery/index.yaml
fmt.Println("Generating YAML entries for synthetic models...")
err := generateYAMLForModels(context.Background(), models, "Q4_K_M")
if err != nil {
return fmt.Errorf("error generating YAML entries: %w", err)
}
fmt.Printf("Successfully added %d synthetic models to the gallery for testing!\n", len(models))
return nil
}
// SyntheticDataGenerator provides methods to generate synthetic test data
type SyntheticDataGenerator struct {
rand *rand.Rand
}
// NewSyntheticDataGenerator creates a new synthetic data generator
func NewSyntheticDataGenerator() *SyntheticDataGenerator {
return &SyntheticDataGenerator{
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
}
}
// GenerateProcessedModelFile creates a synthetic ProcessedModelFile
func (g *SyntheticDataGenerator) GenerateProcessedModelFile() ProcessedModelFile {
fileTypes := []string{"model", "readme", "other"}
fileType := fileTypes[g.rand.Intn(len(fileTypes))]
var path string
var isReadme bool
switch fileType {
case "model":
path = fmt.Sprintf("model-%s.gguf", g.randomString(8))
isReadme = false
case "readme":
path = "README.md"
isReadme = true
default:
path = fmt.Sprintf("file-%s.txt", g.randomString(6))
isReadme = false
}
return ProcessedModelFile{
Path: path,
Size: int64(g.rand.Intn(1000000000) + 1000000), // 1MB to 1GB
SHA256: g.randomSHA256(),
IsReadme: isReadme,
FileType: fileType,
}
}
// GenerateProcessedModel creates a synthetic ProcessedModel
func (g *SyntheticDataGenerator) GenerateProcessedModel() ProcessedModel {
authors := []string{"microsoft", "meta", "google", "openai", "anthropic", "mistralai", "huggingface"}
modelNames := []string{"llama", "gpt", "claude", "mistral", "gemma", "phi", "qwen", "codellama"}
author := authors[g.rand.Intn(len(authors))]
modelName := modelNames[g.rand.Intn(len(modelNames))]
modelID := fmt.Sprintf("%s/%s-%s", author, modelName, g.randomString(6))
// Generate files
numFiles := g.rand.Intn(5) + 2 // 2-6 files
files := make([]ProcessedModelFile, numFiles)
// Ensure at least one model file and one readme
hasModelFile := false
hasReadme := false
for i := 0; i < numFiles; i++ {
files[i] = g.GenerateProcessedModelFile()
if files[i].FileType == "model" {
hasModelFile = true
}
if files[i].FileType == "readme" {
hasReadme = true
}
}
// Add required files if missing
if !hasModelFile {
modelFile := g.GenerateProcessedModelFile()
modelFile.FileType = "model"
modelFile.Path = fmt.Sprintf("%s-Q4_K_M.gguf", modelName)
files = append(files, modelFile)
}
if !hasReadme {
readmeFile := g.GenerateProcessedModelFile()
readmeFile.FileType = "readme"
readmeFile.Path = "README.md"
readmeFile.IsReadme = true
files = append(files, readmeFile)
}
// Find preferred model file
var preferredModelFile *ProcessedModelFile
for i := range files {
if files[i].FileType == "model" {
preferredModelFile = &files[i]
break
}
}
// Find readme file
var readmeFile *ProcessedModelFile
for i := range files {
if files[i].FileType == "readme" {
readmeFile = &files[i]
break
}
}
readmeContent := g.generateReadmeContent(modelName, author)
// Generate sample metadata
licenses := []string{"apache-2.0", "mit", "llama2", "gpl-3.0", "bsd", ""}
license := licenses[g.rand.Intn(len(licenses))]
sampleTags := []string{"llm", "gguf", "gpu", "cpu", "text-to-text", "chat", "instruction-tuned"}
numTags := g.rand.Intn(4) + 3 // 3-6 tags
tags := make([]string, numTags)
for i := 0; i < numTags; i++ {
tags[i] = sampleTags[g.rand.Intn(len(sampleTags))]
}
// Remove duplicates
tags = g.removeDuplicates(tags)
// Optionally include icon (50% chance)
icon := ""
if g.rand.Intn(2) == 0 {
icon = fmt.Sprintf("https://cdn-avatars.huggingface.co/v1/production/uploads/%s.png", g.randomString(24))
}
return ProcessedModel{
ModelID: modelID,
Author: author,
Downloads: g.rand.Intn(1000000) + 1000,
LastModified: g.randomDate(),
Files: files,
PreferredModelFile: preferredModelFile,
ReadmeFile: readmeFile,
ReadmeContent: readmeContent,
ReadmeContentPreview: truncateString(readmeContent, 200),
QuantizationPreferences: []string{"Q4_K_M", "Q4_K_S", "Q3_K_M", "Q2_K"},
ProcessingError: "",
Tags: tags,
License: license,
Icon: icon,
}
}
// Helper methods for synthetic data generation
func (g *SyntheticDataGenerator) randomString(length int) string {
const charset = "abcdefghijklmnopqrstuvwxyz0123456789"
b := make([]byte, length)
for i := range b {
b[i] = charset[g.rand.Intn(len(charset))]
}
return string(b)
}
func (g *SyntheticDataGenerator) randomSHA256() string {
const charset = "0123456789abcdef"
b := make([]byte, 64)
for i := range b {
b[i] = charset[g.rand.Intn(len(charset))]
}
return string(b)
}
func (g *SyntheticDataGenerator) randomDate() string {
now := time.Now()
daysAgo := g.rand.Intn(365) // Random date within last year
pastDate := now.AddDate(0, 0, -daysAgo)
return pastDate.Format("2006-01-02T15:04:05.000Z")
}
func (g *SyntheticDataGenerator) removeDuplicates(slice []string) []string {
keys := make(map[string]bool)
result := []string{}
for _, item := range slice {
if !keys[item] {
keys[item] = true
result = append(result, item)
}
}
return result
}
func (g *SyntheticDataGenerator) generateReadmeContent(modelName, author string) string {
templates := []string{
fmt.Sprintf("# %s Model\n\nThis is a %s model developed by %s. It's designed for various natural language processing tasks including text generation, question answering, and conversation.\n\n## Features\n\n- High-quality text generation\n- Efficient inference\n- Multiple quantization options\n- Easy to use with LocalAI\n\n## Usage\n\nUse this model with LocalAI for various AI tasks.", strings.Title(modelName), modelName, author),
fmt.Sprintf("# %s\n\nA powerful language model from %s. This model excels at understanding and generating human-like text across multiple domains.\n\n## Capabilities\n\n- Text completion\n- Code generation\n- Creative writing\n- Technical documentation\n\n## Model Details\n\n- Architecture: Transformer-based\n- Training: Large-scale supervised learning\n- Quantization: Available in multiple formats", strings.Title(modelName), author),
fmt.Sprintf("# %s Language Model\n\nDeveloped by %s, this model represents state-of-the-art performance in natural language understanding and generation.\n\n## Key Features\n\n- Multilingual support\n- Context-aware responses\n- Efficient memory usage\n- Fast inference speed\n\n## Applications\n\n- Chatbots and virtual assistants\n- Content generation\n- Code completion\n- Educational tools", strings.Title(modelName), author),
}
return templates[g.rand.Intn(len(templates))]
}

View File

@@ -1,46 +0,0 @@
package main
import (
"fmt"
hfapi "github.com/mudler/LocalAI/pkg/huggingface-api"
openai "github.com/sashabaranov/go-openai"
jsonschema "github.com/sashabaranov/go-openai/jsonschema"
)
// Get repository README from HF
type HFReadmeTool struct {
client *hfapi.Client
}
func (s *HFReadmeTool) Execute(args map[string]any) (string, error) {
q, ok := args["repository"].(string)
if !ok {
return "", fmt.Errorf("no query")
}
readme, err := s.client.GetReadmeContent(q, "README.md")
if err != nil {
return "", err
}
return readme, nil
}
func (s *HFReadmeTool) Tool() openai.Tool {
return openai.Tool{
Type: openai.ToolTypeFunction,
Function: &openai.FunctionDefinition{
Name: "hf_readme",
Description: "A tool to get the README content of a huggingface repository",
Parameters: jsonschema.Definition{
Type: jsonschema.Object,
Properties: map[string]jsonschema.Definition{
"repository": {
Type: jsonschema.String,
Description: "The huggingface repository to get the README content of",
},
},
Required: []string{"repository"},
},
},
}
}

View File

File diff suppressed because it is too large.

View File

@@ -1,5 +1,5 @@
 ---
-name: 'build backend container images (reusable)'
+name: 'build python backend container images (reusable)'
 on:
 workflow_call:
@@ -53,16 +53,11 @@ on:
 description: 'Skip drivers'
 default: 'false'
 type: string
-ubuntu-version:
-description: 'Ubuntu version'
-required: false
-default: '2204'
-type: string
 secrets:
 dockerUsername:
-required: false
+required: true
 dockerPassword:
-required: false
+required: true
 quayUsername:
 required: true
 quayPassword:
@@ -71,8 +66,6 @@ on:
 jobs:
 backend-build:
 runs-on: ${{ inputs.runs-on }}
-env:
-quay_username: ${{ secrets.quayUsername }}
 steps:
@@ -102,7 +95,7 @@ jobs:
&& sudo apt-get install -y git && sudo apt-get install -y git
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Release space from worker - name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest' if: inputs.runs-on == 'ubuntu-latest'
@@ -194,7 +187,7 @@ jobs:
 password: ${{ secrets.dockerPassword }}
 - name: Login to Quay.io
-if: ${{ env.quay_username != '' }}
+# if: github.event_name != 'pull_request'
 uses: docker/login-action@v3
 with:
 registry: quay.io
@@ -213,7 +206,6 @@ jobs:
 CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
 BASE_IMAGE=${{ inputs.base-image }}
 BACKEND=${{ inputs.backend }}
-UBUNTU_VERSION=${{ inputs.ubuntu-version }}
 context: ${{ inputs.context }}
 file: ${{ inputs.dockerfile }}
 cache-from: type=gha
@@ -234,12 +226,11 @@ jobs:
 CUDA_MINOR_VERSION=${{ inputs.cuda-minor-version }}
 BASE_IMAGE=${{ inputs.base-image }}
 BACKEND=${{ inputs.backend }}
-UBUNTU_VERSION=${{ inputs.ubuntu-version }}
 context: ${{ inputs.context }}
 file: ${{ inputs.dockerfile }}
 cache-from: type=gha
 platforms: ${{ inputs.platforms }}
-push: ${{ env.quay_username != '' }}
+push: true
 tags: ${{ steps.meta_pull_request.outputs.tags }}
 labels: ${{ steps.meta_pull_request.outputs.labels }}
@@ -247,4 +238,4 @@ jobs:
 - name: job summary
 run: |
 echo "Built image: ${{ steps.meta.outputs.labels }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -1,144 +0,0 @@
---
name: 'build darwin python backend container images (reusable)'
on:
workflow_call:
inputs:
backend:
description: 'Backend to build'
required: true
type: string
build-type:
description: 'Build type (e.g., mps)'
default: ''
type: string
use-pip:
description: 'Use pip to install dependencies'
default: false
type: boolean
lang:
description: 'Programming language (e.g. go)'
default: 'python'
type: string
go-version:
description: 'Go version to use'
default: '1.24.x'
type: string
tag-suffix:
description: 'Tag suffix for the built image'
required: true
type: string
runs-on:
description: 'Runner to use'
default: 'macOS-14'
type: string
secrets:
dockerUsername:
required: false
dockerPassword:
required: false
quayUsername:
required: true
quayPassword:
required: true
jobs:
darwin-backend-build:
runs-on: ${{ inputs.runs-on }}
strategy:
matrix:
go-version: ['${{ inputs.go-version }}']
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Setup Go ${{ matrix.go-version }}
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
cache: false
# You can test your matrix by printing the current Go version
- name: Display Go version
run: go version
- name: Dependencies
run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
- name: Build ${{ inputs.backend }}-darwin
run: |
make protogen-go
BACKEND=${{ inputs.backend }} BUILD_TYPE=${{ inputs.build-type }} USE_PIP=${{ inputs.use-pip }} make build-darwin-${{ inputs.lang }}-backend
- name: Upload ${{ inputs.backend }}.tar
uses: actions/upload-artifact@v6
with:
name: ${{ inputs.backend }}-tar
path: backend-images/${{ inputs.backend }}.tar
darwin-backend-publish:
needs: darwin-backend-build
if: github.event_name != 'pull_request'
runs-on: ubuntu-latest
steps:
- name: Download ${{ inputs.backend }}.tar
uses: actions/download-artifact@v7
with:
name: ${{ inputs.backend }}-tar
path: .
- name: Install crane
run: |
curl -L https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar -xz
sudo mv crane /usr/local/bin/
- name: Log in to DockerHub
run: |
echo "${{ secrets.dockerPassword }}" | crane auth login docker.io -u "${{ secrets.dockerUsername }}" --password-stdin
- name: Log in to quay.io
run: |
echo "${{ secrets.quayPassword }}" | crane auth login quay.io -u "${{ secrets.quayUsername }}" --password-stdin
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
localai/localai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Docker meta
id: quaymeta
uses: docker/metadata-action@v5
with:
images: |
quay.io/go-skynet/local-ai-backends
tags: |
type=ref,event=branch
type=semver,pattern={{raw}}
type=sha
flavor: |
latest=auto
suffix=${{ inputs.tag-suffix }},onlatest=true
- name: Push Docker image (DockerHub)
run: |
for tag in $(echo "${{ steps.meta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done
- name: Push Docker image (Quay)
run: |
for tag in $(echo "${{ steps.quaymeta.outputs.tags }}" | tr ',' '\n'); do
crane push ${{ inputs.backend }}.tar $tag
done

View File

@@ -1,79 +0,0 @@
name: 'build backend container images (PR-filtered)'
on:
pull_request:
concurrency:
group: ci-backends-pr-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
jobs:
generate-matrix:
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
matrix-darwin: ${{ steps.set-matrix.outputs.matrix-darwin }}
has-backends: ${{ steps.set-matrix.outputs.has-backends }}
has-backends-darwin: ${{ steps.set-matrix.outputs.has-backends-darwin }}
steps:
- name: Checkout repository
uses: actions/checkout@v6
- name: Setup Bun
uses: oven-sh/setup-bun@v2
- name: Install dependencies
run: |
bun add js-yaml
bun add @octokit/core
# filters the matrix in backend.yml
- name: Filter matrix for changed backends
id: set-matrix
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_EVENT_PATH: ${{ github.event_path }}
run: bun run scripts/changed-backends.js
backend-jobs:
needs: generate-matrix
uses: ./.github/workflows/backend_build.yml
if: needs.generate-matrix.outputs.has-backends == 'true'
with:
tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }}
backend: ${{ matrix.backend }}
dockerfile: ${{ matrix.dockerfile }}
skip-drivers: ${{ matrix.skip-drivers }}
context: ${{ matrix.context }}
ubuntu-version: ${{ matrix.ubuntu-version }}
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
backend-jobs-darwin:
needs: generate-matrix
uses: ./.github/workflows/backend_build_darwin.yml
if: needs.generate-matrix.outputs.has-backends-darwin == 'true'
with:
backend: ${{ matrix.backend }}
build-type: ${{ matrix.build-type }}
go-version: "1.24.x"
tag-suffix: ${{ matrix.tag-suffix }}
lang: ${{ matrix.lang || 'python' }}
use-pip: ${{ matrix.backend == 'diffusers' }}
runs-on: "macos-latest"
secrets:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
strategy:
fail-fast: true
matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix-darwin) }}

View File

@@ -11,57 +11,13 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Checkout
-uses: actions/checkout@v6
+uses: actions/checkout@v4
 with:
 fetch-depth: 0
 - name: Set up Go
 uses: actions/setup-go@v5
 with:
-go-version: 1.25
+go-version: 1.23
 - name: Run GoReleaser
 run: |
 make dev-dist
launcher-build-darwin:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.25
- name: Build launcher for macOS ARM64
run: |
make build-launcher-darwin
ls -liah dist
- name: Upload macOS launcher artifacts
uses: actions/upload-artifact@v6
with:
name: launcher-macos
path: dist/
retention-days: 30
launcher-build-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.25
- name: Build launcher for Linux
run: |
sudo apt-get update
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: actions/upload-artifact@v6
with:
name: launcher-linux
path: local-ai-launcher-linux.tar.xz
retention-days: 30

View File

@@ -1,10 +1,10 @@
-name: Bump Backend dependencies
+name: Bump dependencies
 on:
 schedule:
 - cron: 0 20 * * *
 workflow_dispatch:
 jobs:
-bump-backends:
+bump:
 strategy:
 fail-fast: false
 matrix:
@@ -21,7 +21,7 @@ jobs:
variable: "BARKCPP_VERSION" variable: "BARKCPP_VERSION"
branch: "main" branch: "main"
file: "Makefile" file: "Makefile"
- repository: "leejet/stable-diffusion.cpp" - repository: "richiejp/stable-diffusion.cpp"
variable: "STABLEDIFFUSION_GGML_VERSION" variable: "STABLEDIFFUSION_GGML_VERSION"
branch: "master" branch: "master"
file: "backend/go/stablediffusion-ggml/Makefile" file: "backend/go/stablediffusion-ggml/Makefile"
@@ -31,7 +31,7 @@ jobs:
file: "backend/go/piper/Makefile" file: "backend/go/piper/Makefile"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- name: Bump dependencies 🔧 - name: Bump dependencies 🔧
id: bump id: bump
run: | run: |
@@ -49,7 +49,7 @@ jobs:
 rm -rfv ${{ matrix.variable }}_message.txt
 rm -rfv ${{ matrix.variable }}_commit.txt
 - name: Create Pull Request
-uses: peter-evans/create-pull-request@v8
+uses: peter-evans/create-pull-request@v7
 with:
 token: ${{ secrets.UPDATE_BOT_TOKEN }}
 push-to-fork: ci-forks/LocalAI

View File

@@ -1,10 +1,10 @@
-name: Bump Documentation
+name: Bump dependencies
 on:
 schedule:
 - cron: 0 20 * * *
 workflow_dispatch:
 jobs:
-bump-docs:
+bump:
 strategy:
 fail-fast: false
 matrix:
@@ -12,12 +12,12 @@ jobs:
- repository: "mudler/LocalAI" - repository: "mudler/LocalAI"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- name: Bump dependencies 🔧 - name: Bump dependencies 🔧
run: | run: |
bash .github/bump_docs.sh ${{ matrix.repository }} bash .github/bump_docs.sh ${{ matrix.repository }}
- name: Create Pull Request - name: Create Pull Request
uses: peter-evans/create-pull-request@v8 uses: peter-evans/create-pull-request@v7
with: with:
token: ${{ secrets.UPDATE_BOT_TOKEN }} token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI push-to-fork: ci-forks/LocalAI

View File

@@ -15,7 +15,7 @@ jobs:
&& sudo add-apt-repository -y ppa:git-core/ppa \ && sudo add-apt-repository -y ppa:git-core/ppa \
&& sudo apt-get update \ && sudo apt-get update \
&& sudo apt-get install -y git && sudo apt-get install -y git
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- name: Install dependencies - name: Install dependencies
run: | run: |
sudo apt-get update sudo apt-get update
@@ -35,7 +35,7 @@ jobs:
 sudo chmod 777 /hf_cache
 bash .github/checksum_checker.sh gallery/index.yaml
 - name: Create Pull Request
-uses: peter-evans/create-pull-request@v8
+uses: peter-evans/create-pull-request@v7
 with:
 token: ${{ secrets.UPDATE_BOT_TOKEN }}
 push-to-fork: ci-forks/LocalAI

View File

@@ -14,13 +14,13 @@ jobs:
 steps:
 - name: Dependabot metadata
 id: metadata
-uses: dependabot/fetch-metadata@v2.5.0
+uses: dependabot/fetch-metadata@v2.4.0
 with:
 github-token: "${{ secrets.GITHUB_TOKEN }}"
 skip-commit-verification: true
 - name: Checkout repository
-uses: actions/checkout@v6
+uses: actions/checkout@v4
 - name: Approve a PR if not already approved
 run: |

View File

@@ -15,7 +15,7 @@ jobs:
 runs-on: ubuntu-latest
 steps:
 - name: Clone
-uses: actions/checkout@v6
+uses: actions/checkout@v4
 with:
 submodules: true
 - uses: actions/setup-go@v5
@@ -33,7 +33,7 @@ jobs:
 run: |
 CGO_ENABLED=0 make build
 - name: rm
-uses: appleboy/ssh-action@v1.2.4
+uses: appleboy/ssh-action@v1.2.2
 with:
 host: ${{ secrets.EXPLORER_SSH_HOST }}
 username: ${{ secrets.EXPLORER_SSH_USERNAME }}
@@ -53,7 +53,7 @@ jobs:
rm: true rm: true
target: ./local-ai target: ./local-ai
- name: restarting - name: restarting
uses: appleboy/ssh-action@v1.2.4 uses: appleboy/ssh-action@v1.2.2
with: with:
host: ${{ secrets.EXPLORER_SSH_HOST }} host: ${{ secrets.EXPLORER_SSH_HOST }}
username: ${{ secrets.EXPLORER_SSH_USERNAME }} username: ${{ secrets.EXPLORER_SSH_USERNAME }}

View File

@@ -1,132 +0,0 @@
name: Gallery Agent
on:
schedule:
    - cron: '0 */3 * * *'  # Run every 3 hours
workflow_dispatch:
inputs:
search_term:
description: 'Search term for models'
required: false
default: 'GGUF'
type: string
limit:
description: 'Maximum number of models to process'
required: false
default: '15'
type: string
quantization:
description: 'Preferred quantization format'
required: false
default: 'Q4_K_M'
type: string
max_models:
description: 'Maximum number of models to add to the gallery'
required: false
default: '1'
type: string
jobs:
gallery-agent:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v6
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.21'
- name: Proto Dependencies
run: |
# Install protoc
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v26.1/protoc-26.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
PATH="$PATH:$HOME/go/bin" make protogen-go
- uses: mudler/localai-github-action@v1.1
with:
model: 'https://huggingface.co/bartowski/Qwen_Qwen3-1.7B-GGUF'
- name: Run gallery agent
env:
#OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }}
          OPENAI_MODEL: Qwen_Qwen3-1.7B-GGUF
OPENAI_BASE_URL: "http://localhost:8080"
OPENAI_KEY: ${{ secrets.OPENAI_KEY }}
#OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }}
SEARCH_TERM: ${{ github.event.inputs.search_term || 'GGUF' }}
LIMIT: ${{ github.event.inputs.limit || '15' }}
QUANTIZATION: ${{ github.event.inputs.quantization || 'Q4_K_M' }}
MAX_MODELS: ${{ github.event.inputs.max_models || '1' }}
run: |
export GALLERY_INDEX_PATH=$PWD/gallery/index.yaml
go run ./.github/gallery-agent
- name: Check for changes
id: check_changes
run: |
if git diff --quiet gallery/index.yaml; then
echo "changes=false" >> $GITHUB_OUTPUT
echo "No changes detected in gallery/index.yaml"
else
echo "changes=true" >> $GITHUB_OUTPUT
echo "Changes detected in gallery/index.yaml"
git diff gallery/index.yaml
fi
- name: Read gallery agent summary
id: read_summary
if: steps.check_changes.outputs.changes == 'true'
run: |
if [ -f "./gallery-agent-summary.json" ]; then
echo "summary_exists=true" >> $GITHUB_OUTPUT
# Extract summary data using jq
echo "search_term=$(jq -r '.search_term' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "total_found=$(jq -r '.total_found' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "models_added=$(jq -r '.models_added' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "quantization=$(jq -r '.quantization' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
echo "processing_time=$(jq -r '.processing_time' ./gallery-agent-summary.json)" >> $GITHUB_OUTPUT
# Create a formatted list of added models with URLs
added_models=$(jq -r 'range(0; .added_model_ids | length) as $i | "- [\(.added_model_ids[$i])](\(.added_model_urls[$i]))"' ./gallery-agent-summary.json | tr '\n' '\n')
echo "added_models<<EOF" >> $GITHUB_OUTPUT
echo "$added_models" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
rm -f ./gallery-agent-summary.json
else
echo "summary_exists=false" >> $GITHUB_OUTPUT
fi
- name: Create Pull Request
if: steps.check_changes.outputs.changes == 'true'
uses: peter-evans/create-pull-request@v8
with:
token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI
commit-message: 'chore(model gallery): :robot: add new models via gallery agent'
title: 'chore(model gallery): :robot: add ${{ steps.read_summary.outputs.models_added || 0 }} new models via gallery agent'
# Branch has to be unique so PRs are not overriding each other
branch-suffix: timestamp
body: |
This PR was automatically created by the gallery agent workflow.
**Summary:**
- **Search Term:** ${{ steps.read_summary.outputs.search_term || github.event.inputs.search_term || 'GGUF' }}
- **Models Found:** ${{ steps.read_summary.outputs.total_found || 'N/A' }}
- **Models Added:** ${{ steps.read_summary.outputs.models_added || '0' }}
- **Quantization:** ${{ steps.read_summary.outputs.quantization || github.event.inputs.quantization || 'Q4_K_M' }}
- **Processing Time:** ${{ steps.read_summary.outputs.processing_time || 'N/A' }}
**Added Models:**
${{ steps.read_summary.outputs.added_models || '- No models added' }}
**Workflow Details:**
- Triggered by: `${{ github.event_name }}`
- Run ID: `${{ github.run_id }}`
- Commit: `${{ github.sha }}`
signoff: true
delete-branch: true

View File

@@ -16,7 +16,7 @@ jobs:
strategy: strategy:
matrix: matrix:
include: include:
- grpc-base-image: ubuntu:24.04 - grpc-base-image: ubuntu:22.04
runs-on: 'ubuntu-latest' runs-on: 'ubuntu-latest'
platforms: 'linux/amd64,linux/arm64' platforms: 'linux/amd64,linux/arm64'
runs-on: ${{matrix.runs-on}} runs-on: ${{matrix.runs-on}}
@@ -73,7 +73,7 @@ jobs:
uses: docker/setup-buildx-action@master uses: docker/setup-buildx-action@master
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Cache GRPC - name: Cache GRPC
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6

View File

@@ -15,8 +15,8 @@ jobs:
strategy: strategy:
matrix: matrix:
include: include:
- base-image: intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04 - base-image: intel/oneapi-basekit:2025.2.0-0-devel-ubuntu22.04
runs-on: 'arc-runner-set' runs-on: 'ubuntu-latest'
platforms: 'linux/amd64' platforms: 'linux/amd64'
runs-on: ${{matrix.runs-on}} runs-on: ${{matrix.runs-on}}
steps: steps:
@@ -43,7 +43,7 @@ jobs:
uses: docker/setup-buildx-action@master uses: docker/setup-buildx-action@master
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Cache Intel images - name: Cache Intel images
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6
@@ -53,7 +53,7 @@ jobs:
BASE_IMAGE=${{ matrix.base-image }} BASE_IMAGE=${{ matrix.base-image }}
context: . context: .
file: ./Dockerfile file: ./Dockerfile
tags: quay.io/go-skynet/intel-oneapi-base:24.04 tags: quay.io/go-skynet/intel-oneapi-base:latest
push: true push: true
target: intel target: intel
platforms: ${{ matrix.platforms }} platforms: ${{ matrix.platforms }}

View File

@@ -1,95 +1,68 @@
--- ---
name: 'build container images tests' name: 'build container images tests'
on: on:
pull_request: pull_request:
concurrency: concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }} group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
image-build: image-build:
uses: ./.github/workflows/image_build.yml uses: ./.github/workflows/image_build.yml
with: with:
tag-latest: ${{ matrix.tag-latest }} tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }} tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }} build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }} cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }} cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }} platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }} runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }} base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }} grpc-base-image: ${{ matrix.grpc-base-image }}
makeflags: ${{ matrix.makeflags }} makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }} secrets:
secrets: dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }} dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }} quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }} quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }} strategy:
strategy: # Pushing with all jobs in parallel
# Pushing with all jobs in parallel # eats the bandwidth of all the nodes
# eats the bandwidth of all the nodes max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }}
max-parallel: ${{ github.event_name != 'pull_request' && 4 || 8 }} fail-fast: false
fail-fast: false matrix:
matrix: include:
include: - build-type: 'cublas'
- build-type: 'cublas' cuda-major-version: "12"
cuda-major-version: "12" cuda-minor-version: "0"
cuda-minor-version: "9" platforms: 'linux/amd64'
platforms: 'linux/amd64' tag-latest: 'false'
tag-latest: 'false' tag-suffix: '-gpu-nvidia-cuda12'
tag-suffix: '-gpu-nvidia-cuda-12' runs-on: 'ubuntu-latest'
runs-on: 'ubuntu-latest' base-image: "ubuntu:22.04"
base-image: "ubuntu:24.04" makeflags: "--jobs=3 --output-sync=target"
makeflags: "--jobs=3 --output-sync=target" - build-type: 'hipblas'
ubuntu-version: '2404' platforms: 'linux/amd64'
- build-type: 'cublas' tag-latest: 'false'
cuda-major-version: "13" tag-suffix: '-hipblas'
cuda-minor-version: "0" base-image: "rocm/dev-ubuntu-22.04:6.1"
platforms: 'linux/amd64' grpc-base-image: "ubuntu:22.04"
tag-latest: 'false' runs-on: 'ubuntu-latest'
tag-suffix: '-gpu-nvidia-cuda-13' makeflags: "--jobs=3 --output-sync=target"
runs-on: 'ubuntu-latest' - build-type: 'sycl_f16'
base-image: "ubuntu:22.04" platforms: 'linux/amd64'
makeflags: "--jobs=3 --output-sync=target" tag-latest: 'false'
ubuntu-version: '2404' base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
- build-type: 'hipblas' grpc-base-image: "ubuntu:22.04"
platforms: 'linux/amd64' tag-suffix: 'sycl-f16'
tag-latest: 'false' runs-on: 'ubuntu-latest'
tag-suffix: '-hipblas' makeflags: "--jobs=3 --output-sync=target"
base-image: "rocm/dev-ubuntu-24.04:6.4.4" - build-type: 'vulkan'
grpc-base-image: "ubuntu:24.04" platforms: 'linux/amd64'
runs-on: 'ubuntu-latest' tag-latest: 'false'
makeflags: "--jobs=3 --output-sync=target" tag-suffix: '-vulkan-core'
ubuntu-version: '2404' runs-on: 'ubuntu-latest'
- build-type: 'sycl' base-image: "ubuntu:22.04"
platforms: 'linux/amd64' makeflags: "--jobs=4 --output-sync=target"
tag-latest: 'false'
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"
grpc-base-image: "ubuntu:24.04"
tag-suffix: 'sycl'
runs-on: 'ubuntu-latest'
makeflags: "--jobs=3 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'vulkan'
platforms: 'linux/amd64,linux/arm64'
tag-latest: 'false'
tag-suffix: '-vulkan-core'
runs-on: 'ubuntu-latest'
base-image: "ubuntu:24.04"
makeflags: "--jobs=4 --output-sync=target"
ubuntu-version: '2404'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'false'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'

View File

@@ -1,187 +1,163 @@
--- ---
name: 'build container images' name: 'build container images'
on: on:
push: push:
branches: branches:
- master - master
tags: tags:
- '*' - '*'
concurrency: concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }} group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true cancel-in-progress: true
jobs: jobs:
hipblas-jobs: hipblas-jobs:
uses: ./.github/workflows/image_build.yml uses: ./.github/workflows/image_build.yml
with: with:
tag-latest: ${{ matrix.tag-latest }} tag-latest: ${{ matrix.tag-latest }}
tag-suffix: ${{ matrix.tag-suffix }} tag-suffix: ${{ matrix.tag-suffix }}
build-type: ${{ matrix.build-type }} build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }} cuda-major-version: ${{ matrix.cuda-major-version }}
cuda-minor-version: ${{ matrix.cuda-minor-version }} cuda-minor-version: ${{ matrix.cuda-minor-version }}
platforms: ${{ matrix.platforms }} platforms: ${{ matrix.platforms }}
runs-on: ${{ matrix.runs-on }} runs-on: ${{ matrix.runs-on }}
base-image: ${{ matrix.base-image }} base-image: ${{ matrix.base-image }}
grpc-base-image: ${{ matrix.grpc-base-image }} grpc-base-image: ${{ matrix.grpc-base-image }}
aio: ${{ matrix.aio }} aio: ${{ matrix.aio }}
makeflags: ${{ matrix.makeflags }} makeflags: ${{ matrix.makeflags }}
ubuntu-version: ${{ matrix.ubuntu-version }} secrets:
ubuntu-codename: ${{ matrix.ubuntu-codename }} dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
secrets: dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }} quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }} quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }} strategy:
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }} matrix:
strategy: include:
matrix: - build-type: 'hipblas'
include: platforms: 'linux/amd64'
- build-type: 'hipblas' tag-latest: 'auto'
platforms: 'linux/amd64' tag-suffix: '-gpu-hipblas'
tag-latest: 'auto' base-image: "rocm/dev-ubuntu-22.04:6.1"
tag-suffix: '-gpu-hipblas' grpc-base-image: "ubuntu:22.04"
base-image: "rocm/dev-ubuntu-24.04:6.4.4" runs-on: 'ubuntu-latest'
grpc-base-image: "ubuntu:24.04" makeflags: "--jobs=3 --output-sync=target"
runs-on: 'ubuntu-latest' aio: "-aio-gpu-hipblas"
makeflags: "--jobs=3 --output-sync=target"
aio: "-aio-gpu-hipblas" core-image-build:
ubuntu-version: '2404' uses: ./.github/workflows/image_build.yml
ubuntu-codename: 'noble' with:
tag-latest: ${{ matrix.tag-latest }}
core-image-build: tag-suffix: ${{ matrix.tag-suffix }}
uses: ./.github/workflows/image_build.yml build-type: ${{ matrix.build-type }}
with: cuda-major-version: ${{ matrix.cuda-major-version }}
tag-latest: ${{ matrix.tag-latest }} cuda-minor-version: ${{ matrix.cuda-minor-version }}
tag-suffix: ${{ matrix.tag-suffix }} platforms: ${{ matrix.platforms }}
build-type: ${{ matrix.build-type }} runs-on: ${{ matrix.runs-on }}
cuda-major-version: ${{ matrix.cuda-major-version }} aio: ${{ matrix.aio }}
cuda-minor-version: ${{ matrix.cuda-minor-version }} base-image: ${{ matrix.base-image }}
platforms: ${{ matrix.platforms }} grpc-base-image: ${{ matrix.grpc-base-image }}
runs-on: ${{ matrix.runs-on }} makeflags: ${{ matrix.makeflags }}
aio: ${{ matrix.aio }} skip-drivers: ${{ matrix.skip-drivers }}
base-image: ${{ matrix.base-image }} secrets:
grpc-base-image: ${{ matrix.grpc-base-image }} dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
makeflags: ${{ matrix.makeflags }} dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
skip-drivers: ${{ matrix.skip-drivers }} quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
ubuntu-version: ${{ matrix.ubuntu-version }} quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
ubuntu-codename: ${{ matrix.ubuntu-codename }} strategy:
secrets: #max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }}
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }} matrix:
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }} include:
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }} - build-type: ''
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }} platforms: 'linux/amd64,linux/arm64'
strategy: tag-latest: 'auto'
#max-parallel: ${{ github.event_name != 'pull_request' && 2 || 4 }} tag-suffix: ''
matrix: base-image: "ubuntu:22.04"
include: runs-on: 'ubuntu-latest'
- build-type: '' aio: "-aio-cpu"
platforms: 'linux/amd64,linux/arm64' makeflags: "--jobs=4 --output-sync=target"
tag-latest: 'auto' skip-drivers: 'false'
tag-suffix: '' - build-type: 'cublas'
base-image: "ubuntu:24.04" cuda-major-version: "11"
runs-on: 'ubuntu-latest' cuda-minor-version: "7"
aio: "-aio-cpu" platforms: 'linux/amd64'
makeflags: "--jobs=4 --output-sync=target" tag-latest: 'auto'
skip-drivers: 'false' tag-suffix: '-gpu-nvidia-cuda11'
ubuntu-version: '2404' runs-on: 'ubuntu-latest'
ubuntu-codename: 'noble' base-image: "ubuntu:22.04"
- build-type: 'cublas' makeflags: "--jobs=4 --output-sync=target"
cuda-major-version: "12" skip-drivers: 'false'
cuda-minor-version: "9" aio: "-aio-gpu-nvidia-cuda-11"
platforms: 'linux/amd64' - build-type: 'cublas'
tag-latest: 'auto' cuda-major-version: "12"
tag-suffix: '-gpu-nvidia-cuda-12' cuda-minor-version: "0"
runs-on: 'ubuntu-latest' platforms: 'linux/amd64'
base-image: "ubuntu:24.04" tag-latest: 'auto'
skip-drivers: 'false' tag-suffix: '-gpu-nvidia-cuda12'
makeflags: "--jobs=4 --output-sync=target" runs-on: 'ubuntu-latest'
aio: "-aio-gpu-nvidia-cuda-12" base-image: "ubuntu:22.04"
ubuntu-version: '2404' skip-drivers: 'false'
ubuntu-codename: 'noble' makeflags: "--jobs=4 --output-sync=target"
- build-type: 'cublas' aio: "-aio-gpu-nvidia-cuda-12"
cuda-major-version: "13" - build-type: 'vulkan'
cuda-minor-version: "0" platforms: 'linux/amd64'
platforms: 'linux/amd64' tag-latest: 'auto'
tag-latest: 'auto' tag-suffix: '-vulkan'
tag-suffix: '-gpu-nvidia-cuda-13' runs-on: 'ubuntu-latest'
runs-on: 'ubuntu-latest' base-image: "ubuntu:22.04"
base-image: "ubuntu:22.04" skip-drivers: 'false'
skip-drivers: 'false' makeflags: "--jobs=4 --output-sync=target"
makeflags: "--jobs=4 --output-sync=target" aio: "-aio-gpu-vulkan"
aio: "-aio-gpu-nvidia-cuda-13" - build-type: 'sycl_f16'
ubuntu-version: '2404' platforms: 'linux/amd64'
ubuntu-codename: 'noble' tag-latest: 'auto'
- build-type: 'vulkan' base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
platforms: 'linux/amd64,linux/arm64' grpc-base-image: "ubuntu:22.04"
tag-latest: 'auto' tag-suffix: '-gpu-intel-f16'
tag-suffix: '-gpu-vulkan' runs-on: 'ubuntu-latest'
runs-on: 'ubuntu-latest' makeflags: "--jobs=3 --output-sync=target"
base-image: "ubuntu:24.04" aio: "-aio-gpu-intel-f16"
skip-drivers: 'false' - build-type: 'sycl_f32'
makeflags: "--jobs=4 --output-sync=target" platforms: 'linux/amd64'
aio: "-aio-gpu-vulkan" tag-latest: 'auto'
ubuntu-version: '2404' base-image: "quay.io/go-skynet/intel-oneapi-base:latest"
ubuntu-codename: 'noble' grpc-base-image: "ubuntu:22.04"
- build-type: 'intel' tag-suffix: '-gpu-intel-f32'
platforms: 'linux/amd64' runs-on: 'ubuntu-latest'
tag-latest: 'auto' makeflags: "--jobs=3 --output-sync=target"
base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04" aio: "-aio-gpu-intel-f32"
grpc-base-image: "ubuntu:24.04"
tag-suffix: '-gpu-intel' gh-runner:
runs-on: 'ubuntu-latest' uses: ./.github/workflows/image_build.yml
makeflags: "--jobs=3 --output-sync=target" with:
aio: "-aio-gpu-intel" tag-latest: ${{ matrix.tag-latest }}
ubuntu-version: '2404' tag-suffix: ${{ matrix.tag-suffix }}
ubuntu-codename: 'noble' build-type: ${{ matrix.build-type }}
cuda-major-version: ${{ matrix.cuda-major-version }}
gh-runner: cuda-minor-version: ${{ matrix.cuda-minor-version }}
uses: ./.github/workflows/image_build.yml platforms: ${{ matrix.platforms }}
with: runs-on: ${{ matrix.runs-on }}
tag-latest: ${{ matrix.tag-latest }} aio: ${{ matrix.aio }}
tag-suffix: ${{ matrix.tag-suffix }} base-image: ${{ matrix.base-image }}
build-type: ${{ matrix.build-type }} grpc-base-image: ${{ matrix.grpc-base-image }}
cuda-major-version: ${{ matrix.cuda-major-version }} makeflags: ${{ matrix.makeflags }}
cuda-minor-version: ${{ matrix.cuda-minor-version }} skip-drivers: ${{ matrix.skip-drivers }}
platforms: ${{ matrix.platforms }} secrets:
runs-on: ${{ matrix.runs-on }} dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }}
aio: ${{ matrix.aio }} dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }}
base-image: ${{ matrix.base-image }} quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }}
grpc-base-image: ${{ matrix.grpc-base-image }} quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }}
makeflags: ${{ matrix.makeflags }} strategy:
skip-drivers: ${{ matrix.skip-drivers }} matrix:
ubuntu-version: ${{ matrix.ubuntu-version }} include:
ubuntu-codename: ${{ matrix.ubuntu-codename }} - build-type: 'cublas'
secrets: cuda-major-version: "12"
dockerUsername: ${{ secrets.DOCKERHUB_USERNAME }} cuda-minor-version: "0"
dockerPassword: ${{ secrets.DOCKERHUB_PASSWORD }} platforms: 'linux/arm64'
quayUsername: ${{ secrets.LOCALAI_REGISTRY_USERNAME }} tag-latest: 'auto'
quayPassword: ${{ secrets.LOCALAI_REGISTRY_PASSWORD }} tag-suffix: '-nvidia-l4t-arm64'
strategy: base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
matrix: runs-on: 'ubuntu-24.04-arm'
include: makeflags: "--jobs=4 --output-sync=target"
- build-type: 'cublas' skip-drivers: 'true'
cuda-major-version: "12"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64'
base-image: "nvcr.io/nvidia/l4t-jetpack:r36.4.0"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'true'
ubuntu-version: "2204"
ubuntu-codename: 'jammy'
- build-type: 'cublas'
cuda-major-version: "13"
cuda-minor-version: "0"
platforms: 'linux/arm64'
tag-latest: 'auto'
tag-suffix: '-nvidia-l4t-arm64-cuda-13'
base-image: "ubuntu:24.04"
runs-on: 'ubuntu-24.04-arm'
makeflags: "--jobs=4 --output-sync=target"
skip-drivers: 'false'
ubuntu-version: '2404'
ubuntu-codename: 'noble'

View File

@@ -23,7 +23,7 @@ on:
type: string type: string
cuda-minor-version: cuda-minor-version:
description: 'CUDA minor version' description: 'CUDA minor version'
default: "9" default: "4"
type: string type: string
platforms: platforms:
description: 'Platforms' description: 'Platforms'
@@ -56,16 +56,6 @@ on:
required: false required: false
default: '' default: ''
type: string type: string
ubuntu-version:
description: 'Ubuntu version'
required: false
default: '2204'
type: string
ubuntu-codename:
description: 'Ubuntu codename'
required: false
default: 'noble'
type: string
secrets: secrets:
dockerUsername: dockerUsername:
required: true required: true
@@ -104,7 +94,7 @@ jobs:
&& sudo apt-get update \ && sudo apt-get update \
&& sudo apt-get install -y git && sudo apt-get install -y git
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Release space from worker - name: Release space from worker
if: inputs.runs-on == 'ubuntu-latest' if: inputs.runs-on == 'ubuntu-latest'
@@ -248,8 +238,6 @@ jobs:
GRPC_VERSION=v1.65.0 GRPC_VERSION=v1.65.0
MAKEFLAGS=${{ inputs.makeflags }} MAKEFLAGS=${{ inputs.makeflags }}
SKIP_DRIVERS=${{ inputs.skip-drivers }} SKIP_DRIVERS=${{ inputs.skip-drivers }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
context: . context: .
file: ./Dockerfile file: ./Dockerfile
cache-from: type=gha cache-from: type=gha
@@ -277,8 +265,6 @@ jobs:
GRPC_VERSION=v1.65.0 GRPC_VERSION=v1.65.0
MAKEFLAGS=${{ inputs.makeflags }} MAKEFLAGS=${{ inputs.makeflags }}
SKIP_DRIVERS=${{ inputs.skip-drivers }} SKIP_DRIVERS=${{ inputs.skip-drivers }}
UBUNTU_VERSION=${{ inputs.ubuntu-version }}
UBUNTU_CODENAME=${{ inputs.ubuntu-codename }}
context: . context: .
file: ./Dockerfile file: ./Dockerfile
cache-from: type=gha cache-from: type=gha

View File

@@ -9,4 +9,4 @@ jobs:
pull-requests: write pull-requests: write
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/labeler@v6 - uses: actions/labeler@v5

View File

@@ -6,15 +6,14 @@ permissions:
contents: write contents: write
pull-requests: write pull-requests: write
packages: read packages: read
issues: write # for Homebrew/actions/post-comment
actions: write # to dispatch publish workflow
jobs: jobs:
dependabot: dependabot:
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: ${{ github.actor == 'localai-bot' && !contains(github.event.pull_request.title, 'chore(model gallery):') }} if: ${{ github.actor == 'localai-bot' }}
steps: steps:
- name: Checkout repository - name: Checkout repository
uses: actions/checkout@v6 uses: actions/checkout@v4
- name: Approve a PR if not already approved - name: Approve a PR if not already approved
run: | run: |

View File

@@ -1,27 +1,22 @@
name: Notifications for new models name: Notifications for new models
on: on:
pull_request_target: pull_request:
types: types:
- closed - closed
permissions:
contents: read
pull-requests: read
jobs: jobs:
notify-discord: notify-discord:
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }} if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
env: env:
MODEL_NAME: gemma-3-12b-it-qat MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 # needed to checkout all branches for this Action to work fetch-depth: 0 # needed to checkout all branches for this Action to work
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes
- uses: mudler/localai-github-action@v1 - uses: mudler/localai-github-action@v1
with: with:
model: 'gemma-3-12b-it-qat' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file" model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
# Check the PR diff using the current branch and the base branch of the PR # Check the PR diff using the current branch and the base branch of the PR
- uses: GrantBirki/git-diff-action@v2.8.1 - uses: GrantBirki/git-diff-action@v2.8.1
id: git-diff-action id: git-diff-action
@@ -84,7 +79,7 @@ jobs:
args: ${{ steps.summarize.outputs.message }} args: ${{ steps.summarize.outputs.message }}
- name: Setup tmate session if fails - name: Setup tmate session if fails
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180
@@ -92,13 +87,12 @@ jobs:
notify-twitter: notify-twitter:
if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }} if: ${{ (github.event.pull_request.merged == true) && (contains(github.event.pull_request.labels.*.name, 'area/ai-model')) }}
env: env:
MODEL_NAME: gemma-3-12b-it-qat MODEL_NAME: gemma-3-12b-it
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 # needed to checkout all branches for this Action to work fetch-depth: 0 # needed to checkout all branches for this Action to work
ref: ${{ github.event.pull_request.head.sha }} # Checkout the PR head to get the actual changes
- name: Start LocalAI - name: Start LocalAI
run: | run: |
echo "Starting LocalAI..." echo "Starting LocalAI..."
@@ -167,7 +161,7 @@ jobs:
TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }} TWITTER_ACCESS_TOKEN_SECRET: ${{ secrets.TWITTER_ACCESS_TOKEN_SECRET }}
- name: Setup tmate session if fails - name: Setup tmate session if fails
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180

View File

@@ -11,11 +11,10 @@ jobs:
RELEASE_BODY: ${{ github.event.release.body }} RELEASE_BODY: ${{ github.event.release.body }}
RELEASE_TITLE: ${{ github.event.release.name }} RELEASE_TITLE: ${{ github.event.release.name }}
RELEASE_TAG_NAME: ${{ github.event.release.tag_name }} RELEASE_TAG_NAME: ${{ github.event.release.tag_name }}
MODEL_NAME: gemma-3-12b-it-qat
steps: steps:
- uses: mudler/localai-github-action@v1 - uses: mudler/localai-github-action@v1
with: with:
model: 'gemma-3-12b-it-qat' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file" model: 'gemma-3-12b-it' # Any from models.localai.io, or from huggingface.com with: "huggingface://<repository>/file"
- name: Summarize - name: Summarize
id: summarize id: summarize
run: | run: |

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Go - name: Set up Go
@@ -23,42 +23,4 @@ jobs:
version: v2.11.0 version: v2.11.0
args: release --clean args: release --clean
env: env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
launcher-build-darwin:
runs-on: macos-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for macOS ARM64
run: |
make build-launcher-darwin
- name: Upload DMG to Release
uses: softprops/action-gh-release@v2
with:
files: ./dist/LocalAI.dmg
launcher-build-linux:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.23
- name: Build launcher for Linux
run: |
sudo apt-get update
sudo apt-get install golang gcc libgl1-mesa-dev xorg-dev libxkbcommon-dev
make build-launcher-linux
- name: Upload Linux launcher artifacts
uses: softprops/action-gh-release@v2
with:
files: ./local-ai-launcher-linux.tar.xz

View File

@@ -14,17 +14,17 @@ jobs:
GO111MODULE: on GO111MODULE: on
steps: steps:
- name: Checkout Source - name: Checkout Source
uses: actions/checkout@v6 uses: actions/checkout@v4
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
- name: Run Gosec Security Scanner - name: Run Gosec Security Scanner
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
uses: securego/gosec@v2.22.9 uses: securego/gosec@v2.22.7
with: with:
# we let the report trigger content trigger a failure using the GitHub Security features. # we let the report trigger content trigger a failure using the GitHub Security features.
args: '-no-fail -fmt sarif -out results.sarif ./...' args: '-no-fail -fmt sarif -out results.sarif ./...'
- name: Upload SARIF file - name: Upload SARIF file
if: ${{ github.actor != 'dependabot[bot]' }} if: ${{ github.actor != 'dependabot[bot]' }}
uses: github/codeql-action/upload-sarif@v4 uses: github/codeql-action/upload-sarif@v3
with: with:
# Path to SARIF file relative to the root of the repository # Path to SARIF file relative to the root of the repository
sarif_file: results.sarif sarif_file: results.sarif

View File

@@ -10,7 +10,7 @@ jobs:
stale: stale:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v9 - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
with: with:
stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.' stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.' stale-pr-message: 'This PR is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 10 days.'

View File

@@ -19,7 +19,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v6 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -40,7 +40,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -61,7 +61,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -83,7 +83,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -104,7 +104,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v6 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -124,7 +124,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v6 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -186,7 +186,7 @@ jobs:
# sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true # sudo rm -rf "$AGENT_TOOLSDIRECTORY" || true
# df -h # df -h
# - name: Clone # - name: Clone
# uses: actions/checkout@v6 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -211,7 +211,7 @@ jobs:
# runs-on: ubuntu-latest # runs-on: ubuntu-latest
# steps: # steps:
# - name: Clone # - name: Clone
# uses: actions/checkout@v6 # uses: actions/checkout@v4
# with: # with:
# submodules: true # submodules: true
# - name: Dependencies # - name: Dependencies
@@ -232,7 +232,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -247,22 +247,3 @@ jobs:
run: | run: |
make --jobs=5 --output-sync=target -C backend/python/coqui make --jobs=5 --output-sync=target -C backend/python/coqui
make --jobs=5 --output-sync=target -C backend/python/coqui test make --jobs=5 --output-sync=target -C backend/python/coqui test
tests-moonshine:
runs-on: ubuntu-latest
steps:
- name: Clone
uses: actions/checkout@v6
with:
submodules: true
- name: Dependencies
run: |
sudo apt-get update
sudo apt-get install build-essential ffmpeg
sudo apt-get install -y ca-certificates cmake curl patch python3-pip
# Install UV
curl -LsSf https://astral.sh/uv/install.sh | sh
pip install --user --no-cache-dir grpcio-tools==1.64.1
- name: Test moonshine
run: |
make --jobs=5 --output-sync=target -C backend/python/moonshine
make --jobs=5 --output-sync=target -C backend/python/moonshine test

View File

@@ -21,22 +21,8 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
strategy: strategy:
matrix: matrix:
go-version: ['1.25.x'] go-version: ['1.21.x']
steps: steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: true
# all of these default to true, but feel free to set to
# "false" if necessary for your workflow
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Release space from worker - name: Release space from worker
run: | run: |
echo "Listing top largest packages" echo "Listing top largest packages"
@@ -70,7 +56,7 @@ jobs:
sudo rm -rfv build || true sudo rm -rfv build || true
df -h df -h
- name: Clone - name: Clone
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -109,6 +95,11 @@ jobs:
sudo apt-get update sudo apt-get update
sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION} sudo apt-get install -y cuda-nvcc-${CUDA_VERSION} libcublas-dev-${CUDA_VERSION}
export CUDACXX=/usr/local/cuda/bin/nvcc export CUDACXX=/usr/local/cuda/bin/nvcc
# The python3-grpc-tools package in 22.04 is too old
pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
make -C backend/python/transformers make -C backend/python/transformers
make backends/huggingface backends/llama-cpp backends/local-store backends/silero-vad backends/piper backends/whisper backends/stablediffusion-ggml make backends/huggingface backends/llama-cpp backends/local-store backends/silero-vad backends/piper backends/whisper backends/stablediffusion-ggml
@@ -119,7 +110,7 @@ jobs:
PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test PATH="$PATH:/root/go/bin" GO_TAGS="tts" make --jobs 5 --output-sync=target test
- name: Setup tmate session if tests fail - name: Setup tmate session if tests fail
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180
@@ -161,7 +152,7 @@ jobs:
sudo rm -rfv build || true sudo rm -rfv build || true
df -h df -h
- name: Clone - name: Clone
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Dependencies - name: Dependencies
@@ -178,20 +169,20 @@ jobs:
PATH="$PATH:$HOME/go/bin" make backends/local-store backends/silero-vad backends/llama-cpp backends/whisper backends/piper backends/stablediffusion-ggml docker-build-aio e2e-aio PATH="$PATH:$HOME/go/bin" make backends/local-store backends/silero-vad backends/llama-cpp backends/whisper backends/piper backends/stablediffusion-ggml docker-build-aio e2e-aio
- name: Setup tmate session if tests fail - name: Setup tmate session if tests fail
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180
limit-access-to-actor: true limit-access-to-actor: true
tests-apple: tests-apple:
runs-on: macos-latest runs-on: macOS-14
strategy: strategy:
matrix: matrix:
go-version: ['1.25.x'] go-version: ['1.21.x']
steps: steps:
- name: Clone - name: Clone
uses: actions/checkout@v6 uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Setup Go ${{ matrix.go-version }} - name: Setup Go ${{ matrix.go-version }}
@@ -205,11 +196,15 @@ jobs:
- name: Dependencies - name: Dependencies
run: | run: |
brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm brew install protobuf grpc make protoc-gen-go protoc-gen-go-grpc libomp llvm
pip install --user --no-cache-dir grpcio-tools grpcio pip install --user --no-cache-dir grpcio-tools==1.71.0 grpcio==1.71.0
- name: Build llama-cpp-darwin - name: Build llama-cpp-darwin
run: | run: |
make protogen-go make protogen-go
make backends/llama-cpp-darwin make build
bash scripts/build-llama-cpp-darwin.sh
ls -la build/darwin.tar
mv build/darwin.tar build/llama-cpp.tar
./local-ai backends install "ocifile://$PWD/build/llama-cpp.tar"
- name: Test - name: Test
run: | run: |
export C_INCLUDE_PATH=/usr/local/include export C_INCLUDE_PATH=/usr/local/include
@@ -221,7 +216,7 @@ jobs:
PATH="$PATH:$HOME/go/bin" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test PATH="$PATH:$HOME/go/bin" BUILD_TYPE="GITHUB_CI_HAS_BROKEN_METAL" CMAKE_ARGS="-DGGML_F16C=OFF -DGGML_AVX512=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF" make --jobs 4 --output-sync=target test
- name: Setup tmate session if tests fail - name: Setup tmate session if tests fail
if: ${{ failure() }} if: ${{ failure() }}
uses: mxschmitt/action-tmate@v3.23 uses: mxschmitt/action-tmate@v3.22
with: with:
detached: true detached: true
connect-timeout-seconds: 180 connect-timeout-seconds: 180

View File

@@ -9,7 +9,7 @@ jobs:
fail-fast: false fail-fast: false
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v6 - uses: actions/checkout@v4
- uses: actions/setup-go@v5 - uses: actions/setup-go@v5
with: with:
go-version: 'stable' go-version: 'stable'
@@ -25,7 +25,7 @@ jobs:
run: | run: |
make protogen-go swagger make protogen-go swagger
- name: Create Pull Request - name: Create Pull Request
uses: peter-evans/create-pull-request@v8 uses: peter-evans/create-pull-request@v7
with: with:
token: ${{ secrets.UPDATE_BOT_TOKEN }} token: ${{ secrets.UPDATE_BOT_TOKEN }}
push-to-fork: ci-forks/LocalAI push-to-fork: ci-forks/LocalAI

4
.gitignore vendored
View File

@@ -12,7 +12,6 @@ prepare-sources
/backends /backends
/backend-images /backend-images
/result.yaml /result.yaml
protoc
*.log *.log
@@ -24,8 +23,7 @@ go-bert
# LocalAI build binary # LocalAI build binary
LocalAI LocalAI
/local-ai local-ai
/local-ai-launcher
# prevent above rules from omitting the helm chart # prevent above rules from omitting the helm chart
!charts/* !charts/*
# prevent above rules from omitting the api/localai folder # prevent above rules from omitting the api/localai folder

3
.gitmodules vendored
View File

@@ -1,3 +1,6 @@
[submodule "docs/themes/hugo-theme-relearn"] [submodule "docs/themes/hugo-theme-relearn"]
path = docs/themes/hugo-theme-relearn path = docs/themes/hugo-theme-relearn
url = https://github.com/McShelby/hugo-theme-relearn.git url = https://github.com/McShelby/hugo-theme-relearn.git
[submodule "docs/themes/lotusdocs"]
path = docs/themes/lotusdocs
url = https://github.com/colinwilson/lotusdocs

View File

@@ -8,7 +8,7 @@ source:
enabled: true enabled: true
name_template: '{{ .ProjectName }}-{{ .Tag }}-source' name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
builds: builds:
- main: ./cmd/local-ai -
env: env:
- CGO_ENABLED=0 - CGO_ENABLED=0
ldflags: ldflags:
@@ -22,9 +22,6 @@ builds:
goarch: goarch:
- amd64 - amd64
- arm64 - arm64
ignore:
- goos: darwin
goarch: amd64
archives: archives:
- formats: [ 'binary' ] # this removes the tar of the archives, leaving the binaries alone - formats: [ 'binary' ] # this removes the tar of the archives, leaving the binaries alone
name_template: local-ai-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }} name_template: local-ai-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}

282
AGENTS.md
View File

@@ -1,282 +0,0 @@
# Build and testing
Building and testing the project depends on the components involved and the platform where development is taking place. Due to the amount of context required it's usually best not to try building or testing the project unless the user requests it. If you must build the project then inspect the Makefile in the project root and the Makefiles of any backends that are affected by changes you are making. In addition, the workflows in .github/workflows can be used as a reference when it is unclear how to build or test a component. The primary Makefile contains targets for building inside or outside Docker; if the user has not previously specified a preference, ask which they would like to use.
## Building a specified backend
Let's say the user wants to build a particular backend for a given platform, for example bark for ROCM/hipblas
- The Makefile has targets like `docker-build-bark` created with `generate-docker-build-target` at the time of writing. Recently added backends may require a new target.
- At a minimum we need to set the BUILD_TYPE, BASE_IMAGE build-args
- Use .github/workflows/backend.yml as a reference; it lists the needed args in the `include` job strategy matrix
- l4t and cublas also require the CUDA major and minor version
- You can pretty print a command like `DOCKER_MAKEFLAGS=-j$(nproc --ignore=1) BUILD_TYPE=hipblas BASE_IMAGE=rocm/dev-ubuntu-24.04:6.4.4 make docker-build-bark`
- Unless the user specifies that they want you to run the command, just print it, because not all agent frontends handle long-running jobs well and the output may overflow your context
- The user may say they want to build AMD or ROCM instead of hipblas, or Intel instead of SYCL, or NVIDIA instead of l4t or cublas. Ask for confirmation if there is ambiguity.
- Sometimes the user may need extra parameters to be added to `docker build` (e.g. `--platform` for cross-platform builds or `--progress` to view the full logs), in which case you can generate the `docker build` command directly.
## Adding a New Backend
When adding a new backend to LocalAI, you need to update several files to ensure the backend is properly built, tested, and registered. Here's a step-by-step guide based on the pattern used for adding backends like `moonshine`:
### 1. Create Backend Directory Structure
Create the backend directory under the appropriate location:
- **Python backends**: `backend/python/<backend-name>/`
- **Go backends**: `backend/go/<backend-name>/`
- **C++ backends**: `backend/cpp/<backend-name>/`
For Python backends, you'll typically need:
- `backend.py` - Main gRPC server implementation
- `Makefile` - Build configuration
- `install.sh` - Installation script for dependencies
- `protogen.sh` - Protocol buffer generation script
- `requirements.txt` - Python dependencies
- `run.sh` - Runtime script
- `test.py` / `test.sh` - Test files
### 2. Add Build Configurations to `.github/workflows/backend.yml`
Add build matrix entries for each platform/GPU type you want to support. Look at similar backends (e.g., `chatterbox`, `faster-whisper`) for reference.
**Placement in file:**
- CPU builds: Add after other CPU builds (e.g., after `cpu-chatterbox`)
- CUDA 12 builds: Add after other CUDA 12 builds (e.g., after `gpu-nvidia-cuda-12-chatterbox`)
- CUDA 13 builds: Add after other CUDA 13 builds (e.g., after `gpu-nvidia-cuda-13-chatterbox`)
**Additional build types you may need:**
- ROCm/HIP: Use `build-type: 'hipblas'` with `base-image: "rocm/dev-ubuntu-24.04:6.4.4"`
- Intel/SYCL: Use `build-type: 'intel'` or `build-type: 'sycl_f16'`/`sycl_f32` with `base-image: "intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04"`
- L4T (ARM): Use `build-type: 'l4t'` with `platforms: 'linux/arm64'` and `runs-on: 'ubuntu-24.04-arm'`
### 3. Add Backend Metadata to `backend/index.yaml`
**Step 3a: Add Meta Definition**
Add a YAML anchor definition in the `## metas` section (around line 2-300). Look for similar backends to use as a template, such as `diffusers` or `chatterbox`.
**Step 3b: Add Image Entries**
Add image entries at the end of the file, following the pattern of similar backends such as `diffusers` or `chatterbox`. Include both `latest` (production) and `master` (development) tags.
### 4. Update the Makefile
The Makefile needs to be updated in several places to support building and testing the new backend:
**Step 4a: Add to `.NOTPARALLEL`**
Add `backends/<backend-name>` to the `.NOTPARALLEL` line (around line 2) to prevent parallel execution conflicts:
```makefile
.NOTPARALLEL: ... backends/<backend-name>
```
**Step 4b: Add to `prepare-test-extra`**
Add the backend to the `prepare-test-extra` target (around line 312) to prepare it for testing:
```makefile
prepare-test-extra: protogen-python
...
$(MAKE) -C backend/python/<backend-name>
```
**Step 4c: Add to `test-extra`**
Add the backend to the `test-extra` target (around line 319) to run its tests:
```makefile
test-extra: prepare-test-extra
...
$(MAKE) -C backend/python/<backend-name> test
```
**Step 4d: Add Backend Definition**
Add a backend definition variable in the backend definitions section (around line 428-457). The format depends on the backend type:
**For Python backends with root context** (like `faster-whisper`, `bark`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|.|false|true
```
**For Python backends with `./backend` context** (like `chatterbox`, `moonshine`):
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|python|./backend|false|true
```
**For Go backends**:
```makefile
BACKEND_<BACKEND_NAME> = <backend-name>|golang|.|false|true
```
**Step 4e: Generate Docker Build Target**
Add an eval call to generate the docker-build target (around line 480-501):
```makefile
$(eval $(call generate-docker-build-target,$(BACKEND_<BACKEND_NAME>)))
```
**Step 4f: Add to `docker-build-backends`**
Add `docker-build-<backend-name>` to the `docker-build-backends` target (around line 507):
```makefile
docker-build-backends: ... docker-build-<backend-name>
```
**Determining the Context:**
- If the backend is in `backend/python/<backend-name>/` and uses `./backend` as context in the workflow file, use `./backend` context
- If the backend is in `backend/python/<backend-name>/` but uses `.` as context in the workflow file, use `.` context
- Check similar backends to determine the correct context
### 5. Verification Checklist
After adding a new backend, verify:
- [ ] Backend directory structure is complete with all necessary files
- [ ] Build configurations added to `.github/workflows/backend.yml` for all desired platforms
- [ ] Meta definition added to `backend/index.yaml` in the `## metas` section
- [ ] Image entries added to `backend/index.yaml` for all build variants (latest + development)
- [ ] Tag suffixes match between workflow file and index.yaml
- [ ] Makefile updated with all 6 required changes (`.NOTPARALLEL`, `prepare-test-extra`, `test-extra`, backend definition, docker-build target eval, `docker-build-backends`)
- [ ] No YAML syntax errors (check with linter)
- [ ] No Makefile syntax errors (check with linter)
- [ ] Follows the same pattern as similar backends (e.g., if it's a transcription backend, follow `faster-whisper` pattern)
### 6. Example: Adding a Python Backend
For reference, when `moonshine` was added:
- **Files created**: `backend/python/moonshine/{backend.py, Makefile, install.sh, protogen.sh, requirements.txt, run.sh, test.py, test.sh}`
- **Workflow entries**: 3 build configurations (CPU, CUDA 12, CUDA 13)
- **Index entries**: 1 meta definition + 6 image entries (cpu, cuda12, cuda13 × latest/development)
- **Makefile updates**:
- Added to `.NOTPARALLEL` line
- Added to `prepare-test-extra` and `test-extra` targets
- Added `BACKEND_MOONSHINE = moonshine|python|./backend|false|true`
- Added eval for docker-build target generation
- Added `docker-build-moonshine` to `docker-build-backends`
# Coding style
- The project has the following .editorconfig
```
root = true
[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[*.go]
indent_style = tab
[Makefile]
indent_style = tab
[*.proto]
indent_size = 2
[*.py]
indent_size = 4
[*.js]
indent_size = 2
[*.yaml]
indent_size = 2
[*.md]
trim_trailing_whitespace = false
```
- Use comments sparingly to explain why code does something, not what it does. Comments are there to add context that would be difficult to deduce from reading the code.
- Prefer modern Go, e.g. use `any` instead of `interface{}`
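A minimal sketch of the style above (the package and helper names are hypothetical, shown only to illustrate the conventions):
```go
package style

import "fmt"

// describe flattens a map into "key=value" strings. Note the use of `any`
// rather than `interface{}`, tab indentation, and a comment that explains
// intent instead of restating the code.
func describe(values map[string]any) []string {
	out := make([]string, 0, len(values))
	for key, value := range values {
		out = append(out, fmt.Sprintf("%s=%v", key, value))
	}
	return out
}
```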
# Logging
Use `github.com/mudler/xlog` for logging which has the same API as slog.
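A minimal sketch, assuming `xlog` exposes the same package-level helpers and key/value attribute style as `log/slog` (the messages and attribute keys are illustrative):
```go
package loader

import "github.com/mudler/xlog"

// loadModel shows structured key/value logging, mirroring the log/slog API.
func loadModel(name string) error {
	xlog.Info("loading model", "model", name)
	if err := doLoad(name); err != nil {
		xlog.Error("model load failed", "model", name, "error", err)
		return err
	}
	return nil
}

// doLoad is a placeholder for the real loading logic.
func doLoad(name string) error { return nil }
```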
# llama.cpp Backend
The llama.cpp backend (`backend/cpp/llama-cpp/grpc-server.cpp`) is a gRPC adaptation of the upstream HTTP server (`llama.cpp/tools/server/server.cpp`). It uses the same underlying server infrastructure from `llama.cpp/tools/server/server-context.cpp`.
## Building and Testing
- Test llama.cpp backend compilation: `make backends/llama-cpp`
- The backend is built as part of the main build process
- Check `backend/cpp/llama-cpp/Makefile` for build configuration
## Architecture
- **grpc-server.cpp**: gRPC server implementation, adapts HTTP server patterns to gRPC
- Uses shared server infrastructure: `server-context.cpp`, `server-task.cpp`, `server-queue.cpp`, `server-common.cpp`
- The gRPC server mirrors the HTTP server's functionality but uses gRPC instead of HTTP
## Common Issues When Updating llama.cpp
When fixing compilation errors after upstream changes:
1. Check how `server.cpp` (HTTP server) handles the same change
2. Look for new public APIs or getter methods
3. Store copies of needed data instead of accessing private members
4. Update function calls to match new signatures
5. Test with `make backends/llama-cpp`
## Key Differences from HTTP Server
- gRPC uses `BackendServiceImpl` class with gRPC service methods
- HTTP server uses `server_routes` with HTTP handlers
- Both use the same `server_context` and task queue infrastructure
- gRPC methods: `LoadModel`, `Predict`, `PredictStream`, `Embedding`, `Rerank`, `TokenizeString`, `GetMetrics`, `Health`
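A hedged Go sketch of how a client can exercise these methods against a running backend. The generated-stub import path, message names, and field names below are assumptions based on typical protoc-gen-go output; check the code generated under pkg/grpc/proto for the authoritative identifiers:
```go
package main

import (
	"context"
	"fmt"
	"log"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto" // assumed import path of the generated stubs
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("127.0.0.1:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)
	ctx := context.Background()

	// Health, LoadModel and Predict mirror the gRPC methods listed above;
	// the message and field names here are assumptions, not verified proto.
	if _, err := client.Health(ctx, &pb.HealthMessage{}); err != nil {
		log.Fatalf("backend not healthy: %v", err)
	}
	if _, err := client.LoadModel(ctx, &pb.ModelOptions{Model: "model.gguf"}); err != nil {
		log.Fatalf("load failed: %v", err)
	}
	reply, err := client.Predict(ctx, &pb.PredictOptions{Prompt: "Hello"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(reply.GetMessage()))
}
```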
## Tool Call Parsing Maintenance
When working on JSON/XML tool call parsing functionality, always check llama.cpp for reference implementation and updates:
### Checking for XML Parsing Changes
1. **Review XML Format Definitions**: Check `llama.cpp/common/chat-parser-xml-toolcall.h` for `xml_tool_call_format` struct changes
2. **Review Parsing Logic**: Check `llama.cpp/common/chat-parser-xml-toolcall.cpp` for parsing algorithm updates
3. **Review Format Presets**: Check `llama.cpp/common/chat-parser.cpp` for new XML format presets (search for `xml_tool_call_format form`)
4. **Review Model Lists**: Check `llama.cpp/common/chat.h` for `COMMON_CHAT_FORMAT_*` enum values that use XML parsing:
- `COMMON_CHAT_FORMAT_GLM_4_5`
- `COMMON_CHAT_FORMAT_MINIMAX_M2`
- `COMMON_CHAT_FORMAT_KIMI_K2`
- `COMMON_CHAT_FORMAT_QWEN3_CODER_XML`
- `COMMON_CHAT_FORMAT_APRIEL_1_5`
- `COMMON_CHAT_FORMAT_XIAOMI_MIMO`
- Any new formats added
### Model Configuration Options
Always check `llama.cpp` for new model configuration options that should be supported in LocalAI:
1. **Check Server Context**: Review `llama.cpp/tools/server/server-context.cpp` for new parameters
2. **Check Chat Params**: Review `llama.cpp/common/chat.h` for `common_chat_params` struct changes
3. **Check Server Options**: Review `llama.cpp/tools/server/server.cpp` for command-line argument changes
4. **Examples of options to check**:
- `ctx_shift` - Context shifting support
- `parallel_tool_calls` - Parallel tool calling
- `reasoning_format` - Reasoning format options
- Any new flags or parameters
### Implementation Guidelines
1. **Feature Parity**: Always aim for feature parity with llama.cpp's implementation
2. **Test Coverage**: Add tests for new features matching llama.cpp's behavior
3. **Documentation**: Update relevant documentation when adding new formats or options
4. **Backward Compatibility**: Ensure changes don't break existing functionality
### Files to Monitor
- `llama.cpp/common/chat-parser-xml-toolcall.h` - Format definitions
- `llama.cpp/common/chat-parser-xml-toolcall.cpp` - Parsing logic
- `llama.cpp/common/chat-parser.cpp` - Format presets and model-specific handlers
- `llama.cpp/common/chat.h` - Format enums and parameter structures
- `llama.cpp/tools/server/server-context.cpp` - Server configuration options

View File

@@ -30,7 +30,6 @@ Thank you for your interest in contributing to LocalAI! We appreciate your time
3. Install the required dependencies ( see https://localai.io/basics/build/#build-localai-locally ) 3. Install the required dependencies ( see https://localai.io/basics/build/#build-localai-locally )
4. Build LocalAI: `make build` 4. Build LocalAI: `make build`
5. Run LocalAI: `./local-ai` 5. Run LocalAI: `./local-ai`
6. To Build and live reload: `make build-dev`
## Contributing ## Contributing
@@ -77,21 +76,7 @@ LOCALAI_IMAGE_TAG=test LOCALAI_IMAGE=local-ai-aio make run-e2e-aio
## Documentation ## Documentation
We welcome contributions to the documentation; please open a new PR or create a new issue. The documentation is available under `docs/` https://github.com/mudler/LocalAI/tree/master/docs We welcome contributions to the documentation; please open a new PR or create a new issue. The documentation is available under `docs/` https://github.com/mudler/LocalAI/tree/master/docs
### Gallery YAML Schema
LocalAI provides a JSON Schema for gallery model YAML files at:
`core/schema/gallery-model.schema.json`
This schema mirrors the internal gallery model configuration and can be used by editors (such as VS Code) to enable autocomplete, validation, and inline documentation when creating or modifying gallery files.
To use it with the YAML language server, add the following comment at the top of a gallery YAML file:
```yaml
# yaml-language-server: $schema=../core/schema/gallery-model.schema.json
```
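As a sketch, a gallery file can also be validated programmatically. The snippet below is an assumption-level example: the gallery file path is hypothetical, and `github.com/santhosh-tekuri/jsonschema/v5` is a third-party validator not necessarily used by this repository, paired here with `github.com/ghodss/yaml` for the YAML-to-JSON conversion:
```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"github.com/ghodss/yaml"
	"github.com/santhosh-tekuri/jsonschema/v5"
)

func main() {
	raw, err := os.ReadFile("gallery/my-model.yaml") // hypothetical gallery entry
	if err != nil {
		log.Fatal(err)
	}
	// Convert YAML to JSON so it can be checked against the JSON Schema.
	jsonDoc, err := yaml.YAMLToJSON(raw)
	if err != nil {
		log.Fatal(err)
	}
	var doc any
	if err := json.Unmarshal(jsonDoc, &doc); err != nil {
		log.Fatal(err)
	}
	schema, err := jsonschema.Compile("core/schema/gallery-model.schema.json")
	if err != nil {
		log.Fatal(err)
	}
	if err := schema.Validate(doc); err != nil {
		log.Fatalf("gallery file does not match the schema: %v", err)
	}
	log.Println("gallery file is valid")
}
```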
## Community and Communication ## Community and Communication
- You can reach out via the Github issue tracker. - You can reach out via the Github issue tracker.

View File

@@ -1,7 +1,6 @@
ARG BASE_IMAGE=ubuntu:24.04 ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE} ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
ARG INTEL_BASE_IMAGE=${BASE_IMAGE} ARG INTEL_BASE_IMAGE=${BASE_IMAGE}
ARG UBUNTU_CODENAME=noble
FROM ${BASE_IMAGE} AS requirements FROM ${BASE_IMAGE} AS requirements
@@ -10,13 +9,13 @@ ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends \
ca-certificates curl wget espeak-ng libgomp1 \
ffmpeg libopenblas0 libopenblas-dev && \ python3 python-is-python3 ffmpeg && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# The requirements-drivers target is for BUILD_TYPE specific items. If you need to install something specific to CUDA, or specific to ROCM, it goes here.
FROM requirements AS requirements-drivers
ARG VULKAN_FROM_SOURCE=false
ARG BUILD_TYPE
ARG CUDA_MAJOR_VERSION=12
ARG CUDA_MINOR_VERSION=0
@@ -24,7 +23,6 @@ ARG SKIP_DRIVERS=false
ARG TARGETARCH
ARG TARGETVARIANT
ENV BUILD_TYPE=${BUILD_TYPE}
ARG UBUNTU_VERSION=2404
RUN mkdir -p /run/localai
RUN echo "default" > /run/localai/capability
@@ -35,50 +33,11 @@ RUN <<EOT bash
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \ wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \ wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \ wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \ apt-get update && \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \ apt-get install -y \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \ vulkan-sdk && \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils mesa-vulkan-drivers
if [ "amd64" = "$TARGETARCH" ] && [ "${VULKAN_FROM_SOURCE}" = "true" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
elif [ "amd64" = "${TARGETARCH}}" ]; then
wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list http://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \
apt-get update && \
apt-get install -y vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
echo "vulkan" > /run/localai/capability
@@ -87,19 +46,15 @@ EOT
# CuBLAS requirements
RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
@@ -110,34 +65,10 @@ RUN <<EOT bash
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
echo "nvidia-cuda-${CUDA_MAJOR_VERSION}" > /run/localai/capability echo "nvidia" > /run/localai/capability
fi
EOT
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
echo "nvidia-l4t-cuda-${CUDA_MAJOR_VERSION}" > /run/localai/capability
fi
EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
fi
EOT
@@ -163,12 +94,6 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
ldconfig \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
; fi
RUN expr "${BUILD_TYPE}" = intel && echo "intel" > /run/localai/capability || echo "not intel"
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
@@ -181,12 +106,13 @@ ENV PATH=/opt/rocm/bin:${PATH}
# The requirements-core target is common to all images. It should not be placed in requirements-core unless every single build will use it.
FROM requirements-drivers AS build-requirements
ARG GO_VERSION=1.25.4 ARG GO_VERSION=1.22.6
ARG CMAKE_VERSION=3.31.10 ARG CMAKE_VERSION=3.26.4
ARG CMAKE_FROM_SOURCE=false
ARG TARGETARCH
ARG TARGETVARIANT
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
@@ -223,6 +149,14 @@ RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
COPY --chmod=644 custom-ca-certs/* /usr/local/share/ca-certificates/
RUN update-ca-certificates
# OpenBLAS requirements and stable diffusion
RUN apt-get update && \
apt-get install -y --no-install-recommends \
libopenblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
RUN test -n "$TARGETARCH" \
|| (echo 'warn: missing $TARGETARCH, either set this `ARG` manually, or run using `docker buildkit`')
@@ -243,10 +177,9 @@ WORKDIR /build
# https://community.intel.com/t5/Intel-oneAPI-Math-Kernel-Library/APT-Repository-not-working-signatures-invalid/m-p/1599436/highlight/true#M36143
# This is a temporary workaround until Intel fixes their repository
FROM ${INTEL_BASE_IMAGE} AS intel
ARG UBUNTU_CODENAME=noble
RUN wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | \
gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu ${UBUNTU_CODENAME}/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list RUN echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy/lts/2350 unified" > /etc/apt/sources.list.d/intel-graphics.list
RUN apt-get update && \
apt-get install -y --no-install-recommends \
intel-oneapi-runtime-libs && \
@@ -377,6 +310,6 @@ RUN mkdir -p /models /backends
HEALTHCHECK --interval=1m --timeout=10m --retries=10 \
CMD curl -f ${HEALTHCHECK_ENDPOINT} || exit 1
VOLUME /models /backends /configuration VOLUME /models /backends
EXPOSE 8080
ENTRYPOINT [ "/entrypoint.sh" ]


@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:24.04 ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE}

Makefile

@@ -1,22 +1,13 @@
# Disable parallel execution for backend builds
.NOTPARALLEL: backends/diffusers backends/llama-cpp backends/piper backends/stablediffusion-ggml backends/whisper backends/faster-whisper backends/silero-vad backends/local-store backends/huggingface backends/rfdetr backends/kitten-tts backends/kokoro backends/chatterbox backends/llama-cpp-darwin backends/neutts build-darwin-python-backend build-darwin-go-backend backends/mlx backends/diffuser-darwin backends/mlx-vlm backends/mlx-audio backends/stablediffusion-ggml-darwin backends/vllm backends/moonshine
GOCMD=go
GOTEST=$(GOCMD) test
GOVET=$(GOCMD) vet
BINARY_NAME=local-ai
LAUNCHER_BINARY_NAME=local-ai-launcher
CUDA_MAJOR_VERSION?=13
CUDA_MINOR_VERSION?=0
UBUNTU_VERSION?=2204
UBUNTU_CODENAME?=noble
GORELEASER?=
ONEAPI_VERSION?=2025.2
export BUILD_TYPE?=
export CUDA_MAJOR_VERSION?=12
export CUDA_MINOR_VERSION?=9
GO_TAGS?=
BUILD_ID?=
@@ -101,21 +92,7 @@ build: protogen-go install-go-tools ## Build the project
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
$(info ${GREEN}I UPX: ${YELLOW}$(UPX)${RESET})
rm -rf $(BINARY_NAME) || true
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./cmd/local-ai CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(BINARY_NAME) ./
build-launcher: ## Build the launcher application
$(info ${GREEN}I local-ai launcher build info:${RESET})
$(info ${GREEN}I BUILD_TYPE: ${YELLOW}$(BUILD_TYPE)${RESET})
$(info ${GREEN}I GO_TAGS: ${YELLOW}$(GO_TAGS)${RESET})
$(info ${GREEN}I LD_FLAGS: ${YELLOW}$(LD_FLAGS)${RESET})
rm -rf $(LAUNCHER_BINARY_NAME) || true
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o $(LAUNCHER_BINARY_NAME) ./cmd/launcher
build-all: build build-launcher ## Build both server and launcher
build-dev: ## Run LocalAI in dev mode with live reload
@command -v air >/dev/null 2>&1 || go install github.com/air-verse/air@latest
air -c .air.toml
dev-dist:
$(GORELEASER) build --snapshot --clean
@@ -131,8 +108,8 @@ run: ## run local-ai
CGO_LDFLAGS="$(CGO_LDFLAGS)" $(GOCMD) run ./
test-models/testmodel.ggml:
mkdir -p test-models mkdir test-models
mkdir -p test-dir mkdir test-dir
wget -q https://huggingface.co/mradermacher/gpt2-alpaca-gpt4-GGUF/resolve/main/gpt2-alpaca-gpt4.Q4_K_M.gguf -O test-models/testmodel.ggml
wget -q https://huggingface.co/ggerganov/whisper.cpp/resolve/main/ggml-base.en.bin -O test-models/whisper-en
wget -q https://huggingface.co/mudler/all-MiniLM-L6-v2/resolve/main/ggml-model-q4_0.bin -O test-models/bert
@@ -157,22 +134,33 @@ test: test-models/testmodel.ggml protogen-go
$(MAKE) test-tts
$(MAKE) test-stablediffusion
backends/llama-cpp: docker-build-llama-cpp docker-save-llama-cpp build
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
backends/piper: docker-build-piper docker-save-piper build
./local-ai backends install "ocifile://$(abspath ./backend-images/piper.tar)"
backends/stablediffusion-ggml: docker-build-stablediffusion-ggml docker-save-stablediffusion-ggml build
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backends/whisper: docker-build-whisper docker-save-whisper build
./local-ai backends install "ocifile://$(abspath ./backend-images/whisper.tar)"
backends/silero-vad: docker-build-silero-vad docker-save-silero-vad build
./local-ai backends install "ocifile://$(abspath ./backend-images/silero-vad.tar)"
backends/local-store: docker-build-local-store docker-save-local-store build
./local-ai backends install "ocifile://$(abspath ./backend-images/local-store.tar)"
backends/huggingface: docker-build-huggingface docker-save-huggingface build
./local-ai backends install "ocifile://$(abspath ./backend-images/huggingface.tar)"
########################################################
## AIO tests
########################################################
docker-build-aio:
docker build \ docker build --build-arg MAKEFLAGS="--jobs=5 --output-sync=target" -t local-ai:tests -f Dockerfile .
--build-arg MAKEFLAGS="--jobs=5 --output-sync=target" \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
--build-arg GO_TAGS="$(GO_TAGS)" \
-t local-ai:tests -f Dockerfile .
BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test $(MAKE) docker-aio BASE_IMAGE=local-ai:tests DOCKER_AIO_IMAGE=local-ai-aio:test $(MAKE) docker-aio
e2e-aio: e2e-aio:
@@ -194,17 +182,7 @@ prepare-e2e:
mkdir -p $(TEST_DIR) mkdir -p $(TEST_DIR)
cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml cp -rfv $(abspath ./tests/e2e-fixtures)/gpu.yaml $(TEST_DIR)/gpu.yaml
test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin test -e $(TEST_DIR)/ggllm-test-model.bin || wget -q https://huggingface.co/TheBloke/CodeLlama-7B-Instruct-GGUF/resolve/main/codellama-7b-instruct.Q2_K.gguf -O $(TEST_DIR)/ggllm-test-model.bin
docker build \ docker build --build-arg IMAGE_TYPE=core --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg CUDA_MAJOR_VERSION=12 --build-arg CUDA_MINOR_VERSION=0 -t localai-tests .
--build-arg IMAGE_TYPE=core \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
-t localai-tests .
run-e2e-image: run-e2e-image:
ls -liah $(abspath ./tests/e2e-fixtures) ls -liah $(abspath ./tests/e2e-fixtures)
@@ -264,7 +242,10 @@ help: ## Show this help.
######################################################## ########################################################
.PHONY: protogen .PHONY: protogen
protogen: protogen-go protogen: protogen-go protogen-python
.PHONY: protogen-clean
protogen-clean: protogen-go-clean protogen-python-clean
protoc: protoc:
@OS_NAME=$$(uname -s | tr '[:upper:]' '[:lower:]'); \ @OS_NAME=$$(uname -s | tr '[:upper:]' '[:lower:]'); \
@@ -295,7 +276,7 @@ protoc:
echo "Unsupported OS: $$OS_NAME"; exit 1; \ echo "Unsupported OS: $$OS_NAME"; exit 1; \
fi; \ fi; \
URL=https://github.com/protocolbuffers/protobuf/releases/download/v31.1/$$FILE; \ URL=https://github.com/protocolbuffers/protobuf/releases/download/v31.1/$$FILE; \
curl -L $$URL -o protoc.zip && \ curl -L -s $$URL -o protoc.zip && \
unzip -j -d $(CURDIR) protoc.zip bin/protoc && rm protoc.zip unzip -j -d $(CURDIR) protoc.zip bin/protoc && rm protoc.zip
.PHONY: protogen-go .PHONY: protogen-go
@@ -309,26 +290,109 @@ protogen-go-clean:
$(RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go $(RM) pkg/grpc/proto/backend.pb.go pkg/grpc/proto/backend_grpc.pb.go
$(RM) bin/* $(RM) bin/*
.PHONY: protogen-python
protogen-python: bark-protogen coqui-protogen chatterbox-protogen diffusers-protogen exllama2-protogen rerankers-protogen transformers-protogen kokoro-protogen vllm-protogen faster-whisper-protogen
.PHONY: protogen-python-clean
protogen-python-clean: bark-protogen-clean coqui-protogen-clean chatterbox-protogen-clean diffusers-protogen-clean exllama2-protogen-clean rerankers-protogen-clean transformers-protogen-clean kokoro-protogen-clean vllm-protogen-clean faster-whisper-protogen-clean
.PHONY: bark-protogen
bark-protogen:
$(MAKE) -C backend/python/bark protogen
.PHONY: bark-protogen-clean
bark-protogen-clean:
$(MAKE) -C backend/python/bark protogen-clean
.PHONY: coqui-protogen
coqui-protogen:
$(MAKE) -C backend/python/coqui protogen
.PHONY: coqui-protogen-clean
coqui-protogen-clean:
$(MAKE) -C backend/python/coqui protogen-clean
.PHONY: diffusers-protogen
diffusers-protogen:
$(MAKE) -C backend/python/diffusers protogen
.PHONY: chatterbox-protogen
chatterbox-protogen:
$(MAKE) -C backend/python/chatterbox protogen
.PHONY: diffusers-protogen-clean
diffusers-protogen-clean:
$(MAKE) -C backend/python/diffusers protogen-clean
.PHONY: chatterbox-protogen-clean
chatterbox-protogen-clean:
$(MAKE) -C backend/python/chatterbox protogen-clean
.PHONY: faster-whisper-protogen
faster-whisper-protogen:
$(MAKE) -C backend/python/faster-whisper protogen
.PHONY: faster-whisper-protogen-clean
faster-whisper-protogen-clean:
$(MAKE) -C backend/python/faster-whisper protogen-clean
.PHONY: exllama2-protogen
exllama2-protogen:
$(MAKE) -C backend/python/exllama2 protogen
.PHONY: exllama2-protogen-clean
exllama2-protogen-clean:
$(MAKE) -C backend/python/exllama2 protogen-clean
.PHONY: rerankers-protogen
rerankers-protogen:
$(MAKE) -C backend/python/rerankers protogen
.PHONY: rerankers-protogen-clean
rerankers-protogen-clean:
$(MAKE) -C backend/python/rerankers protogen-clean
.PHONY: transformers-protogen
transformers-protogen:
$(MAKE) -C backend/python/transformers protogen
.PHONY: transformers-protogen-clean
transformers-protogen-clean:
$(MAKE) -C backend/python/transformers protogen-clean
.PHONY: kokoro-protogen
kokoro-protogen:
$(MAKE) -C backend/python/kokoro protogen
.PHONY: kokoro-protogen-clean
kokoro-protogen-clean:
$(MAKE) -C backend/python/kokoro protogen-clean
.PHONY: vllm-protogen
vllm-protogen:
$(MAKE) -C backend/python/vllm protogen
.PHONY: vllm-protogen-clean
vllm-protogen-clean:
$(MAKE) -C backend/python/vllm protogen-clean
prepare-test-extra: protogen-python
$(MAKE) -C backend/python/transformers
$(MAKE) -C backend/python/diffusers
$(MAKE) -C backend/python/chatterbox
$(MAKE) -C backend/python/vllm
$(MAKE) -C backend/python/vibevoice
$(MAKE) -C backend/python/moonshine
test-extra: prepare-test-extra
$(MAKE) -C backend/python/transformers test
$(MAKE) -C backend/python/diffusers test
$(MAKE) -C backend/python/chatterbox test
$(MAKE) -C backend/python/vllm test
$(MAKE) -C backend/python/vibevoice test
$(MAKE) -C backend/python/moonshine test
DOCKER_IMAGE?=local-ai
DOCKER_AIO_IMAGE?=local-ai-aio
IMAGE_TYPE?=core
BASE_IMAGE?=ubuntu:24.04 BASE_IMAGE?=ubuntu:22.04
docker: docker:
docker build \ docker build \
@@ -337,34 +401,24 @@ docker:
--build-arg GO_TAGS="$(GO_TAGS)" \ --build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \ --build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
-t $(DOCKER_IMAGE) . -t $(DOCKER_IMAGE) .
docker-cuda12: docker-cuda11:
docker build \ docker build \
--build-arg CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION} \ --build-arg CUDA_MAJOR_VERSION=11 \
--build-arg CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION} \ --build-arg CUDA_MINOR_VERSION=8 \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \ --build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \ --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \ --build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \ --build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \ -t $(DOCKER_IMAGE)-cuda11 .
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
-t $(DOCKER_IMAGE)-cuda-12 .
docker-aio: docker-aio:
@echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)" @echo "Building AIO image with base $(BASE_IMAGE) as $(DOCKER_AIO_IMAGE)"
docker build \ docker build \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \ --build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
-t $(DOCKER_AIO_IMAGE) -f Dockerfile.aio . -t $(DOCKER_AIO_IMAGE) -f Dockerfile.aio .
docker-aio-all: docker-aio-all:
@@ -373,142 +427,106 @@ docker-aio-all:
docker-image-intel: docker-image-intel:
docker build \ docker build \
--build-arg BASE_IMAGE=intel/oneapi-basekit:2025.3.0-0-devel-ubuntu24.04 \ --build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu24.04 \
--build-arg IMAGE_TYPE=$(IMAGE_TYPE) \ --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \ --build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \ --build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=intel \ --build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \ docker-image-intel-xpu:
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \ docker build \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \ --build-arg BASE_IMAGE=intel/oneapi-basekit:${ONEAPI_VERSION}.0-0-devel-ubuntu22.04 \
-t $(DOCKER_IMAGE) . --build-arg IMAGE_TYPE=$(IMAGE_TYPE) \
--build-arg GO_TAGS="$(GO_TAGS)" \
--build-arg MAKEFLAGS="$(DOCKER_MAKEFLAGS)" \
--build-arg BUILD_TYPE=sycl_f32 -t $(DOCKER_IMAGE) .
########################################################
## Backends
########################################################
# Pattern rule for standard backends (docker-based)
# This matches all backends that use docker-build-* and docker-save-*
backends/%: docker-build-% docker-save-% build
./local-ai backends install "ocifile://$(abspath ./backend-images/$*.tar)"
# Darwin-specific backends (keep as explicit targets since they have special build logic)
backends/llama-cpp-darwin: build
bash ./scripts/build/llama-cpp-darwin.sh
./local-ai backends install "ocifile://$(abspath ./backend-images/llama-cpp.tar)"
build-darwin-python-backend: build
bash ./scripts/build/python-darwin.sh
build-darwin-go-backend: build
bash ./scripts/build/golang-darwin.sh
backends/mlx:
BACKEND=mlx $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx.tar)"
backends/diffuser-darwin:
BACKEND=diffusers $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/diffusers.tar)"
backends/mlx-vlm:
BACKEND=mlx-vlm $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-vlm.tar)"
backends/mlx-audio:
BACKEND=mlx-audio $(MAKE) build-darwin-python-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/mlx-audio.tar)"
backends/stablediffusion-ggml-darwin:
BACKEND=stablediffusion-ggml BUILD_TYPE=metal $(MAKE) build-darwin-go-backend
./local-ai backends install "ocifile://$(abspath ./backend-images/stablediffusion-ggml.tar)"
backend-images: backend-images:
mkdir -p backend-images mkdir -p backend-images
# Backend metadata: BACKEND_NAME | DOCKERFILE_TYPE | BUILD_CONTEXT | PROGRESS_FLAG | NEEDS_BACKEND_ARG docker-build-llama-cpp:
# llama-cpp is special - uses llama-cpp Dockerfile and doesn't need BACKEND arg docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg IMAGE_BASE=$(IMAGE_BASE) -t local-ai-backend:llama-cpp -f backend/Dockerfile.llama-cpp .
BACKEND_LLAMA_CPP = llama-cpp|llama-cpp|.|false|false
# Golang backends docker-build-bark-cpp:
BACKEND_BARK_CPP = bark-cpp|golang|.|false|true docker build -t local-ai-backend:bark-cpp -f backend/Dockerfile.go --build-arg BACKEND=bark-cpp .
BACKEND_PIPER = piper|golang|.|false|true
BACKEND_LOCAL_STORE = local-store|golang|.|false|true
BACKEND_HUGGINGFACE = huggingface|golang|.|false|true
BACKEND_SILERO_VAD = silero-vad|golang|.|false|true
BACKEND_STABLEDIFFUSION_GGML = stablediffusion-ggml|golang|.|--progress=plain|true
BACKEND_WHISPER = whisper|golang|.|false|true
# Python backends with root context docker-build-piper:
BACKEND_RERANKERS = rerankers|python|.|false|true docker build -t local-ai-backend:piper -f backend/Dockerfile.go --build-arg BACKEND=piper .
BACKEND_TRANSFORMERS = transformers|python|.|false|true
BACKEND_FASTER_WHISPER = faster-whisper|python|.|false|true
BACKEND_COQUI = coqui|python|.|false|true
BACKEND_BARK = bark|python|.|false|true
BACKEND_EXLLAMA2 = exllama2|python|.|false|true
# Python backends with ./backend context docker-build-local-store:
BACKEND_RFDETR = rfdetr|python|./backend|false|true docker build -t local-ai-backend:local-store -f backend/Dockerfile.go --build-arg BACKEND=local-store .
BACKEND_KITTEN_TTS = kitten-tts|python|./backend|false|true
BACKEND_NEUTTS = neutts|python|./backend|false|true
BACKEND_KOKORO = kokoro|python|./backend|false|true
BACKEND_VLLM = vllm|python|./backend|false|true
BACKEND_DIFFUSERS = diffusers|python|./backend|--progress=plain|true
BACKEND_CHATTERBOX = chatterbox|python|./backend|false|true
BACKEND_VIBEVOICE = vibevoice|python|./backend|--progress=plain|true
BACKEND_MOONSHINE = moonshine|python|./backend|false|true
# Helper function to build docker image for a backend docker-build-huggingface:
# Usage: $(call docker-build-backend,BACKEND_NAME,DOCKERFILE_TYPE,BUILD_CONTEXT,PROGRESS_FLAG,NEEDS_BACKEND_ARG) docker build -t local-ai-backend:huggingface -f backend/Dockerfile.go --build-arg BACKEND=huggingface .
define docker-build-backend
docker build $(if $(filter-out false,$(4)),$(4)) \
--build-arg BUILD_TYPE=$(BUILD_TYPE) \
--build-arg BASE_IMAGE=$(BASE_IMAGE) \
--build-arg CUDA_MAJOR_VERSION=$(CUDA_MAJOR_VERSION) \
--build-arg CUDA_MINOR_VERSION=$(CUDA_MINOR_VERSION) \
--build-arg UBUNTU_VERSION=$(UBUNTU_VERSION) \
--build-arg UBUNTU_CODENAME=$(UBUNTU_CODENAME) \
$(if $(filter true,$(5)),--build-arg BACKEND=$(1)) \
-t local-ai-backend:$(1) -f backend/Dockerfile.$(2) $(3)
endef
# Generate docker-build targets from backend definitions docker-save-huggingface: backend-images
define generate-docker-build-target docker save local-ai-backend:huggingface -o backend-images/huggingface.tar
docker-build-$(word 1,$(subst |, ,$(1))):
$$(call docker-build-backend,$(word 1,$(subst |, ,$(1))),$(word 2,$(subst |, ,$(1))),$(word 3,$(subst |, ,$(1))),$(word 4,$(subst |, ,$(1))),$(word 5,$(subst |, ,$(1))))
endef
# Generate all docker-build targets docker-save-local-store: backend-images
$(eval $(call generate-docker-build-target,$(BACKEND_LLAMA_CPP))) docker save local-ai-backend:local-store -o backend-images/local-store.tar
$(eval $(call generate-docker-build-target,$(BACKEND_BARK_CPP)))
$(eval $(call generate-docker-build-target,$(BACKEND_PIPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_LOCAL_STORE)))
$(eval $(call generate-docker-build-target,$(BACKEND_HUGGINGFACE)))
$(eval $(call generate-docker-build-target,$(BACKEND_SILERO_VAD)))
$(eval $(call generate-docker-build-target,$(BACKEND_STABLEDIFFUSION_GGML)))
$(eval $(call generate-docker-build-target,$(BACKEND_WHISPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_RERANKERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_TRANSFORMERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_FASTER_WHISPER)))
$(eval $(call generate-docker-build-target,$(BACKEND_COQUI)))
$(eval $(call generate-docker-build-target,$(BACKEND_BARK)))
$(eval $(call generate-docker-build-target,$(BACKEND_EXLLAMA2)))
$(eval $(call generate-docker-build-target,$(BACKEND_RFDETR)))
$(eval $(call generate-docker-build-target,$(BACKEND_KITTEN_TTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_NEUTTS)))
$(eval $(call generate-docker-build-target,$(BACKEND_KOKORO)))
$(eval $(call generate-docker-build-target,$(BACKEND_VLLM)))
$(eval $(call generate-docker-build-target,$(BACKEND_DIFFUSERS)))
$(eval $(call generate-docker-build-target,$(BACKEND_CHATTERBOX)))
$(eval $(call generate-docker-build-target,$(BACKEND_VIBEVOICE)))
$(eval $(call generate-docker-build-target,$(BACKEND_MOONSHINE)))
# Pattern rule for docker-save targets docker-build-silero-vad:
docker-save-%: backend-images docker build -t local-ai-backend:silero-vad -f backend/Dockerfile.go --build-arg BACKEND=silero-vad .
docker save local-ai-backend:$* -o backend-images/$*.tar
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-vibevoice docker-build-exllama2 docker-build-moonshine docker-save-silero-vad: backend-images
docker save local-ai-backend:silero-vad -o backend-images/silero-vad.tar
docker-save-piper: backend-images
docker save local-ai-backend:piper -o backend-images/piper.tar
docker-save-llama-cpp: backend-images
docker save local-ai-backend:llama-cpp -o backend-images/llama-cpp.tar
docker-save-bark-cpp: backend-images
docker save local-ai-backend:bark-cpp -o backend-images/bark-cpp.tar
docker-build-stablediffusion-ggml:
docker build -t local-ai-backend:stablediffusion-ggml -f backend/Dockerfile.go --build-arg BACKEND=stablediffusion-ggml .
docker-save-stablediffusion-ggml: backend-images
docker save local-ai-backend:stablediffusion-ggml -o backend-images/stablediffusion-ggml.tar
docker-build-rerankers:
docker build -t local-ai-backend:rerankers -f backend/Dockerfile.python --build-arg BACKEND=rerankers .
docker-build-vllm:
docker build -t local-ai-backend:vllm -f backend/Dockerfile.python --build-arg BACKEND=vllm .
docker-build-transformers:
docker build -t local-ai-backend:transformers -f backend/Dockerfile.python --build-arg BACKEND=transformers .
docker-build-diffusers:
docker build -t local-ai-backend:diffusers -f backend/Dockerfile.python --build-arg BACKEND=diffusers .
docker-build-kokoro:
docker build -t local-ai-backend:kokoro -f backend/Dockerfile.python --build-arg BACKEND=kokoro .
docker-build-whisper:
docker build --build-arg BUILD_TYPE=$(BUILD_TYPE) --build-arg BASE_IMAGE=$(BASE_IMAGE) -t local-ai-backend:whisper -f backend/Dockerfile.go --build-arg BACKEND=whisper .
docker-save-whisper: backend-images
docker save local-ai-backend:whisper -o backend-images/whisper.tar
docker-build-faster-whisper:
docker build -t local-ai-backend:faster-whisper -f backend/Dockerfile.python --build-arg BACKEND=faster-whisper .
docker-build-coqui:
docker build -t local-ai-backend:coqui -f backend/Dockerfile.python --build-arg BACKEND=coqui .
docker-build-bark:
docker build -t local-ai-backend:bark -f backend/Dockerfile.python --build-arg BACKEND=bark .
docker-build-chatterbox:
docker build -t local-ai-backend:chatterbox -f backend/Dockerfile.python --build-arg BACKEND=chatterbox .
docker-build-exllama2:
docker build -t local-ai-backend:exllama2 -f backend/Dockerfile.python --build-arg BACKEND=exllama2 .
docker-build-backends: docker-build-llama-cpp docker-build-rerankers docker-build-vllm docker-build-transformers docker-build-diffusers docker-build-kokoro docker-build-faster-whisper docker-build-coqui docker-build-bark docker-build-chatterbox docker-build-exllama2
########################################################
### END Backends
@@ -539,19 +557,3 @@ docs-clean:
.PHONY: docs
docs: docs/static/gallery.html
cd docs && hugo serve
########################################################
## Platform-specific builds
########################################################
## fyne cross-platform build
build-launcher-darwin: build-launcher
go run github.com/tiagomelo/macos-dmg-creator/cmd/createdmg@latest \
--appName "LocalAI" \
--appBinaryPath "$(LAUNCHER_BINARY_NAME)" \
--bundleIdentifier "com.localai.launcher" \
--iconPath "core/http/static/logo.png" \
--outputDir "dist/"
build-launcher-linux:
cd cmd/launcher && go run fyne.io/tools/cmd/fyne@latest package -os linux -icon ../../core/http/static/logo.png --executable $(LAUNCHER_BINARY_NAME)-linux && mv launcher.tar.xz ../../$(LAUNCHER_BINARY_NAME)-linux.tar.xz

README.md

@@ -33,7 +33,7 @@
<img src="https://img.shields.io/badge/X-%23000000.svg?style=for-the-badge&logo=X&logoColor=white&label=LocalAI_API" alt="Follow LocalAI_API"/> <img src="https://img.shields.io/badge/X-%23000000.svg?style=for-the-badge&logo=X&logoColor=white&label=LocalAI_API" alt="Follow LocalAI_API"/>
</a> </a>
<a href="https://discord.gg/uJAeKSAGDy" target="blank"> <a href="https://discord.gg/uJAeKSAGDy" target="blank">
<img src="https://img.shields.io/badge/dynamic/json?color=blue&label=Discord&style=for-the-badge&query=approximate_member_count&url=https%3A%2F%2Fdiscordapp.com%2Fapi%2Finvites%2FuJAeKSAGDy%3Fwith_counts%3Dtrue&logo=discord" alt="Join LocalAI Discord Community"/> <img src="https://dcbadge.vercel.app/api/server/uJAeKSAGDy?style=flat-square&theme=default-inverted" alt="Join LocalAI Discord Community"/>
</a> </a>
</p> </p>
@@ -43,7 +43,7 @@
> :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/) > :bulb: Get help - [❓FAQ](https://localai.io/faq/) [💭Discussions](https://github.com/go-skynet/LocalAI/discussions) [:speech_balloon: Discord](https://discord.gg/uJAeKSAGDy) [:book: Documentation website](https://localai.io/)
> >
> [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on > [💻 Quickstart](https://localai.io/basics/getting_started/) [🖼️ Models](https://models.localai.io/) [🚀 Roadmap](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap) [🥽 Demo](https://demo.localai.io) [🌍 Explorer](https://explorer.localai.io) [🛫 Examples](https://github.com/mudler/LocalAI-examples) Try on
[![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/localaiofficial_bot) [![Telegram](https://img.shields.io/badge/Telegram-2CA5E0?style=for-the-badge&logo=telegram&logoColor=white)](https://t.me/localaiofficial_bot)
[![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai) [![tests](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/test.yml)[![Build and Release](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/release.yaml)[![build container images](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/image.yml)[![Bump dependencies](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml/badge.svg)](https://github.com/go-skynet/LocalAI/actions/workflows/bump_deps.yaml)[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/localai)](https://artifacthub.io/packages/search?repo=localai)
@@ -80,18 +80,8 @@
</tr>
</table>
## Screenshots / Video ## Screenshots
### Youtube video
<h1 align="center">
<br>
<a href="https://www.youtube.com/watch?v=PDqYhB9nNHA" target="_blank"> <img width="300" src="https://img.youtube.com/vi/PDqYhB9nNHA/0.jpg"> </a><br>
<br>
</h1>
### Screenshots
| Talk Interface | Generate Audio |
| --- | --- |
@@ -118,25 +108,10 @@ Run the installer script:
curl https://localai.io/install.sh | sh
```
For more installation options, see [Installer Options](https://localai.io/installation/). For more installation options, see [Installer Options](https://localai.io/docs/advanced/installer/).
### macOS Download:
<a href="https://github.com/mudler/LocalAI/releases/latest/download/LocalAI.dmg">
<img src="https://img.shields.io/badge/Download-macOS-blue?style=for-the-badge&logo=apple&logoColor=white" alt="Download LocalAI for macOS"/>
</a>
> Note: the DMGs are not signed by Apple as quarantined. See https://github.com/mudler/LocalAI/issues/6268 for a workaround, fix is tracked here: https://github.com/mudler/LocalAI/issues/6244
Or run with docker:
> **💡 Docker Run vs Docker Start**
>
> - `docker run` creates and starts a new container. If a container with the same name already exists, this command will fail.
> - `docker start` starts an existing container that was previously created with `docker run`.
>
> If you've already run LocalAI before and want to start it again, use: `docker start -i local-ai`
### CPU only image:
```bash
@@ -146,18 +121,14 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest
### NVIDIA GPU Images:
```bash
# CUDA 13.0
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-13
# CUDA 12.0
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-12
# NVIDIA Jetson (L4T) ARM64 # CUDA 11.7
# CUDA 12 (for Nvidia AGX Orin and similar platforms) docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-gpu-nvidia-cuda-11
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
# CUDA 13 (for Nvidia DGX Spark) # NVIDIA Jetson (L4T) ARM64
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64-cuda-13 docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-nvidia-l4t-arm64
```
### AMD GPU Images (ROCm):
@@ -169,7 +140,11 @@ docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri
### Intel GPU Images (oneAPI):
```bash
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel # Intel GPU with FP16 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f16
# Intel GPU with FP32 support
docker run -ti --name local-ai -p 8080:8080 --device=/dev/dri/card1 --device=/dev/dri/renderD128 localai/localai:latest-gpu-intel-f32
```
### Vulkan GPU Images:
@@ -184,14 +159,14 @@ docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-gpu-vulkan
# CPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-cpu
# NVIDIA CUDA 13 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-13
# NVIDIA CUDA 12 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-12
# NVIDIA CUDA 11 version
docker run -ti --name local-ai -p 8080:8080 --gpus all localai/localai:latest-aio-gpu-nvidia-cuda-11
# Intel GPU version
docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel docker run -ti --name local-ai -p 8080:8080 localai/localai:latest-aio-gpu-intel-f16
# AMD GPU version
docker run -ti --name local-ai -p 8080:8080 --device=/dev/kfd --device=/dev/dri --group-add=video localai/localai:latest-aio-gpu-hipblas
@@ -214,19 +189,10 @@ local-ai run https://gist.githubusercontent.com/.../phi-2.yaml
local-ai run oci://localai/phi-2:latest
```
> ⚡ **Automatic Backend Detection**: When you install models from the gallery or YAML files, LocalAI automatically detects your system's GPU capabilities (NVIDIA, AMD, Intel) and downloads the appropriate backend. For advanced configuration options, see [GPU Acceleration](https://localai.io/features/gpu-acceleration/#automatic-backend-detection). For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html)
For more information, see [💻 Getting started](https://localai.io/basics/getting_started/index.html), if you are interested in our roadmap items and future enhancements, you can see the [Issues labeled as Roadmap here](https://github.com/mudler/LocalAI/issues?q=is%3Aissue+is%3Aopen+label%3Aroadmap)
## 📰 Latest project news
- December 2025: [Dynamic Memory Resource reclaimer](https://github.com/mudler/LocalAI/pull/7583), [Automatic fitting of models to multiple GPUS(llama.cpp)](https://github.com/mudler/LocalAI/pull/7584), [Added Vibevoice backend](https://github.com/mudler/LocalAI/pull/7494)
- November 2025: Major improvements to the UX. Among these: [Import models via URL](https://github.com/mudler/LocalAI/pull/7245) and [Multiple chats and history](https://github.com/mudler/LocalAI/pull/7325)
- October 2025: 🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) support added for agentic capabilities with external tools
- September 2025: New Launcher application for MacOS and Linux, extended support to many backends for Mac and Nvidia L4T devices. Models: Added MLX-Audio, WAN 2.2. WebUI improvements and Python-based backends now ships portable python environments.
- August 2025: MLX, MLX-VLM, Diffusers and llama.cpp are now supported on Mac M1/M2/M3+ chips ( with `development` suffix in the gallery ): https://github.com/mudler/LocalAI/pull/6049 https://github.com/mudler/LocalAI/pull/6119 https://github.com/mudler/LocalAI/pull/6121 https://github.com/mudler/LocalAI/pull/6060
- July/August 2025: 🔍 [Object Detection](https://localai.io/features/object-detection/) added to the API featuring [rf-detr](https://github.com/roboflow/rf-detr)
- July 2025: All backends migrated outside of the main binary. LocalAI is now more lightweight, small, and automatically downloads the required backend to run the model. [Read the release notes](https://github.com/mudler/LocalAI/releases/tag/v3.2.0)
- June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607). - June 2025: [Backend management](https://github.com/mudler/LocalAI/pull/5607) has been added. Attention: extras images are going to be deprecated from the next release! Read [the backend management PR](https://github.com/mudler/LocalAI/pull/5607).
- May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery). - May 2025: [Audio input](https://github.com/mudler/LocalAI/pull/5466) and [Reranking](https://github.com/mudler/LocalAI/pull/5396) in llama.cpp backend, [Realtime API](https://github.com/mudler/LocalAI/pull/5392), Support to Gemma, SmollVLM, and more multimodal models (available in the gallery).
- May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0) - May 2025: Important: image name changes [See release](https://github.com/mudler/LocalAI/releases/tag/v2.29.0)
@@ -259,70 +225,12 @@ Roadmap items: [List of issues](https://github.com/mudler/LocalAI/issues?q=is%3A
- ✍️ [Constrained grammars](https://localai.io/features/constrained_grammars/)
- 🖼️ [Download Models directly from Huggingface ](https://localai.io/models/)
- 🥽 [Vision API](https://localai.io/features/gpt-vision/)
- 🔍 [Object Detection](https://localai.io/features/object-detection/)
- 📈 [Reranker API](https://localai.io/features/reranker/)
- 🆕🖧 [P2P Inferencing](https://localai.io/features/distribute/)
- 🆕🔌 [Model Context Protocol (MCP)](https://localai.io/docs/features/mcp/) - Agentic capabilities with external tools and [LocalAGI's Agentic capabilities](https://github.com/mudler/LocalAGI) - [Agentic capabilities](https://github.com/mudler/LocalAGI)
- 🔊 Voice activity detection (Silero-VAD support)
- 🌍 Integrated WebUI!
## 🧩 Supported Backends & Acceleration
LocalAI supports a comprehensive range of AI backends with multiple acceleration options:
### Text Generation & Language Models
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **llama.cpp** | LLM inference in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, Metal, CPU |
| **vLLM** | Fast LLM inference with PagedAttention | CUDA 12/13, ROCm, Intel |
| **transformers** | HuggingFace transformers framework | CUDA 12/13, ROCm, Intel, CPU |
| **exllama2** | GPTQ inference library | CUDA 12/13 |
| **MLX** | Apple Silicon LLM inference | Metal (M1/M2/M3+) |
| **MLX-VLM** | Apple Silicon Vision-Language Models | Metal (M1/M2/M3+) |
### Audio & Speech Processing
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **whisper.cpp** | OpenAI Whisper in C/C++ | CUDA 12/13, ROCm, Intel SYCL, Vulkan, CPU |
| **faster-whisper** | Fast Whisper with CTranslate2 | CUDA 12/13, ROCm, Intel, CPU |
| **bark** | Text-to-audio generation | CUDA 12/13, ROCm, Intel |
| **bark-cpp** | C++ implementation of Bark | CUDA, Metal, CPU |
| **coqui** | Advanced TTS with 1100+ languages | CUDA 12/13, ROCm, Intel, CPU |
| **kokoro** | Lightweight TTS model | CUDA 12/13, ROCm, Intel, CPU |
| **chatterbox** | Production-grade TTS | CUDA 12/13, CPU |
| **piper** | Fast neural TTS system | CPU |
| **kitten-tts** | Kitten TTS models | CPU |
| **silero-vad** | Voice Activity Detection | CPU |
| **neutts** | Text-to-speech with voice cloning | CUDA 12/13, ROCm, CPU |
| **vibevoice** | Real-time TTS with voice cloning | CUDA 12/13, ROCm, Intel, CPU |
### Image & Video Generation
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **stablediffusion.cpp** | Stable Diffusion in C/C++ | CUDA 12/13, Intel SYCL, Vulkan, CPU |
| **diffusers** | HuggingFace diffusion models | CUDA 12/13, ROCm, Intel, Metal, CPU |
### Specialized AI Tasks
| Backend | Description | Acceleration Support |
|---------|-------------|---------------------|
| **rfdetr** | Real-time object detection | CUDA 12/13, Intel, CPU |
| **rerankers** | Document reranking API | CUDA 12/13, ROCm, Intel, CPU |
| **local-store** | Vector database | CPU |
| **huggingface** | HuggingFace API integration | API-based |
### Hardware Acceleration Matrix
| Acceleration Type | Supported Backends | Hardware Support |
|-------------------|-------------------|------------------|
| **NVIDIA CUDA 12** | All CUDA-compatible backends | Nvidia hardware |
| **NVIDIA CUDA 13** | All CUDA-compatible backends | Nvidia hardware |
| **AMD ROCm** | llama.cpp, whisper, vllm, transformers, diffusers, rerankers, coqui, kokoro, bark, neutts, vibevoice | AMD Graphics |
| **Intel oneAPI** | llama.cpp, whisper, stablediffusion, vllm, transformers, diffusers, rfdetr, rerankers, exllama2, coqui, kokoro, bark, vibevoice | Intel Arc, Intel iGPUs |
| **Apple Metal** | llama.cpp, whisper, diffusers, MLX, MLX-VLM, bark-cpp | Apple M1/M2/M3+ |
| **Vulkan** | llama.cpp, whisper, stablediffusion | Cross-platform GPUs |
| **NVIDIA Jetson (CUDA 12)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (AGX Orin, etc.) |
| **NVIDIA Jetson (CUDA 13)** | llama.cpp, whisper, stablediffusion, diffusers, rfdetr | ARM64 embedded AI (DGX Spark) |
| **CPU Optimized** | All backends | AVX/AVX2/AVX512, quantization support |
### 🔗 Community and integrations
@@ -334,18 +242,9 @@ WebUIs:
- https://github.com/go-skynet/LocalAI-frontend
- QA-Pilot(An interactive chat project that leverages LocalAI LLMs for rapid understanding and navigation of GitHub code repository) https://github.com/reid41/QA-Pilot
Agentic Libraries:
- https://github.com/mudler/cogito
MCPs:
- https://github.com/mudler/MCPs
Model galleries
- https://github.com/go-skynet/model-gallery
Voice:
- https://github.com/richiejp/VoxInput
Other:
- Helm chart https://github.com/go-skynet/helm-charts
- VSCode extension https://github.com/badgooooor/localai-vscode-plugin
@@ -414,10 +313,6 @@ A huge thank you to our generous sponsors who support this project covering CI e
</a>
</p>
### Individual sponsors
A special thanks to individual sponsors that contributed to the project, a full list is in [Github](https://github.com/sponsors/mudler) and [buymeacoffee](https://buymeacoffee.com/mudler), a special shout out goes to [drikster80](https://github.com/drikster80) for being generous. Thank you everyone!
## 🌟 Star history
[![LocalAI Star history Chart](https://api.star-history.com/svg?repos=go-skynet/LocalAI&type=Date)](https://star-history.com/#go-skynet/LocalAI&Date)


@@ -2,10 +2,10 @@ context_size: 4096
f16: true
backend: llama-cpp
mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o
parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords:
- <|im_end|>
- <dummy32000>
@@ -42,9 +42,9 @@ template:
<|im_start|>assistant
download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd


@@ -2,10 +2,10 @@ context_size: 4096
backend: llama-cpp backend: llama-cpp
f16: true f16: true
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -42,9 +42,9 @@ template:
<|im_start|>assistant <|im_start|>assistant
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd


@@ -2,10 +2,10 @@ context_size: 4096
backend: llama-cpp backend: llama-cpp
f16: true f16: true
mmap: true mmap: true
mmproj: minicpm-v-4_5-mmproj-f16.gguf mmproj: minicpm-v-2_6-mmproj-f16.gguf
name: gpt-4o name: gpt-4o
parameters: parameters:
model: minicpm-v-4_5-Q4_K_M.gguf model: minicpm-v-2_6-Q4_K_M.gguf
stopwords: stopwords:
- <|im_end|> - <|im_end|>
- <dummy32000> - <dummy32000>
@@ -43,9 +43,9 @@ template:
download_files: download_files:
- filename: minicpm-v-4_5-Q4_K_M.gguf - filename: minicpm-v-2_6-Q4_K_M.gguf
sha256: c1c3c33100b15b4caf7319acce4e23c0eb0ce1cbd12f70e8d24f05aa67b7512f sha256: 3a4078d53b46f22989adbf998ce5a3fd090b6541f112d7e936eb4204a04100b1
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/ggml-model-Q4_K_M.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/ggml-model-Q4_K_M.gguf
- filename: minicpm-v-4_5-mmproj-f16.gguf - filename: minicpm-v-2_6-mmproj-f16.gguf
uri: huggingface://openbmb/MiniCPM-V-4_5-gguf/mmproj-model-f16.gguf uri: huggingface://openbmb/MiniCPM-V-2_6-gguf/mmproj-model-f16.gguf
sha256: 7a7225a32e8d453aaa3d22d8c579b5bf833c253f784cdb05c99c9a76fd616df8 sha256: 4485f68a0f1aa404c391e788ea88ea653c100d8e98fe572698f701e5809711fd

backend/Dockerfile.go (new file)

@@ -0,0 +1,142 @@
ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
ARG CUDA_MAJOR_VERSION
ARG CUDA_MINOR_VERSION
ARG SKIP_DRIVERS=false
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG GO_VERSION=1.22.6
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
git ccache \
ca-certificates \
make cmake \
curl unzip \
libssl-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
apt-get update && \
apt-get install -y \
vulkan-sdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# CuBLAS requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
apt-get update && \
apt-get install -y --no-install-recommends \
cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
libclblast-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
hipblas-dev \
rocblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
# I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \
; fi
# Intel oneAPI requirements
RUN <<EOT bash
if [[ "${BUILD_TYPE}" == sycl* ]] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
intel-oneapi-runtime-libs && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin
# Install grpc compilers
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
RUN echo "TARGETARCH: $TARGETARCH"
# We need protoc installed, and the version in 22.04 is too old. We will create one as part installing the GRPC build below
# but that will also being in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
RUN <<EOT bash
if [ "amd64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-aarch_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
EOT
COPY . /LocalAI
RUN cd /LocalAI && make protogen-go && make -C /LocalAI/backend/go/${BACKEND} build
FROM scratch
ARG BACKEND=rerankers
COPY --from=builder /LocalAI/backend/go/${BACKEND}/package/. ./


@@ -1,197 +0,0 @@
ARG BASE_IMAGE=ubuntu:24.04
FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers
ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE}
ARG CUDA_MAJOR_VERSION
ARG CUDA_MINOR_VERSION
ARG SKIP_DRIVERS=false
ENV CUDA_MAJOR_VERSION=${CUDA_MAJOR_VERSION}
ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
ARG TARGETVARIANT
ARG GO_VERSION=1.25.4
ARG UBUNTU_VERSION=2404
RUN apt-get update && \
apt-get install -y --no-install-recommends \
build-essential \
git ccache \
ca-certificates \
make cmake wget \
curl unzip \
libssl-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Cuda
ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ] && [ "${VULKAN_FROM_SOURCE}" = "true" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
elif [ "amd64" = "${TARGETARCH}}" ]; then
wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list http://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \
apt-get update && \
apt-get install -y vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# CuBLAS requirements
RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \
apt-get install -y --no-install-recommends \
software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb
fi
if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
fi
dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \
apt-get update && \
apt-get install -y --no-install-recommends \
cuda-nvcc-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcufft-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
apt-get clean && \
rm -rf /var/lib/apt/lists/*
fi
EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
fi
EOT
# If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
libclblast-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* \
; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \
apt-get install -y --no-install-recommends \
hipblas-dev \
rocblas-dev && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
# I have no idea why, but the ROCM lib packages don't trigger ldconfig after they install, which results in local-ai and others not being able
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \
; fi
# Install Go
RUN curl -L -s https://go.dev/dl/go${GO_VERSION}.linux-${TARGETARCH}.tar.gz | tar -C /usr/local -xz
ENV PATH=$PATH:/root/go/bin:/usr/local/go/bin:/usr/local/bin
# Install grpc compilers
RUN go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 && \
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@1958fcbe2ca8bd93af633f11e97d44e567e945af
RUN echo "TARGETARCH: $TARGETARCH"
# We need protoc installed, and the version in 22.04 is too old. We will create one as part installing the GRPC build below
# but that will also being in a newer version of absl which stablediffusion cannot compile with. This version of protoc is only
# here so that we can generate the grpc code for the stablediffusion build
RUN <<EOT bash
if [ "amd64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-x86_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
if [ "arm64" = "$TARGETARCH" ]; then
curl -L -s https://github.com/protocolbuffers/protobuf/releases/download/v27.1/protoc-27.1-linux-aarch_64.zip -o protoc.zip && \
unzip -j -d /usr/local/bin protoc.zip bin/protoc && \
rm protoc.zip
fi
EOT
COPY . /LocalAI
RUN git config --global --add safe.directory /LocalAI
RUN cd /LocalAI && make protogen-go && make -C /LocalAI/backend/go/${BACKEND} build
FROM scratch
ARG BACKEND=rerankers
COPY --from=builder /LocalAI/backend/go/${BACKEND}/package/. ./


@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:24.04 ARG BASE_IMAGE=ubuntu:22.04
ARG GRPC_BASE_IMAGE=${BASE_IMAGE} ARG GRPC_BASE_IMAGE=${BASE_IMAGE}
@@ -10,8 +10,8 @@ FROM ${GRPC_BASE_IMAGE} AS grpc
ARG GRPC_MAKEFLAGS="-j4 -Otarget" ARG GRPC_MAKEFLAGS="-j4 -Otarget"
ARG GRPC_VERSION=v1.65.0 ARG GRPC_VERSION=v1.65.0
ARG CMAKE_FROM_SOURCE=false ARG CMAKE_FROM_SOURCE=false
# CUDA Toolkit 13.x compatibility: CMake 3.31.9+ fixes toolchain detection/arch table issues ARG CMAKE_VERSION=3.26.4
ARG CMAKE_VERSION=3.31.10 ARG PROTOBUF_VERSION=v21.12
ENV MAKEFLAGS=${GRPC_MAKEFLAGS} ENV MAKEFLAGS=${GRPC_MAKEFLAGS}
@@ -21,13 +21,13 @@ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
ca-certificates \ ca-certificates \
build-essential curl libssl-dev \ build-essential curl libssl-dev \
git wget && \ git && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
# Install CMake (the version in 22.04 is too old) # Install CMake (the version in 22.04 is too old)
RUN <<EOT bash RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else else
apt-get update && \ apt-get update && \
@@ -50,14 +50,15 @@ RUN git clone --recurse-submodules --jobs 4 -b ${GRPC_VERSION} --depth 1 --shall
make install && \ make install && \
rm -rf /build rm -rf /build
RUN git clone --recurse-submodules --branch ${PROTOBUF_VERSION} https://github.com/protocolbuffers/protobuf.git && \
mkdir -p /build/protobuf/build && \
cd /build/protobuf/build && \
cmake -Dprotobuf_BUILD_SHARED_LIBS=ON -Dprotobuf_BUILD_TESTS=OFF .. && \
make && \
make install && \
rm -rf /build
FROM ${BASE_IMAGE} AS builder FROM ${BASE_IMAGE} AS builder
ARG CMAKE_FROM_SOURCE=false
ARG CMAKE_VERSION=3.31.10
# We can target specific CUDA ARCHITECTURES like --build-arg CUDA_DOCKER_ARCH='75;86;89;120'
ARG CUDA_DOCKER_ARCH
ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
ARG CMAKE_ARGS
ENV CMAKE_ARGS=${CMAKE_ARGS}
ARG BACKEND=rerankers ARG BACKEND=rerankers
ARG BUILD_TYPE ARG BUILD_TYPE
ENV BUILD_TYPE=${BUILD_TYPE} ENV BUILD_TYPE=${BUILD_TYPE}
@@ -69,8 +70,7 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH ARG TARGETARCH
ARG TARGETVARIANT ARG TARGETVARIANT
ARG GO_VERSION=1.25.4 ARG GO_VERSION=1.22.6
ARG UBUNTU_VERSION=2404
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
@@ -78,9 +78,8 @@ RUN apt-get update && \
ccache git \ ccache git \
ca-certificates \ ca-certificates \
make \ make \
pkg-config libcurl4-openssl-dev \
curl unzip \ curl unzip \
libssl-dev wget && \ libssl-dev && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
@@ -90,57 +89,17 @@ ENV PATH=/usr/local/cuda/bin:${PATH}
# HipBLAS requirements # HipBLAS requirements
ENV PATH=/opt/rocm/bin:${PATH} ENV PATH=/opt/rocm/bin:${PATH}
# Vulkan requirements # Vulkan requirements
RUN <<EOT bash RUN <<EOT bash
if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then if [ "${BUILD_TYPE}" = "vulkan" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \ apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \ software-properties-common pciutils wget gpg-agent && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \ wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \ wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \ apt-get update && \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \ apt-get install -y \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \ vulkan-sdk && \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ] && [ "${VULKAN_FROM_SOURCE}" = "true" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
elif [ "amd64" = "${TARGETARCH}}" ]; then
wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list http://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \
apt-get update && \
apt-get install -y vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
fi fi
@@ -148,19 +107,15 @@ EOT
# CuBLAS requirements # CuBLAS requirements
RUN <<EOT bash RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \ apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
software-properties-common pciutils software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi fi
if [ "arm64" = "$TARGETARCH" ]; then if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
fi fi
dpkg -i cuda-keyring_1.1-1_all.deb && \ dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \ rm -f cuda-keyring_1.1-1_all.deb && \
@@ -171,31 +126,12 @@ RUN <<EOT bash
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \ libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \ libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \ libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
fi fi
EOT EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
fi
EOT
# If we are building with clblas support, we need the libraries for the builds # If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \ RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \ apt-get update && \
@@ -237,7 +173,7 @@ EOT
# Install CMake (the version in 22.04 is too old) # Install CMake (the version in 22.04 is too old)
RUN <<EOT bash RUN <<EOT bash
if [ "${CMAKE_FROM_SOURCE}" = "true" ]; then if [ "${CMAKE_FROM_SOURCE}}" = "true" ]; then
curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install curl -L -s https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.tar.gz -o cmake.tar.gz && tar xvf cmake.tar.gz && cd cmake-${CMAKE_VERSION} && ./configure && make && make install
else else
apt-get update && \ apt-get update && \
@@ -253,35 +189,12 @@ COPY --from=grpc /opt/grpc /usr/local
COPY . /LocalAI COPY . /LocalAI
RUN <<'EOT' bash RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp
set -euxo pipefail RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp-grpc
RUN make -C /LocalAI/backend/cpp/llama-cpp llama-cpp-rpc-server
if [[ -n "${CUDA_DOCKER_ARCH:-}" ]]; then
CUDA_ARCH_ESC="${CUDA_DOCKER_ARCH//;/\\;}"
export CMAKE_ARGS="${CMAKE_ARGS:-} -DCMAKE_CUDA_ARCHITECTURES=${CUDA_ARCH_ESC}"
echo "CMAKE_ARGS(env) = ${CMAKE_ARGS}"
rm -rf /LocalAI/backend/cpp/llama-cpp-*-build
fi
if [ "${TARGETARCH}" = "arm64" ] || [ "${BUILD_TYPE}" = "hipblas" ]; then
cd /LocalAI/backend/cpp/llama-cpp
make llama-cpp-fallback
make llama-cpp-grpc
make llama-cpp-rpc-server
else
cd /LocalAI/backend/cpp/llama-cpp
make llama-cpp-avx
make llama-cpp-avx2
make llama-cpp-avx512
make llama-cpp-fallback
make llama-cpp-grpc
make llama-cpp-rpc-server
fi
EOT
# Copy libraries using a script to handle architecture differences # Copy libraries using a script to handle architecture differences
RUN make -BC /LocalAI/backend/cpp/llama-cpp package RUN make -C /LocalAI/backend/cpp/llama-cpp package
FROM scratch FROM scratch


@@ -1,4 +1,4 @@
ARG BASE_IMAGE=ubuntu:24.04 ARG BASE_IMAGE=ubuntu:22.04
FROM ${BASE_IMAGE} AS builder FROM ${BASE_IMAGE} AS builder
ARG BACKEND=rerankers ARG BACKEND=rerankers
@@ -12,7 +12,6 @@ ENV CUDA_MINOR_VERSION=${CUDA_MINOR_VERSION}
ENV DEBIAN_FRONTEND=noninteractive ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH ARG TARGETARCH
ARG TARGETVARIANT ARG TARGETVARIANT
ARG UBUNTU_VERSION=2404
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
@@ -22,24 +21,17 @@ RUN apt-get update && \
espeak-ng \ espeak-ng \
curl \ curl \
libssl-dev \ libssl-dev \
git wget \ git \
git-lfs \ git-lfs \
unzip clang \ unzip \
upx-ucl \ upx-ucl \
curl python3-pip \ curl python3-pip \
python-is-python3 \ python-is-python3 \
python3-dev llvm \ python3-dev llvm \
python3-venv make cmake && \ python3-venv make && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/* && \
pip install --upgrade pip
RUN <<EOT bash
if [ "${UBUNTU_VERSION}" = "2404" ]; then
pip install --break-system-packages --user --upgrade pip
else
pip install --upgrade pip
fi
EOT
# Cuda # Cuda
@@ -54,50 +46,11 @@ RUN <<EOT bash
apt-get update && \ apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
software-properties-common pciutils wget gpg-agent && \ software-properties-common pciutils wget gpg-agent && \
apt-get install -y libglm-dev cmake libxcb-dri3-0 libxcb-present0 libpciaccess0 \ wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \
libpng-dev libxcb-keysyms1-dev libxcb-dri3-dev libx11-dev g++ gcc \ wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \
libwayland-dev libxrandr-dev libxcb-randr0-dev libxcb-ewmh-dev \ apt-get update && \
git python-is-python3 bison libx11-xcb-dev liblz4-dev libzstd-dev \ apt-get install -y \
ocaml-core ninja-build pkg-config libxml2-dev wayland-protocols python3-jsonschema \ vulkan-sdk && \
clang-format qtbase5-dev qt6-base-dev libxcb-glx0-dev sudo xz-utils
if [ "amd64" = "$TARGETARCH" ] && [ "${VULKAN_FROM_SOURCE}" = "true" ]; then
wget "https://sdk.lunarg.com/sdk/download/1.4.328.1/linux/vulkansdk-linux-x86_64-1.4.328.1.tar.xz" && \
tar -xf vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
rm vulkansdk-linux-x86_64-1.4.328.1.tar.xz && \
mkdir -p /opt/vulkan-sdk && \
mv 1.4.328.1 /opt/vulkan-sdk/ && \
cd /opt/vulkan-sdk/1.4.328.1 && \
./vulkansdk --no-deps --maxjobs \
vulkan-loader \
vulkan-validationlayers \
vulkan-extensionlayer \
vulkan-tools \
shaderc && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/bin/* /usr/bin/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/lib/* /usr/lib/x86_64-linux-gnu/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/include/* /usr/include/ && \
cp -rfv /opt/vulkan-sdk/1.4.328.1/x86_64/share/* /usr/share/ && \
rm -rf /opt/vulkan-sdk
elif [ "amd64" = "${TARGETARCH}}" ]; then
wget -qO- https://packages.lunarg.com/lunarg-signing-key-pub.asc | sudo tee /etc/apt/trusted.gpg.d/lunarg.asc && \
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list http://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \
apt-get update && \
apt-get install -y vulkan-sdk
fi
if [ "arm64" = "$TARGETARCH" ]; then
mkdir vulkan && cd vulkan && \
curl -L -o vulkan-sdk.tar.xz https://github.com/mudler/vulkan-sdk-arm/releases/download/1.4.335.0/vulkansdk-ubuntu-24.04-arm-1.4.335.0.tar.xz && \
tar -xvf vulkan-sdk.tar.xz && \
rm vulkan-sdk.tar.xz && \
cd 1.4.335.0 && \
cp -rfv aarch64/bin/* /usr/bin/ && \
cp -rfv aarch64/lib/* /usr/lib/aarch64-linux-gnu/ && \
cp -rfv aarch64/include/* /usr/include/ && \
cp -rfv aarch64/share/* /usr/share/ && \
cd ../.. && \
rm -rf vulkan
fi
ldconfig && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
fi fi
@@ -105,19 +58,15 @@ EOT
# CuBLAS requirements # CuBLAS requirements
RUN <<EOT bash RUN <<EOT bash
if ( [ "${BUILD_TYPE}" = "cublas" ] || [ "${BUILD_TYPE}" = "l4t" ] ) && [ "${SKIP_DRIVERS}" = "false" ]; then if [ "${BUILD_TYPE}" = "cublas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
apt-get update && \ apt-get update && \
apt-get install -y --no-install-recommends \ apt-get install -y --no-install-recommends \
software-properties-common pciutils software-properties-common pciutils
if [ "amd64" = "$TARGETARCH" ]; then if [ "amd64" = "$TARGETARCH" ]; then
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/x86_64/cuda-keyring_1.1-1_all.deb curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.1-1_all.deb
fi fi
if [ "arm64" = "$TARGETARCH" ]; then if [ "arm64" = "$TARGETARCH" ]; then
if [ "${CUDA_MAJOR_VERSION}" = "13" ]; then curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64/cuda-keyring_1.1-1_all.deb
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/sbsa/cuda-keyring_1.1-1_all.deb
else
curl -O https://developer.download.nvidia.com/compute/cuda/repos/ubuntu${UBUNTU_VERSION}/arm64/cuda-keyring_1.1-1_all.deb
fi
fi fi
dpkg -i cuda-keyring_1.1-1_all.deb && \ dpkg -i cuda-keyring_1.1-1_all.deb && \
rm -f cuda-keyring_1.1-1_all.deb && \ rm -f cuda-keyring_1.1-1_all.deb && \
@@ -128,31 +77,12 @@ RUN <<EOT bash
libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \ libcurand-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \ libcublas-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \ libcusparse-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} \
libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcusolver-dev-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} && \
if [ "${CUDA_MAJOR_VERSION}" = "13" ] && [ "arm64" = "$TARGETARCH" ]; then
apt-get install -y --no-install-recommends \
libcufile-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libcudnn9-cuda-${CUDA_MAJOR_VERSION} cuda-cupti-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION} libnvjitlink-${CUDA_MAJOR_VERSION}-${CUDA_MINOR_VERSION}
fi
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
fi fi
EOT EOT
# https://github.com/NVIDIA/Isaac-GR00T/issues/343
RUN <<EOT bash
if [ "${BUILD_TYPE}" = "cublas" ] && [ "${TARGETARCH}" = "arm64" ]; then
wget https://developer.download.nvidia.com/compute/cudss/0.6.0/local_installers/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
dpkg -i cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0_0.6.0-1_arm64.deb && \
cp /var/cudss-local-tegra-repo-ubuntu${UBUNTU_VERSION}-0.6.0/cudss-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get -y install cudss cudss-cuda-${CUDA_MAJOR_VERSION} && \
wget https://developer.download.nvidia.com/compute/nvpl/25.5/local_installers/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
dpkg -i nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5_1.0-1_arm64.deb && \
cp /var/nvpl-local-repo-ubuntu${UBUNTU_VERSION}-25.5/nvpl-*-keyring.gpg /usr/share/keyrings/ && \
apt-get update && apt-get install -y nvpl
fi
EOT
# If we are building with clblas support, we need the libraries for the builds # If we are building with clblas support, we need the libraries for the builds
RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \ RUN if [ "${BUILD_TYPE}" = "clblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then \
apt-get update && \ apt-get update && \
@@ -173,39 +103,20 @@ RUN if [ "${BUILD_TYPE}" = "hipblas" ] && [ "${SKIP_DRIVERS}" = "false" ]; then
# to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency # to locate the libraries. We run ldconfig ourselves to work around this packaging deficiency
ldconfig \ ldconfig \
; fi ; fi
RUN if [ "${BUILD_TYPE}" = "hipblas" ]; then \
ln -s /opt/rocm-**/lib/llvm/lib/libomp.so /usr/lib/libomp.so \
; fi
# Install uv as a system package # Install uv as a system package
RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh RUN curl -LsSf https://astral.sh/uv/install.sh | UV_INSTALL_DIR=/usr/bin sh
ENV PATH="/root/.cargo/bin:${PATH}" ENV PATH="/root/.cargo/bin:${PATH}"
# Increase timeout for uv installs behind slow networks
ENV UV_HTTP_TIMEOUT=180
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# Install grpcio-tools (the version in 22.04 is too old) # Install grpcio-tools (the version in 22.04 is too old)
RUN <<EOT bash RUN pip install --user grpcio-tools==1.71.0 grpcio==1.71.0
if [ "${UBUNTU_VERSION}" = "2404" ]; then
pip install --break-system-packages --user grpcio-tools==1.71.0 grpcio==1.71.0
else
pip install grpcio-tools==1.71.0 grpcio==1.71.0
fi
EOT
COPY python/${BACKEND} /${BACKEND}
COPY backend.proto /${BACKEND}/backend.proto
COPY python/common/ /${BACKEND}/common
COPY backend/python/${BACKEND} /${BACKEND} RUN cd /${BACKEND} && make
COPY backend/backend.proto /${BACKEND}/backend.proto
COPY backend/python/common/ /${BACKEND}/common
COPY scripts/build/package-gpu-libs.sh /package-gpu-libs.sh
RUN cd /${BACKEND} && PORTABLE_PYTHON=true make
# Package GPU libraries into the backend's lib directory
RUN mkdir -p /${BACKEND}/lib && \
TARGET_LIB_DIR="/${BACKEND}/lib" BUILD_TYPE="${BUILD_TYPE}" CUDA_MAJOR_VERSION="${CUDA_MAJOR_VERSION}" \
bash /package-gpu-libs.sh "/${BACKEND}/lib"
FROM scratch FROM scratch
ARG BACKEND=rerankers ARG BACKEND=rerankers


@@ -1,212 +0,0 @@
# LocalAI Backend Architecture
This directory contains the core backend infrastructure for LocalAI, including the gRPC protocol definition, multi-language Dockerfiles, and language-specific backend implementations.
## Overview
LocalAI uses a unified gRPC-based architecture that allows different programming languages to implement AI backends while maintaining consistent interfaces and capabilities. The backend system supports multiple hardware acceleration targets and provides a standardized way to integrate various AI models and frameworks.
## Architecture Components
### 1. Protocol Definition (`backend.proto`)
The `backend.proto` file defines the gRPC service interface that all backends must implement. This ensures consistency across different language implementations and provides a contract for communication between LocalAI core and backend services.
#### Core Services
- **Text Generation**: `Predict`, `PredictStream` for LLM inference
- **Embeddings**: `Embedding` for text vectorization
- **Image Generation**: `GenerateImage` for stable diffusion and image models
- **Audio Processing**: `AudioTranscription`, `TTS`, `SoundGeneration`
- **Video Generation**: `GenerateVideo` for video synthesis
- **Object Detection**: `Detect` for computer vision tasks
- **Vector Storage**: `StoresSet`, `StoresGet`, `StoresFind` for RAG operations
- **Reranking**: `Rerank` for document relevance scoring
- **Voice Activity Detection**: `VAD` for audio segmentation
#### Key Message Types
- **`PredictOptions`**: Comprehensive configuration for text generation
- **`ModelOptions`**: Model loading and configuration parameters
- **`Result`**: Standardized response format
- **`StatusResponse`**: Backend health and memory usage information
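As a rough illustration, a Go-side caller might populate these messages as in the sketch below; the import path and exact field names are assumptions based on the generated stubs, not a verbatim copy of `backend.proto`.

```go
package main

import (
	"fmt"

	// Assumed import path for the stubs generated from backend.proto.
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

func main() {
	// ModelOptions drives LoadModel: which weights to load and how.
	load := &pb.ModelOptions{
		Model:       "model.gguf", // hypothetical model file
		ContextSize: 4096,
	}

	// PredictOptions drives Predict/PredictStream: prompt plus sampling parameters.
	predict := &pb.PredictOptions{
		Prompt:      "Summarize the LocalAI backend architecture.",
		Tokens:      256,
		Temperature: 0.7,
		TopP:        0.9,
	}

	fmt.Println(load.Model, predict.Prompt)
}
```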
### 2. Multi-Language Dockerfiles
The backend system provides language-specific Dockerfiles that handle the build environment and dependencies for different programming languages:
- `Dockerfile.python`
- `Dockerfile.golang`
- `Dockerfile.llama-cpp`
### 3. Language-Specific Implementations
#### Python Backends (`python/`)
- **transformers**: Hugging Face Transformers framework
- **vllm**: High-performance LLM inference
- **mlx**: Apple Silicon optimization
- **diffusers**: Stable Diffusion models
- **Audio**: bark, coqui, faster-whisper, kitten-tts
- **Vision**: mlx-vlm, rfdetr
- **Specialized**: rerankers, chatterbox, kokoro
#### Go Backends (`go/`)
- **whisper**: OpenAI Whisper speech recognition in Go, backed by the GGML C++ implementation (whisper.cpp)
- **stablediffusion-ggml**: Stable Diffusion in Go, backed by the GGML C++ implementation
- **huggingface**: Hugging Face model integration
- **piper**: Text-to-speech synthesis in Go with C bindings to rhasspy/piper
- **bark-cpp**: Bark TTS models in Go with C++ bindings
- **local-store**: Vector storage backend
#### C++ Backends (`cpp/`)
- **llama-cpp**: Llama.cpp integration
- **grpc**: GRPC utilities and helpers
## Hardware Acceleration Support
### CUDA (NVIDIA)
- **Versions**: CUDA 12.x, 13.x
- **Features**: cuBLAS, cuDNN, TensorRT optimization
- **Targets**: x86_64, ARM64 (Jetson)
### ROCm (AMD)
- **Features**: HIP, rocBLAS, MIOpen
- **Targets**: AMD GPUs with ROCm support
### Intel
- **Features**: oneAPI, Intel Extension for PyTorch
- **Targets**: Intel GPUs, XPUs, CPUs
### Vulkan
- **Features**: Cross-platform GPU acceleration
- **Targets**: Windows, Linux, Android, macOS
### Apple Silicon
- **Features**: MLX framework, Metal Performance Shaders
- **Targets**: M1/M2/M3 Macs
## Backend Registry (`index.yaml`)
The `index.yaml` file serves as a central registry for all available backends, providing:
- **Metadata**: Name, description, license, icons
- **Capabilities**: Hardware targets and optimization profiles
- **Tags**: Categorization for discovery
- **URLs**: Source code and documentation links
## Building Backends
### Prerequisites
- Docker with multi-architecture support
- Appropriate hardware drivers (CUDA, ROCm, etc.)
- Build tools (make, cmake, compilers)
### Build Commands
Example of build commands with Docker
```bash
# Build Python backend
docker build -f backend/Dockerfile.python \
--build-arg BACKEND=transformers \
--build-arg BUILD_TYPE=cublas12 \
--build-arg CUDA_MAJOR_VERSION=12 \
--build-arg CUDA_MINOR_VERSION=0 \
-t localai-backend-transformers .
# Build Go backend
docker build -f backend/Dockerfile.golang \
--build-arg BACKEND=whisper \
--build-arg BUILD_TYPE=cpu \
-t localai-backend-whisper .
# Build C++ backend
docker build -f backend/Dockerfile.llama-cpp \
--build-arg BACKEND=llama-cpp \
--build-arg BUILD_TYPE=cublas12 \
-t localai-backend-llama-cpp .
```
For ARM64/Mac builds, Docker cannot be used; use the Makefile in the respective backend directory instead.
### Build Types
- **`cpu`**: CPU-only optimization
- **`cublas12`**, **`cublas13`**: CUDA 12.x, 13.x with cuBLAS
- **`hipblas`**: ROCm with rocBLAS
- **`intel`**: Intel oneAPI optimization
- **`vulkan`**: Vulkan-based acceleration
- **`metal`**: Apple Metal optimization
## Backend Development
### Creating a New Backend
1. **Choose Language**: Select Python, Go, or C++ based on requirements
2. **Implement Interface**: Implement the gRPC service defined in `backend.proto`
3. **Add Dependencies**: Create appropriate requirements files
4. **Configure Build**: Set up Dockerfile and build scripts
5. **Register Backend**: Add entry to `index.yaml`
6. **Test Integration**: Verify gRPC communication and functionality
### Backend Structure
```
backend-name/
├── backend.py/go/cpp # Main implementation
├── requirements.txt # Dependencies
├── Dockerfile # Build configuration
├── install.sh # Installation script
├── run.sh # Execution script
├── test.sh # Test script
└── README.md # Backend documentation
```
### Required gRPC Methods
At minimum, backends must implement:
- `Health()` - Service health check
- `LoadModel()` - Model loading and initialization
- `Predict()` - Main inference endpoint
- `Status()` - Backend status and metrics
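A minimal Go skeleton of such a backend might look like the sketch below, assuming the stubs generated from `backend.proto` are imported as `pb` and register as `BackendServer`; type and field names are illustrative, not authoritative.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	// Assumed import path for the stubs generated from backend.proto.
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

// backend implements the gRPC service defined in backend.proto.
// Only the minimum set of methods is sketched here.
type backend struct {
	pb.UnimplementedBackendServer
}

// Health reports that the service is alive.
func (b *backend) Health(ctx context.Context, in *pb.HealthMessage) (*pb.Reply, error) {
	return &pb.Reply{Message: []byte("OK")}, nil
}

// LoadModel loads and initializes the model described by the request.
func (b *backend) LoadModel(ctx context.Context, in *pb.ModelOptions) (*pb.Result, error) {
	// A real backend would load weights from in.Model here.
	return &pb.Result{Success: true}, nil
}

// Predict runs a single inference call.
func (b *backend) Predict(ctx context.Context, in *pb.PredictOptions) (*pb.Reply, error) {
	// A real backend would run inference on in.Prompt here.
	return &pb.Reply{Message: []byte("generated text")}, nil
}

// Status reports backend state and resource usage.
func (b *backend) Status(ctx context.Context, in *pb.HealthMessage) (*pb.StatusResponse, error) {
	// Enum value name is an assumption; adjust to the generated constants.
	return &pb.StatusResponse{State: pb.StatusResponse_READY}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterBackendServer(s, &backend{})
	log.Fatal(s.Serve(lis))
}
```

Streaming generation follows the same pattern, with `PredictStream` writing partial `Reply` messages to the server stream.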
## Integration with LocalAI Core
Backends communicate with LocalAI core through gRPC:
1. **Service Discovery**: Core discovers available backends
2. **Model Loading**: Core requests model loading via `LoadModel`
3. **Inference**: Core sends requests via `Predict` or specialized endpoints
4. **Streaming**: Core handles streaming responses for real-time generation
5. **Monitoring**: Core tracks backend health and performance
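From the core's side the exchange is plain gRPC. A hypothetical client sketch of the load-then-predict flow (address, import path, and field names are assumptions for illustration):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the generated stubs.
	pb "github.com/mudler/LocalAI/pkg/grpc/proto"
)

func main() {
	// The backend address is normally chosen by LocalAI when it spawns the backend process.
	conn, err := grpc.NewClient("127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewBackendClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// 1. Load the model once.
	if _, err := client.LoadModel(ctx, &pb.ModelOptions{Model: "model.gguf"}); err != nil {
		log.Fatal(err)
	}

	// 2. Run inference.
	reply, err := client.Predict(ctx, &pb.PredictOptions{Prompt: "Hello"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(reply.Message))
}
```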
## Performance Optimization
### Memory Management
- **Model Caching**: Efficient model loading and caching
- **Batch Processing**: Optimize for multiple concurrent requests
- **Memory Pinning**: GPU memory optimization for CUDA/ROCm
### Hardware Utilization
- **Multi-GPU**: Support for tensor parallelism
- **Mixed Precision**: FP16/BF16 for memory efficiency
- **Kernel Fusion**: Optimized CUDA/ROCm kernels
## Troubleshooting
### Common Issues
1. **GRPC Connection**: Verify backend service is running and accessible
2. **Model Loading**: Check model paths and dependencies
3. **Hardware Detection**: Ensure appropriate drivers and libraries
4. **Memory Issues**: Monitor GPU memory usage and model sizes
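For the first point, a quick way to verify connectivity is a bare `Health` call against the backend's port; the sketch below is hypothetical, with the address and import path as assumptions.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	pb "github.com/mudler/LocalAI/pkg/grpc/proto" // assumed import path
)

func main() {
	conn, err := grpc.NewClient("127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	reply, err := pb.NewBackendClient(conn).Health(ctx, &pb.HealthMessage{})
	if err != nil {
		log.Fatalf("backend not reachable: %v", err)
	}
	fmt.Printf("backend is up: %s\n", reply.Message)
}
```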
## Contributing
When contributing to the backend system:
1. **Follow Protocol**: Implement the exact gRPC interface
2. **Add Tests**: Include comprehensive test coverage
3. **Document**: Provide clear usage examples
4. **Optimize**: Consider performance and resource usage
5. **Validate**: Test across different hardware targets


@@ -20,7 +20,6 @@ service Backend {
rpc SoundGeneration(SoundGenerationRequest) returns (Result) {} rpc SoundGeneration(SoundGenerationRequest) returns (Result) {}
rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {} rpc TokenizeString(PredictOptions) returns (TokenizationResponse) {}
rpc Status(HealthMessage) returns (StatusResponse) {} rpc Status(HealthMessage) returns (StatusResponse) {}
rpc Detect(DetectOptions) returns (DetectResponse) {}
rpc StoresSet(StoresSetOptions) returns (Result) {} rpc StoresSet(StoresSetOptions) returns (Result) {}
rpc StoresDelete(StoresDeleteOptions) returns (Result) {} rpc StoresDelete(StoresDeleteOptions) returns (Result) {}
@@ -154,10 +153,6 @@ message PredictOptions {
repeated string Videos = 45; repeated string Videos = 45;
repeated string Audios = 46; repeated string Audios = 46;
string CorrelationId = 47; string CorrelationId = 47;
string Tools = 48; // JSON array of available tools/functions for tool calling
string ToolChoice = 49; // JSON string or object specifying tool choice behavior
int32 Logprobs = 50; // Number of top logprobs to return (maps to OpenAI logprobs parameter)
int32 TopLogprobs = 51; // Number of top logprobs to return per token (maps to OpenAI top_logprobs parameter)
} }
// The response message containing the result // The response message containing the result
@@ -168,7 +163,6 @@ message Reply {
double timing_prompt_processing = 4; double timing_prompt_processing = 4;
double timing_token_generation = 5; double timing_token_generation = 5;
bytes audio = 6; bytes audio = 6;
bytes logprobs = 7; // JSON-encoded logprobs data matching OpenAI format
} }
message GrammarTrigger { message GrammarTrigger {
@@ -247,7 +241,7 @@ message ModelOptions {
string Type = 49; string Type = 49;
string FlashAttention = 56; bool FlashAttention = 56;
bool NoKVOffload = 57; bool NoKVOffload = 57;
string ModelPath = 59; string ModelPath = 59;
@@ -281,8 +275,6 @@ message TranscriptRequest {
string language = 3; string language = 3;
uint32 threads = 4; uint32 threads = 4;
bool translate = 5; bool translate = 5;
bool diarize = 6;
string prompt = 7;
} }
message TranscriptResult { message TranscriptResult {
@@ -301,6 +293,7 @@ message TranscriptSegment {
message GenerateImageRequest { message GenerateImageRequest {
int32 height = 1; int32 height = 1;
int32 width = 2; int32 width = 2;
int32 mode = 3;
int32 step = 4; int32 step = 4;
int32 seed = 5; int32 seed = 5;
string positive_prompt = 6; string positive_prompt = 6;
@@ -311,24 +304,19 @@ message GenerateImageRequest {
// Diffusers // Diffusers
string EnableParameters = 10; string EnableParameters = 10;
int32 CLIPSkip = 11; int32 CLIPSkip = 11;
// Reference images for models that support them (e.g., Flux Kontext)
repeated string ref_images = 12;
} }
message GenerateVideoRequest { message GenerateVideoRequest {
string prompt = 1; string prompt = 1;
string negative_prompt = 2; // Negative prompt for video generation string start_image = 2; // Path or base64 encoded image for the start frame
string start_image = 3; // Path or base64 encoded image for the start frame string end_image = 3; // Path or base64 encoded image for the end frame
string end_image = 4; // Path or base64 encoded image for the end frame int32 width = 4;
int32 width = 5; int32 height = 5;
int32 height = 6; int32 num_frames = 6; // Number of frames to generate
int32 num_frames = 7; // Number of frames to generate int32 fps = 7; // Frames per second
int32 fps = 8; // Frames per second int32 seed = 8;
int32 seed = 9; float cfg_scale = 9; // Classifier-free guidance scale
float cfg_scale = 10; // Classifier-free guidance scale string dst = 10; // Output path for the generated video
int32 step = 11; // Number of inference steps
string dst = 12; // Output path for the generated video
} }
message TTSRequest { message TTSRequest {
@@ -387,26 +375,4 @@ message StatusResponse {
message Message { message Message {
string role = 1; string role = 1;
string content = 2; string content = 2;
// Optional fields for OpenAI-compatible message format
string name = 3; // Tool name (for tool messages)
string tool_call_id = 4; // Tool call ID (for tool messages)
string reasoning_content = 5; // Reasoning content (for thinking models)
string tool_calls = 6; // Tool calls as JSON string (for assistant messages with tool calls)
}
message DetectOptions {
string src = 1;
}
message Detection {
float x = 1;
float y = 2;
float width = 3;
float height = 4;
float confidence = 5;
string class_name = 6;
}
message DetectResponse {
repeated Detection Detections = 1;
} }


@@ -17,6 +17,8 @@ if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
include_directories("${HOMEBREW_DEFAULT_PREFIX}/include") include_directories("${HOMEBREW_DEFAULT_PREFIX}/include")
endif() endif()
set(Protobuf_USE_STATIC_LIBS OFF)
set(gRPC_USE_STATIC_LIBS OFF)
find_package(absl CONFIG REQUIRED) find_package(absl CONFIG REQUIRED)
find_package(Protobuf CONFIG REQUIRED) find_package(Protobuf CONFIG REQUIRED)
find_package(gRPC CONFIG REQUIRED) find_package(gRPC CONFIG REQUIRED)
@@ -57,7 +59,7 @@ add_library(hw_grpc_proto
${hw_proto_srcs} ${hw_proto_srcs}
${hw_proto_hdrs} ) ${hw_proto_hdrs} )
add_executable(${TARGET} grpc-server.cpp json.hpp httplib.h) add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp httplib.h)
target_include_directories(${TARGET} PRIVATE ../llava) target_include_directories(${TARGET} PRIVATE ../llava)
target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR}) target_include_directories(${TARGET} PRIVATE ${CMAKE_SOURCE_DIR})
@@ -70,4 +72,4 @@ target_link_libraries(${TARGET} PRIVATE common llama mtmd ${CMAKE_THREAD_LIBS_IN
target_compile_features(${TARGET} PRIVATE cxx_std_11) target_compile_features(${TARGET} PRIVATE cxx_std_11)
if(TARGET BUILD_INFO) if(TARGET BUILD_INFO)
add_dependencies(${TARGET} BUILD_INFO) add_dependencies(${TARGET} BUILD_INFO)
endif() endif()


@@ -1,5 +1,5 @@
LLAMA_VERSION?=b1377188784f9aea26b8abde56d4aee8c733eec7 LLAMA_VERSION?=acd6cb1c41676f6bbb25c2a76fa5abeb1719301e
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp LLAMA_REPO?=https://github.com/ggerganov/llama.cpp
CMAKE_ARGS?= CMAKE_ARGS?=
@@ -7,15 +7,13 @@ BUILD_TYPE?=
NATIVE?=false NATIVE?=false
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server TARGET?=--target grpc-server
JOBS?=$(shell nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 1)
ARCH?=$(shell uname -m)
# Disable Shared libs as we are linking on static gRPC and we can't mix shared and static # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF CMAKE_ARGS+=-DBUILD_SHARED_LIBS=ON -DLLAMA_CURL=OFF -DGGML_CPU_ALL_VARIANTS=ON -DGGML_BACKEND_DL=ON
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false) ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF -DLLAMA_OPENSSL=OFF CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas) ifeq ($(BUILD_TYPE),cublas)
@@ -27,14 +25,16 @@ else ifeq ($(BUILD_TYPE),openblas)
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path # If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas) else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
ROCM_HOME ?= /opt/rocm ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm ROCM_PATH ?= /opt/rocm
export CXX=$(ROCM_HOME)/llvm/bin/clang++ export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang export CC=$(ROCM_HOME)/llvm/bin/clang
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201 # GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS) # AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=1 CMAKE_ARGS+=-DGGML_VULKAN=1
else ifeq ($(OS),Darwin) else ifeq ($(OS),Darwin)
@@ -89,39 +89,18 @@ else
LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
endif endif
llama-cpp-avx2: llama.cpp llama-cpp: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build purge $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build purge
$(info ${GREEN}I llama-cpp build info:avx2${RESET}) $(info ${GREEN}I llama-cpp build info:${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx2-build" build-llama-cpp-grpc-server CMAKE_ARGS="$(CMAKE_ARGS)" $(MAKE) VARIANT="llama-cpp-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build/grpc-server llama-cpp-avx2 cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-build/grpc-server llama-cpp
llama-cpp-avx512: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build purge
$(info ${GREEN}I llama-cpp build info:avx512${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx512-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build/grpc-server llama-cpp-avx512
llama-cpp-avx: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
$(info ${GREEN}I llama-cpp build info:avx${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx
llama-cpp-fallback: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
$(info ${GREEN}I llama-cpp build info:fallback${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback
llama-cpp-grpc: llama.cpp llama-cpp-grpc: llama.cpp
cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
$(info ${GREEN}I llama-cpp build info:grpc${RESET}) $(info ${GREEN}I llama-cpp build info:grpc${RESET})
CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc
llama-cpp-rpc-server: llama-cpp-grpc llama-cpp-rpc-server: llama-cpp-grpc
@@ -160,8 +139,8 @@ grpc-server: llama.cpp llama.cpp/tools/grpc-server
@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)" @echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE))) ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \ +bash -c "source $(ONEAPI_VARS); \
cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)" cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)"
else else
+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET) +cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release $(TARGET)
endif endif
cp llama.cpp/build/bin/grpc-server . cp llama.cpp/build/bin/grpc-server .

File diff suppressed because it is too large

@@ -6,7 +6,6 @@
set -e set -e
CURDIR=$(dirname "$(realpath $0)") CURDIR=$(dirname "$(realpath $0)")
REPO_ROOT="${CURDIR}/../../.."
# Create lib directory # Create lib directory
mkdir -p $CURDIR/package/lib mkdir -p $CURDIR/package/lib
@@ -38,15 +37,6 @@ else
exit 1 exit 1
fi fi
# Package GPU libraries based on BUILD_TYPE
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
if [ -f "$GPU_LIB_SCRIPT" ]; then
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
package_gpu_libs
fi
echo "Packaging completed successfully" echo "Packaging completed successfully"
ls -liah $CURDIR/package/ ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/ ls -liah $CURDIR/package/lib/


@@ -0,0 +1,13 @@
diff --git a/tools/mtmd/clip.cpp b/tools/mtmd/clip.cpp
index 3cd0d2fa..6c5e811a 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -2608,7 +2608,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
int* patches_data = (int*)malloc(ggml_nbytes(patches));
for (int i = 0; i < num_patches; i++) {
- patches_data[i] = i + 1;
+ patches_data[i] = i;
}
ggml_backend_tensor_set(patches, patches_data, 0, ggml_nbytes(patches));
free(patches_data);


@@ -1,24 +1,18 @@
#!/bin/bash #!/bin/bash
## Patches ## Patches
## Apply patches from the `patches` directory ## Apply patches from the `patches` directory
if [ -d "patches" ]; then for patch in $(ls patches); do
for patch in $(ls patches); do echo "Applying patch $patch"
echo "Applying patch $patch" patch -d llama.cpp/ -p1 < patches/$patch
patch -d llama.cpp/ -p1 < patches/$patch done
done
fi
set -e set -e
for file in $(ls llama.cpp/tools/server/); do
cp -rfv llama.cpp/tools/server/$file llama.cpp/tools/grpc-server/
done
cp -r CMakeLists.txt llama.cpp/tools/grpc-server/ cp -r CMakeLists.txt llama.cpp/tools/grpc-server/
cp -r grpc-server.cpp llama.cpp/tools/grpc-server/ cp -r grpc-server.cpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/nlohmann/json.hpp llama.cpp/tools/grpc-server/ cp -rfv llama.cpp/vendor/nlohmann/json.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/tools/server/utils.hpp llama.cpp/tools/grpc-server/
cp -rfv llama.cpp/vendor/cpp-httplib/httplib.h llama.cpp/tools/grpc-server/ cp -rfv llama.cpp/vendor/cpp-httplib/httplib.h llama.cpp/tools/grpc-server/
set +e set +e
@@ -29,3 +23,30 @@ else
fi fi
set -e set -e
# Now to keep maximum compatibility with the original server.cpp, we need to remove the index.html.gz.hpp and loading.html.hpp includes
# and remove the main function
# TODO: upstream this to the original server.cpp by extracting the upstream main function to a separate file
awk '
/int[ \t]+main[ \t]*\(/ { # If the line starts the main function
in_main=1; # Set a flag
open_braces=0; # Track number of open braces
}
in_main {
open_braces += gsub(/\{/, "{"); # Count opening braces
open_braces -= gsub(/\}/, "}"); # Count closing braces
if (open_braces == 0) { # If all braces are closed
in_main=0; # End skipping
}
next; # Skip lines inside main
}
!in_main # Print lines not inside main
' "llama.cpp/tools/server/server.cpp" > llama.cpp/tools/grpc-server/server.cpp
# remove index.html.gz.hpp and loading.html.hpp includes
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS
sed -i '' '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
else
# Linux and others
sed -i '/#include "index\.html\.gz\.hpp"/d; /#include "loading\.html\.hpp"/d' llama.cpp/tools/grpc-server/server.cpp
fi


@@ -6,34 +6,9 @@ CURDIR=$(dirname "$(realpath $0)")
cd / cd /
echo "CPU info:" BINARY=llama-cpp
grep -e "model\sname" /proc/cpuinfo | head -1
grep -e "flags" /proc/cpuinfo | head -1
BINARY=llama-cpp-fallback
if grep -q -e "\savx\s" /proc/cpuinfo ; then
echo "CPU: AVX found OK"
if [ -e $CURDIR/llama-cpp-avx ]; then
BINARY=llama-cpp-avx
fi
fi
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
echo "CPU: AVX2 found OK"
if [ -e $CURDIR/llama-cpp-avx2 ]; then
BINARY=llama-cpp-avx2
fi
fi
# Check avx 512
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
echo "CPU: AVX512F found OK"
if [ -e $CURDIR/llama-cpp-avx512 ]; then
BINARY=llama-cpp-avx512
fi
fi
## P2P/GRPC mode
if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then if [ -n "$LLAMACPP_GRPC_SERVERS" ]; then
if [ -e $CURDIR/llama-cpp-grpc ]; then if [ -e $CURDIR/llama-cpp-grpc ]; then
BINARY=llama-cpp-grpc BINARY=llama-cpp-grpc
@@ -42,8 +17,7 @@ fi
# Extend ld library path with the dir where this script is located/lib # Extend ld library path with the dir where this script is located/lib
if [ "$(uname)" == "Darwin" ]; then if [ "$(uname)" == "Darwin" ]; then
export DYLD_LIBRARY_PATH=$CURDIR/lib:$DYLD_LIBRARY_PATH DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
#export DYLD_FALLBACK_LIBRARY_PATH=$CURDIR/lib:$DYLD_FALLBACK_LIBRARY_PATH
else else
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
fi fi
@@ -57,6 +31,3 @@ fi
echo "Using binary: $BINARY" echo "Using binary: $BINARY"
exec $CURDIR/$BINARY "$@" exec $CURDIR/$BINARY "$@"
# We should never reach this point, however just in case we do, run fallback
exec $CURDIR/llama-cpp-fallback "$@"


@@ -4,11 +4,11 @@
package main package main
import ( import (
"github.com/mudler/xlog" "github.com/rs/zerolog/log"
) )
func assert(cond bool, msg string) { func assert(cond bool, msg string) {
if !cond { if !cond {
xlog.Fatal().Stack().Msg(msg) log.Fatal().Stack().Msg(msg)
} }
} }


@@ -7,7 +7,8 @@ import (
"os" "os"
grpc "github.com/mudler/LocalAI/pkg/grpc" grpc "github.com/mudler/LocalAI/pkg/grpc"
"github.com/mudler/xlog" "github.com/rs/zerolog"
"github.com/rs/zerolog/log"
) )
var ( var (
@@ -15,7 +16,7 @@ var (
) )
func main() { func main() {
xlog.SetLogger(xlog.NewLogger(xlog.LogLevel(os.Getenv("LOCALAI_LOG_LEVEL")), os.Getenv("LOCALAI_LOG_FORMAT"))) log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
flag.Parse() flag.Parse()


@@ -12,7 +12,7 @@ import (
"github.com/mudler/LocalAI/pkg/grpc/base" "github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto" pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/xlog" "github.com/rs/zerolog/log"
) )
type Store struct { type Store struct {
@@ -135,7 +135,7 @@ func (s *Store) StoresSet(opts *pb.StoresSetOptions) error {
} else { } else {
sample = k.Floats sample = k.Floats
} }
xlog.Debug("Key is not normalized", "sample", sample) log.Debug().Msgf("Key is not normalized: %v", sample)
} }
kvs[i] = Pair{ kvs[i] = Pair{
@@ -238,7 +238,7 @@ func (s *Store) StoresDelete(opts *pb.StoresDeleteOptions) error {
assert(!hasKey(s.keys, k), fmt.Sprintf("Key exists, but was not found: t=%d, %v", len(tail_ks), k)) assert(!hasKey(s.keys, k), fmt.Sprintf("Key exists, but was not found: t=%d, %v", len(tail_ks), k))
} }
xlog.Debug("Delete", "found", found, "tailLen", len(tail_ks), "j", j, "mergeKeysLen", len(merge_ks), "mergeValuesLen", len(merge_vs)) log.Debug().Msgf("Delete: found = %v, t = %d, j = %d, len(merge_ks) = %d, len(merge_vs) = %d", found, len(tail_ks), j, len(merge_ks), len(merge_vs))
} }
merge_ks = append(merge_ks, tail_ks...) merge_ks = append(merge_ks, tail_ks...)
@@ -261,7 +261,7 @@ func (s *Store) StoresDelete(opts *pb.StoresDeleteOptions) error {
}(), "Keys to delete still present") }(), "Keys to delete still present")
if len(s.keys) != l { if len(s.keys) != l {
xlog.Debug("Delete: Some keys not found", "keysLen", len(s.keys), "expectedLen", l) log.Debug().Msgf("Delete: Some keys not found: len(s.keys) = %d, l = %d", len(s.keys), l)
} }
return nil return nil
@@ -273,7 +273,7 @@ func (s *Store) StoresGet(opts *pb.StoresGetOptions) (pb.StoresGetResult, error)
ks := sortIntoKeySlicese(opts.Keys) ks := sortIntoKeySlicese(opts.Keys)
if len(s.keys) == 0 { if len(s.keys) == 0 {
xlog.Debug("Get: No keys in store") log.Debug().Msgf("Get: No keys in store")
} }
if s.keyLen == -1 { if s.keyLen == -1 {
@@ -305,7 +305,7 @@ func (s *Store) StoresGet(opts *pb.StoresGetOptions) (pb.StoresGetResult, error)
} }
if len(pbKeys) != len(opts.Keys) { if len(pbKeys) != len(opts.Keys) {
xlog.Debug("Get: Some keys not found", "pbKeysLen", len(pbKeys), "optsKeysLen", len(opts.Keys), "storeKeysLen", len(s.keys)) log.Debug().Msgf("Get: Some keys not found: len(pbKeys) = %d, len(opts.Keys) = %d, len(s.Keys) = %d", len(pbKeys), len(opts.Keys), len(s.keys))
} }
return pb.StoresGetResult{ return pb.StoresGetResult{
@@ -507,7 +507,7 @@ func (s *Store) StoresFind(opts *pb.StoresFindOptions) (pb.StoresFindResult, err
} else { } else {
sample = tk sample = tk
} }
xlog.Debug("Trying to compare non-normalized key with normalized keys", "sample", sample) log.Debug().Msgf("Trying to compare non-normalized key with normalized keys: %v", sample)
} }
return s.StoresFindFallback(opts) return s.StoresFindFallback(opts)


@@ -1,6 +0,0 @@
package/
sources/
.cache/
build/
libgosd.so
stablediffusion-ggml


@@ -1,20 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(gosd LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
add_subdirectory(./sources/stablediffusion-ggml.cpp)
add_library(gosd MODULE gosd.cpp)
target_link_libraries(gosd PRIVATE stable-diffusion ggml)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
target_link_libraries(gosd PRIVATE stdc++fs)
endif()
target_include_directories(gosd PUBLIC
stable-diffusion.cpp
stable-diffusion.cpp/thirdparty
)
set_property(TARGET gosd PROPERTY CXX_STANDARD 17)
set_target_properties(gosd PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})


@@ -1,16 +1,28 @@
INCLUDE_PATH := $(abspath ./)
LIBRARY_PATH := $(abspath ./)
AR?=ar
CMAKE_ARGS?= CMAKE_ARGS?=
BUILD_TYPE?= BUILD_TYPE?=
NATIVE?=false NATIVE?=false
CUDA_LIBPATH?=/usr/local/cuda/lib64/
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
# keep standard at C11 and C++11
CXXFLAGS = -I. -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/thirdparty -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp/ggml/include -I$(INCLUDE_PATH)/sources/stablediffusion-ggml.cpp -O3 -DNDEBUG -std=c++17 -fPIC
GOCMD?=go GOCMD?=go
CGO_LDFLAGS?=
# Avoid parent make file overwriting CGO_LDFLAGS which is needed for hipblas
CGO_LDFLAGS_SYCL=
GO_TAGS?= GO_TAGS?=
JOBS?=$(shell nproc --ignore=1) LD_FLAGS?=
# stablediffusion.cpp (ggml) # stablediffusion.cpp (ggml)
STABLEDIFFUSION_GGML_REPO?=https://github.com/leejet/stable-diffusion.cpp STABLEDIFFUSION_GGML_REPO?=https://github.com/richiejp/stable-diffusion.cpp
STABLEDIFFUSION_GGML_VERSION?=0e52afc6513cc2dea9a1a017afc4a008d5acf2b0 STABLEDIFFUSION_GGML_VERSION?=53e3b17eb3d0b5760ced06a1f98320b68b34aaae
CMAKE_ARGS+=-DGGML_MAX_NAME=128 # Disable Shared libs as we are linking on static gRPC and we can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF
ifeq ($(NATIVE),false) ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF CMAKE_ARGS+=-DGGML_NATIVE=OFF
@@ -19,6 +31,7 @@ endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically # If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas) ifeq ($(BUILD_TYPE),cublas)
CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON CMAKE_ARGS+=-DSD_CUDA=ON -DGGML_CUDA=ON
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS # If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically # to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas) else ifeq ($(BUILD_TYPE),openblas)
@@ -28,20 +41,19 @@ else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ # If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
ROCM_HOME ?= /opt/rocm CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON
ROCM_PATH ?= /opt/rocm # If it's OSX, DO NOT embed the metal library - -DGGML_METAL_EMBED_LIBRARY=ON requires further investigation
export CXX=$(ROCM_HOME)/llvm/bin/clang++ # But if it's OSX without metal, disable it here
export CC=$(ROCM_HOME)/llvm/bin/clang
AMDGPU_TARGETS?=gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102,gfx1200,gfx1201
CMAKE_ARGS+=-DSD_HIPBLAS=ON -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON CMAKE_ARGS+=-DSD_VULKAN=ON -DGGML_VULKAN=ON
CGO_LDFLAGS+=-lvulkan
else ifeq ($(OS),Darwin) else ifeq ($(OS),Darwin)
ifneq ($(BUILD_TYPE),metal) ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF CMAKE_ARGS+=-DSD_METAL=OFF -DGGML_METAL=OFF
else else
CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON CMAKE_ARGS+=-DSD_METAL=ON -DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
TARGET+=--target ggml-metal
endif endif
endif endif
@@ -51,6 +63,12 @@ ifeq ($(BUILD_TYPE),sycl_f16)
-DCMAKE_CXX_COMPILER=icpx \ -DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON \ -DSD_SYCL=ON \
-DGGML_SYCL_F16=ON -DGGML_SYCL_F16=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif endif
ifeq ($(BUILD_TYPE),sycl_f32) ifeq ($(BUILD_TYPE),sycl_f32)
@@ -58,29 +76,83 @@ ifeq ($(BUILD_TYPE),sycl_f32)
-DCMAKE_C_COMPILER=icx \ -DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \ -DCMAKE_CXX_COMPILER=icpx \
-DSD_SYCL=ON -DSD_SYCL=ON
export CC=icx
export CXX=icpx
CGO_LDFLAGS_SYCL += -fsycl -L${DNNLROOT}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL
CGO_LDFLAGS_SYCL += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS += $(shell pkg-config --cflags mkl-static-lp64-gomp )
endif endif
# warnings
# CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function
# Find all .a archives in ARCHIVE_DIR
# (ggml can have different backends cpu, cuda, etc., each backend generates a .a archive)
GGML_ARCHIVE_DIR := build/ggml/src/
ALL_ARCHIVES := $(shell find $(GGML_ARCHIVE_DIR) -type f -name '*.a')
# Name of the single merged library
COMBINED_LIB := libggmlall.a
# Rule to merge all the .a files into one
$(COMBINED_LIB): $(ALL_ARCHIVES)
@echo "Merging all .a into $(COMBINED_LIB)"
rm -f $@
mkdir -p merge-tmp
for a in $(ALL_ARCHIVES); do \
( cd merge-tmp && ar x ../$$a ); \
done
( cd merge-tmp && ar rcs ../$@ *.o )
# Ensure we have a proper index
ranlib $@
# Clean up
rm -rf merge-tmp
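As an optional, illustrative check (not part of the Makefile), the merged archive can be inspected to confirm that objects from each enabled ggml backend were actually picked up:

```bash
# List a few of the object files bundled into the merged archive
ar t libggmlall.a | sort | head

# A GPU build (e.g. cublas) should contain noticeably more objects than a CPU-only one
ar t libggmlall.a | wc -l
```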
build/libstable-diffusion.a:
@echo "Building SD with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release"
else
mkdir -p build && \
cd build && \
cmake $(CMAKE_ARGS) ../sources/stablediffusion-ggml.cpp && \
cmake --build . --config Release
endif
$(MAKE) $(COMBINED_LIB)
gosd.o:
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
+bash -c "source $(ONEAPI_VARS); \
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c"
else
$(CXX) $(CXXFLAGS) gosd.cpp -o gosd.o -c
endif
## stablediffusion (ggml)
sources/stablediffusion-ggml.cpp: sources/stablediffusion-ggml.cpp:
git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \ git clone --recursive $(STABLEDIFFUSION_GGML_REPO) sources/stablediffusion-ggml.cpp && \
cd sources/stablediffusion-ggml.cpp && \ cd sources/stablediffusion-ggml.cpp && \
git checkout $(STABLEDIFFUSION_GGML_VERSION) && \ git checkout $(STABLEDIFFUSION_GGML_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch git submodule update --init --recursive --depth 1 --single-branch
libgosd.so: sources/stablediffusion-ggml.cpp CMakeLists.txt gosd.cpp gosd.h libsd.a: sources/stablediffusion-ggml.cpp build/libstable-diffusion.a gosd.o
mkdir -p build && \ cp $(INCLUDE_PATH)/build/libstable-diffusion.a ./libsd.a
cd build && \ $(AR) rcs libsd.a gosd.o
cmake .. $(CMAKE_ARGS) && \
cmake --build . --config Release -j$(JOBS) && \
cd .. && \
mv build/libgosd.so ./
stablediffusion-ggml: main.go gosd.go libgosd.so stablediffusion-ggml: libsd.a
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o stablediffusion-ggml ./ CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_SYCL)" C_INCLUDE_PATH="$(INCLUDE_PATH)" LIBRARY_PATH="$(LIBRARY_PATH)" \
CC="$(CC)" CXX="$(CXX)" CGO_CXXFLAGS="$(CGO_CXXFLAGS)" \
$(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o stablediffusion-ggml ./
package: stablediffusion-ggml package:
bash package.sh bash package.sh
build: package build: stablediffusion-ggml package
clean: clean:
rm -rf libgosd.so build stablediffusion-ggml package sources rm -rf gosd.o libsd.a build $(COMBINED_LIB)

File diff suppressed because it is too large

@@ -1,10 +1,15 @@
package main package main
// #cgo CXXFLAGS: -I${SRCDIR}/sources/stablediffusion-ggml.cpp/thirdparty -I${SRCDIR}/sources/stablediffusion-ggml.cpp -I${SRCDIR}/sources/stablediffusion-ggml.cpp/ggml/include
// #cgo LDFLAGS: -L${SRCDIR}/ -lsd -lstdc++ -lm -lggmlall -lgomp
// #include <gosd.h>
// #include <stdlib.h>
import "C"
import ( import (
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"runtime"
"strings" "strings"
"unsafe" "unsafe"
@@ -20,45 +25,20 @@ type SDGGML struct {
cfgScale float32 cfgScale float32
} }
var (
LoadModel func(model, model_path string, options []uintptr, threads int32, diff int) int
GenImage func(params uintptr, steps int, dst string, cfgScale float32, srcImage string, strength float32, maskImage string, refImages []uintptr, refImagesCount int) int
TilingParamsSetEnabled func(params uintptr, enabled bool)
TilingParamsSetTileSizes func(params uintptr, tileSizeX int, tileSizeY int)
TilingParamsSetRelSizes func(params uintptr, relSizeX float32, relSizeY float32)
TilingParamsSetTargetOverlap func(params uintptr, targetOverlap float32)
ImgGenParamsNew func() uintptr
ImgGenParamsSetPrompts func(params uintptr, prompt string, negativePrompt string)
ImgGenParamsSetDimensions func(params uintptr, width int, height int)
ImgGenParamsSetSeed func(params uintptr, seed int64)
ImgGenParamsGetVaeTilingParams func(params uintptr) uintptr
)
// Copied from Purego internal/strings
// TODO: We should upstream sending []string
func hasSuffix(s, suffix string) bool {
return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
func CString(name string) *byte {
if hasSuffix(name, "\x00") {
return &(*(*[]byte)(unsafe.Pointer(&name)))[0]
}
b := make([]byte, len(name)+1)
copy(b, name)
return &b[0]
}
func (sd *SDGGML) Load(opts *pb.ModelOptions) error { func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
sd.threads = int(opts.Threads) sd.threads = int(opts.Threads)
modelPath := opts.ModelPath modelFile := C.CString(opts.ModelFile)
defer C.free(unsafe.Pointer(modelFile))
modelFile := opts.ModelFile var options **C.char
modelPathC := modelPath // prepare the options array to pass to C
size := C.size_t(unsafe.Sizeof((*C.char)(nil)))
length := C.size_t(len(opts.Options))
options = (**C.char)(C.malloc(length * size))
view := (*[1 << 30]*C.char)(unsafe.Pointer(options))[0:len(opts.Options):len(opts.Options)]
var diffusionModel int var diffusionModel int
@@ -83,20 +63,13 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
fmt.Fprintf(os.Stderr, "Options: %+v\n", oo) fmt.Fprintf(os.Stderr, "Options: %+v\n", oo)
// At the time of writing Purego doesn't recurse into slices and convert Go strings to pointers so we need to do that for i, x := range oo {
var keepAlive []any view[i] = C.CString(x)
options := make([]uintptr, len(oo), len(oo)+1)
for i, op := range oo {
bytep := CString(op)
options[i] = uintptr(unsafe.Pointer(bytep))
keepAlive = append(keepAlive, bytep)
} }
sd.cfgScale = opts.CFGScale sd.cfgScale = opts.CFGScale
ret := LoadModel(modelFile, modelPathC, options, opts.Threads, diffusionModel) ret := C.load_model(modelFile, options, C.int(opts.Threads), C.int(diffusionModel))
runtime.KeepAlive(keepAlive)
fmt.Fprintf(os.Stderr, "LoadModel: %d\n", ret)
if ret != 0 { if ret != 0 {
return fmt.Errorf("could not load model") return fmt.Errorf("could not load model")
} }
@@ -105,48 +78,16 @@ func (sd *SDGGML) Load(opts *pb.ModelOptions) error {
} }
func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error { func (sd *SDGGML) GenerateImage(opts *pb.GenerateImageRequest) error {
t := opts.PositivePrompt t := C.CString(opts.PositivePrompt)
dst := opts.Dst defer C.free(unsafe.Pointer(t))
negative := opts.NegativePrompt
srcImage := opts.Src
var maskImage string dst := C.CString(opts.Dst)
if opts.EnableParameters != "" { defer C.free(unsafe.Pointer(dst))
if strings.Contains(opts.EnableParameters, "mask:") {
parts := strings.Split(opts.EnableParameters, "mask:")
if len(parts) > 1 {
maskPath := strings.TrimSpace(parts[1])
if maskPath != "" {
maskImage = maskPath
}
}
}
}
// At the time of writing Purego doesn't recurse into slices and convert Go strings to pointers so we need to do that negative := C.CString(opts.NegativePrompt)
var keepAlive []any defer C.free(unsafe.Pointer(negative))
refImagesCount := len(opts.RefImages)
refImages := make([]uintptr, refImagesCount, refImagesCount+1)
for i, ri := range opts.RefImages {
bytep := CString(ri)
refImages[i] = uintptr(unsafe.Pointer(bytep))
keepAlive = append(keepAlive, bytep)
}
// Default strength for img2img (0.75 is a good default) ret := C.gen_image(t, negative, C.int(opts.Width), C.int(opts.Height), C.int(opts.Step), C.int(opts.Seed), dst, C.float(sd.cfgScale))
strength := float32(0.75)
// free'd by GenImage
p := ImgGenParamsNew()
ImgGenParamsSetPrompts(p, t, negative)
ImgGenParamsSetDimensions(p, int(opts.Width), int(opts.Height))
ImgGenParamsSetSeed(p, int64(opts.Seed))
vaep := ImgGenParamsGetVaeTilingParams(p)
TilingParamsSetEnabled(vaep, false)
ret := GenImage(p, int(opts.Step), dst, sd.cfgScale, srcImage, strength, maskImage, refImages, refImagesCount)
runtime.KeepAlive(keepAlive)
fmt.Fprintf(os.Stderr, "GenImage: %d\n", ret)
if ret != 0 { if ret != 0 {
return fmt.Errorf("inference failed") return fmt.Errorf("inference failed")
} }


@@ -1,23 +1,8 @@
#include <cstdint>
#include "stable-diffusion.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
int load_model(char *model, char* options[], int threads, int diffusionModel);
void sd_tiling_params_set_enabled(sd_tiling_params_t *params, bool enabled); int gen_image(char *text, char *negativeText, int width, int height, int steps, int seed, char *dst, float cfg_scale);
void sd_tiling_params_set_tile_sizes(sd_tiling_params_t *params, int tile_size_x, int tile_size_y);
void sd_tiling_params_set_rel_sizes(sd_tiling_params_t *params, float rel_size_x, float rel_size_y);
void sd_tiling_params_set_target_overlap(sd_tiling_params_t *params, float target_overlap);
sd_tiling_params_t* sd_img_gen_params_get_vae_tiling_params(sd_img_gen_params_t *params);
sd_img_gen_params_t* sd_img_gen_params_new(void);
void sd_img_gen_params_set_prompts(sd_img_gen_params_t *params, const char *prompt, const char *negative_prompt);
void sd_img_gen_params_set_dimensions(sd_img_gen_params_t *params, int width, int height);
void sd_img_gen_params_set_seed(sd_img_gen_params_t *params, int64_t seed);
int load_model(const char *model, char *model_path, char* options[], int threads, int diffusionModel);
int gen_image(sd_img_gen_params_t *p, int steps, char *dst, float cfg_scale, char *src_image, float strength, char *mask_image, char* ref_images[], int ref_images_count);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif


@@ -1,9 +1,9 @@
package main package main
// Note: this is started internally by LocalAI and a server is allocated for each model
import ( import (
"flag" "flag"
"github.com/ebitengine/purego"
grpc "github.com/mudler/LocalAI/pkg/grpc" grpc "github.com/mudler/LocalAI/pkg/grpc"
) )
@@ -11,36 +11,7 @@ var (
addr = flag.String("addr", "localhost:50051", "the address to connect to") addr = flag.String("addr", "localhost:50051", "the address to connect to")
) )
type LibFuncs struct {
FuncPtr any
Name string
}
func main() { func main() {
gosd, err := purego.Dlopen("./libgosd.so", purego.RTLD_NOW|purego.RTLD_GLOBAL)
if err != nil {
panic(err)
}
libFuncs := []LibFuncs{
{&LoadModel, "load_model"},
{&GenImage, "gen_image"},
{&TilingParamsSetEnabled, "sd_tiling_params_set_enabled"},
{&TilingParamsSetTileSizes, "sd_tiling_params_set_tile_sizes"},
{&TilingParamsSetRelSizes, "sd_tiling_params_set_rel_sizes"},
{&TilingParamsSetTargetOverlap, "sd_tiling_params_set_target_overlap"},
{&ImgGenParamsNew, "sd_img_gen_params_new"},
{&ImgGenParamsSetPrompts, "sd_img_gen_params_set_prompts"},
{&ImgGenParamsSetDimensions, "sd_img_gen_params_set_dimensions"},
{&ImgGenParamsSetSeed, "sd_img_gen_params_set_seed"},
{&ImgGenParamsGetVaeTilingParams, "sd_img_gen_params_get_vae_tiling_params"},
}
for _, lf := range libFuncs {
purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
}
flag.Parse() flag.Parse()
if err := grpc.StartServer(*addr, &SDGGML{}); err != nil { if err := grpc.StartServer(*addr, &SDGGML{}); err != nil {


@@ -6,14 +6,12 @@
set -e set -e
CURDIR=$(dirname "$(realpath $0)") CURDIR=$(dirname "$(realpath $0)")
REPO_ROOT="${CURDIR}/../../.."
# Create lib directory # Create lib directory
mkdir -p $CURDIR/package/lib mkdir -p $CURDIR/package/lib
cp -avf $CURDIR/libgosd.so $CURDIR/package/ cp -avrf $CURDIR/stablediffusion-ggml $CURDIR/package/
cp -avf $CURDIR/stablediffusion-ggml $CURDIR/package/ cp -rfv $CURDIR/run.sh $CURDIR/package/
cp -fv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries # Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -44,22 +42,11 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2 cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1 cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0 cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
elif [ $(uname -s) = "Darwin" ]; then
echo "Detected Darwin"
else else
echo "Error: Could not detect architecture" echo "Error: Could not detect architecture"
exit 1 exit 1
fi fi
# Package GPU libraries based on BUILD_TYPE echo "Packaging completed successfully"
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
if [ -f "$GPU_LIB_SCRIPT" ]; then
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
package_gpu_libs
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/ ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/ ls -liah $CURDIR/package/lib/


@@ -1,7 +0,0 @@
.cache/
sources/
build/
package/
whisper
*.so
compile_commands.json


@@ -1,16 +0,0 @@
cmake_minimum_required(VERSION 3.12)
project(gowhisper LANGUAGES C CXX)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
add_subdirectory(./sources/whisper.cpp)
add_library(gowhisper MODULE gowhisper.cpp)
target_link_libraries(gowhisper PRIVATE whisper ggml)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.0)
target_link_libraries(gosd PRIVATE stdc++fs)
endif()
set_property(TARGET gowhisper PROPERTY CXX_STANDARD 17)
set_target_properties(gowhisper PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})


@@ -1,54 +1,110 @@
CMAKE_ARGS?= GOCMD=go
BUILD_TYPE?=
NATIVE?=false NATIVE?=false
GOCMD?=go BUILD_TYPE?=
GO_TAGS?= CMAKE_ARGS?=
JOBS?=$(shell nproc --ignore=1)
# whisper.cpp version # whisper.cpp version
WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp WHISPER_REPO?=https://github.com/ggml-org/whisper.cpp
WHISPER_CPP_VERSION?=679bdb53dbcbfb3e42685f50c7ff367949fd4d48 WHISPER_CPP_VERSION?=1f5cf0b2888402d57bb17b2029b2caa97e5f3baf
SO_TARGET?=libgowhisper.so
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF export WHISPER_CMAKE_ARGS?=-DBUILD_SHARED_LIBS=OFF
export WHISPER_DIR=$(abspath ./sources/whisper.cpp)
export WHISPER_INCLUDE_PATH=$(WHISPER_DIR)/include:$(WHISPER_DIR)/ggml/include
export WHISPER_LIBRARY_PATH=$(WHISPER_DIR)/build/src/:$(WHISPER_DIR)/build/ggml/src
CGO_LDFLAGS_WHISPER?=
CGO_LDFLAGS_WHISPER+=-lggml
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF
CUDA_LIBPATH?=/usr/local/cuda/lib64/
ONEAPI_VERSION?=2025.2
# IF native is false, we add -DGGML_NATIVE=OFF to CMAKE_ARGS
ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF
WHISPER_CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif
CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
ifeq ($(NATIVE),false) ifeq ($(NATIVE),false)
CMAKE_ARGS+=-DGGML_NATIVE=OFF CMAKE_ARGS+=-DGGML_NATIVE=OFF
endif endif
# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas) ifeq ($(BUILD_TYPE),cublas)
CGO_LDFLAGS+=-lcublas -lcudart -L$(CUDA_LIBPATH) -L$(CUDA_LIBPATH)/stubs/ -lcuda
CMAKE_ARGS+=-DGGML_CUDA=ON CMAKE_ARGS+=-DGGML_CUDA=ON
CGO_LDFLAGS_WHISPER+=-lcufft -lggml-cuda
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-cuda/
# If build type is openblas then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas) else ifeq ($(BUILD_TYPE),openblas)
CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (openCL) we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas) else ifeq ($(BUILD_TYPE),clblas)
CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas we do have also to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas) else ifeq ($(BUILD_TYPE),hipblas)
CMAKE_ARGS+=-DGGML_HIPBLAS=ON ROCM_HOME ?= /opt/rocm
ROCM_PATH ?= /opt/rocm
LD_LIBRARY_PATH ?= /opt/rocm/lib:/opt/rocm/llvm/lib
export STABLE_BUILD_TYPE=
export CXX=$(ROCM_HOME)/llvm/bin/clang++
export CC=$(ROCM_HOME)/llvm/bin/clang
# GPU_TARGETS ?= gfx803,gfx900,gfx906,gfx908,gfx90a,gfx942,gfx1010,gfx1030,gfx1032,gfx1100,gfx1101,gfx1102
# AMDGPU_TARGETS ?= "$(GPU_TARGETS)"
CMAKE_ARGS+=-DGGML_HIP=ON
CGO_LDFLAGS += -O3 --rtlib=compiler-rt -unwindlib=libgcc -lhipblas -lrocblas --hip-link -L${ROCM_HOME}/lib/llvm/lib -L$(CURRENT_MAKEFILE_DIR)/sources/whisper.cpp/build/ggml/src/ggml-hip/ -lggml-hip
# CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS="$(AMDGPU_TARGETS)" -DGPU_TARGETS="$(GPU_TARGETS)"
else ifeq ($(BUILD_TYPE),vulkan) else ifeq ($(BUILD_TYPE),vulkan)
CMAKE_ARGS+=-DGGML_VULKAN=ON CMAKE_ARGS+=-DGGML_VULKAN=1
CGO_LDFLAGS_WHISPER+=-lggml-vulkan -lvulkan
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-vulkan/
else ifeq ($(OS),Darwin) else ifeq ($(OS),Darwin)
ifeq ($(BUILD_TYPE),)
BUILD_TYPE=metal
endif
ifneq ($(BUILD_TYPE),metal) ifneq ($(BUILD_TYPE),metal)
CMAKE_ARGS+=-DGGML_METAL=OFF CMAKE_ARGS+=-DGGML_METAL=OFF
CGO_LDFLAGS_WHISPER+=-lggml-blas
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-blas
else else
CMAKE_ARGS+=-DGGML_METAL=ON CMAKE_ARGS+=-DGGML_METAL=ON
CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
CMAKE_ARGS+=-DGGML_METAL_USE_BF16=ON
CMAKE_ARGS+=-DGGML_OPENMP=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_EXAMPLES=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_TESTS=OFF
CMAKE_ARGS+=-DWHISPER_BUILD_SERVER=OFF
CGO_LDFLAGS += -framework Accelerate
CGO_LDFLAGS_WHISPER+=-lggml-metal -lggml-blas
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-metal/:$(WHISPER_DIR)/build/ggml/src/ggml-blas
endif endif
TARGET+=--target ggml-metal
endif endif
ifeq ($(BUILD_TYPE),sycl_f16) ifneq (,$(findstring sycl,$(BUILD_TYPE)))
export CC=icx
export CXX=icpx
CGO_LDFLAGS_WHISPER += -fsycl -L${DNNLROOT}/lib -rpath ${ONEAPI_ROOT}/${ONEAPI_VERSION}/lib -ldnnl ${MKLROOT}/lib/intel64/libmkl_sycl.a -fiopenmp -fopenmp-targets=spir64 -lOpenCL -lggml-sycl
CGO_LDFLAGS_WHISPER += $(shell pkg-config --libs mkl-static-lp64-gomp)
CGO_CXXFLAGS_WHISPER += -fiopenmp -fopenmp-targets=spir64
CGO_CXXFLAGS_WHISPER += $(shell pkg-config --cflags mkl-static-lp64-gomp )
export WHISPER_LIBRARY_PATH:=$(WHISPER_LIBRARY_PATH):$(WHISPER_DIR)/build/ggml/src/ggml-sycl/
CMAKE_ARGS+=-DGGML_SYCL=ON \ CMAKE_ARGS+=-DGGML_SYCL=ON \
-DCMAKE_C_COMPILER=icx \ -DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx \ -DCMAKE_CXX_COMPILER=icpx \
-DGGML_SYCL_F16=ON -DCMAKE_CXX_FLAGS="-fsycl"
endif endif
ifeq ($(BUILD_TYPE),sycl_f32) ifeq ($(BUILD_TYPE),sycl_f16)
CMAKE_ARGS+=-DGGML_SYCL=ON \ CMAKE_ARGS+=-DGGML_SYCL_F16=ON
-DCMAKE_C_COMPILER=icx \
-DCMAKE_CXX_COMPILER=icpx
endif endif
ifneq ($(OS),Darwin)
CGO_LDFLAGS_WHISPER+=-lgomp
endif
## whisper
sources/whisper.cpp: sources/whisper.cpp:
mkdir -p sources/whisper.cpp mkdir -p sources/whisper.cpp
cd sources/whisper.cpp && \ cd sources/whisper.cpp && \
@@ -58,65 +114,18 @@ sources/whisper.cpp:
git checkout $(WHISPER_CPP_VERSION) && \ git checkout $(WHISPER_CPP_VERSION) && \
git submodule update --init --recursive --depth 1 --single-branch git submodule update --init --recursive --depth 1 --single-branch
# Detect OS sources/whisper.cpp/build/src/libwhisper.a: sources/whisper.cpp
UNAME_S := $(shell uname -s) cd sources/whisper.cpp && cmake $(CMAKE_ARGS) $(WHISPER_CMAKE_ARGS) . -B ./build
cd sources/whisper.cpp/build && cmake --build . --config Release
# Only build CPU variants on Linux whisper: sources/whisper.cpp sources/whisper.cpp/build/src/libwhisper.a
ifeq ($(UNAME_S),Linux) $(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp=$(CURDIR)/sources/whisper.cpp
VARIANT_TARGETS = libgowhisper-avx.so libgowhisper-avx2.so libgowhisper-avx512.so libgowhisper-fallback.so $(GOCMD) mod edit -replace github.com/ggerganov/whisper.cpp/bindings/go=$(CURDIR)/sources/whisper.cpp/bindings/go
else CGO_LDFLAGS="$(CGO_LDFLAGS) $(CGO_LDFLAGS_WHISPER)" C_INCLUDE_PATH="${WHISPER_INCLUDE_PATH}" LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" LD_LIBRARY_PATH="${WHISPER_LIBRARY_PATH}" \
# On non-Linux (e.g., Darwin), build only fallback variant CGO_CXXFLAGS="$(CGO_CXXFLAGS_WHISPER)" \
VARIANT_TARGETS = libgowhisper-fallback.so $(GOCMD) build -ldflags "$(LD_FLAGS)" -tags "$(GO_TAGS)" -o whisper ./
endif
whisper: main.go gowhisper.go $(VARIANT_TARGETS) package:
CGO_ENABLED=0 $(GOCMD) build -tags "$(GO_TAGS)" -o whisper ./
package: whisper
bash package.sh bash package.sh
build: package build: whisper package
clean: purge
rm -rf libgowhisper*.so sources/whisper.cpp whisper
purge:
rm -rf build*
# Build all variants (Linux only)
ifeq ($(UNAME_S),Linux)
libgowhisper-avx.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:avx${RESET})
SO_TARGET=libgowhisper-avx.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
rm -rfv build*
libgowhisper-avx2.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:avx2${RESET})
SO_TARGET=libgowhisper-avx2.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
rm -rfv build*
libgowhisper-avx512.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:avx512${RESET})
SO_TARGET=libgowhisper-avx512.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) libgowhisper-custom
rm -rfv build*
endif
# Build fallback variant (all platforms)
libgowhisper-fallback.so: sources/whisper.cpp
$(MAKE) purge
$(info ${GREEN}I whisper build info:fallback${RESET})
SO_TARGET=libgowhisper-fallback.so CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off" $(MAKE) libgowhisper-custom
rm -rfv build*
libgowhisper-custom: CMakeLists.txt gowhisper.cpp gowhisper.h
mkdir -p build-$(SO_TARGET) && \
cd build-$(SO_TARGET) && \
cmake .. $(CMAKE_ARGS) && \
cmake --build . --config Release -j$(JOBS) && \
cd .. && \
mv build-$(SO_TARGET)/libgowhisper.so ./$(SO_TARGET)
all: whisper package


@@ -1,156 +0,0 @@
#include "gowhisper.h"
#include "ggml-backend.h"
#include "whisper.h"
#include <vector>
static struct whisper_vad_context *vctx;
static struct whisper_context *ctx;
static std::vector<float> flat_segs;
static void ggml_log_cb(enum ggml_log_level level, const char *log,
void *data) {
const char *level_str;
if (!log) {
return;
}
switch (level) {
case GGML_LOG_LEVEL_DEBUG:
level_str = "DEBUG";
break;
case GGML_LOG_LEVEL_INFO:
level_str = "INFO";
break;
case GGML_LOG_LEVEL_WARN:
level_str = "WARN";
break;
case GGML_LOG_LEVEL_ERROR:
level_str = "ERROR";
break;
default: /* Potential future-proofing */
level_str = "?????";
break;
}
fprintf(stderr, "[%-5s] ", level_str);
fputs(log, stderr);
fflush(stderr);
}
int load_model(const char *const model_path) {
whisper_log_set(ggml_log_cb, nullptr);
ggml_backend_load_all();
struct whisper_context_params cparams = whisper_context_default_params();
ctx = whisper_init_from_file_with_params(model_path, cparams);
if (ctx == nullptr) {
fprintf(stderr, "error: Also failed to init model as transcriber\n");
return 1;
}
return 0;
}
int load_model_vad(const char *const model_path) {
whisper_log_set(ggml_log_cb, nullptr);
ggml_backend_load_all();
struct whisper_vad_context_params vcparams =
whisper_vad_default_context_params();
// XXX: Overridden to false in upstream due to performance?
// vcparams.use_gpu = true;
vctx = whisper_vad_init_from_file_with_params(model_path, vcparams);
if (vctx == nullptr) {
fprintf(stderr, "error: Failed to init model as VAD\n");
return 1;
}
return 0;
}
int vad(float pcmf32[], size_t pcmf32_len, float **segs_out,
size_t *segs_out_len) {
if (!whisper_vad_detect_speech(vctx, pcmf32, pcmf32_len)) {
fprintf(stderr, "error: failed to detect speech\n");
return 1;
}
struct whisper_vad_params params = whisper_vad_default_params();
struct whisper_vad_segments *segs =
whisper_vad_segments_from_probs(vctx, params);
size_t segn = whisper_vad_segments_n_segments(segs);
// fprintf(stderr, "Got segments %zd\n", segn);
flat_segs.clear();
for (int i = 0; i < segn; i++) {
flat_segs.push_back(whisper_vad_segments_get_segment_t0(segs, i));
flat_segs.push_back(whisper_vad_segments_get_segment_t1(segs, i));
}
// fprintf(stderr, "setting out variables: %p=%p -> %p, %p=%zx -> %zx\n",
// segs_out, *segs_out, flat_segs.data(), segs_out_len, *segs_out_len,
// flat_segs.size());
*segs_out = flat_segs.data();
*segs_out_len = flat_segs.size();
// fprintf(stderr, "freeing segs\n");
whisper_vad_free_segments(segs);
// fprintf(stderr, "returning\n");
return 0;
}
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len, char *prompt) {
whisper_full_params wparams =
whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
wparams.n_threads = threads;
if (*lang != '\0')
wparams.language = lang;
else {
wparams.language = nullptr;
}
wparams.translate = translate;
wparams.debug_mode = true;
wparams.print_progress = true;
wparams.tdrz_enable = tdrz;
wparams.initial_prompt = prompt;
fprintf(stderr, "info: Enable tdrz: %d\n", tdrz);
fprintf(stderr, "info: Initial prompt: \"%s\"\n", prompt);
if (whisper_full(ctx, wparams, pcmf32, pcmf32_len)) {
fprintf(stderr, "error: transcription failed\n");
return 1;
}
*segs_out_len = whisper_full_n_segments(ctx);
return 0;
}
const char *get_segment_text(int i) {
return whisper_full_get_segment_text(ctx, i);
}
int64_t get_segment_t0(int i) { return whisper_full_get_segment_t0(ctx, i); }
int64_t get_segment_t1(int i) { return whisper_full_get_segment_t1(ctx, i); }
int n_tokens(int i) { return whisper_full_n_tokens(ctx, i); }
int32_t get_token_id(int i, int j) {
return whisper_full_get_token_id(ctx, i, j);
}
bool get_segment_speaker_turn_next(int i) {
return whisper_full_get_segment_speaker_turn_next(ctx, i);
}


@@ -1,161 +0,0 @@
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"unsafe"
"github.com/go-audio/wav"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/utils"
)
var (
CppLoadModel func(modelPath string) int
CppLoadModelVAD func(modelPath string) int
CppVAD func(pcmf32 []float32, pcmf32Size uintptr, segsOut unsafe.Pointer, segsOutLen unsafe.Pointer) int
CppTranscribe func(threads uint32, lang string, translate bool, diarize bool, pcmf32 []float32, pcmf32Len uintptr, segsOutLen unsafe.Pointer, prompt string) int
CppGetSegmentText func(i int) string
CppGetSegmentStart func(i int) int64
CppGetSegmentEnd func(i int) int64
CppNTokens func(i int) int
CppGetTokenID func(i int, j int) int
CppGetSegmentSpeakerTurnNext func(i int) bool
)
type Whisper struct {
base.SingleThread
}
func (w *Whisper) Load(opts *pb.ModelOptions) error {
vadOnly := false
for _, oo := range opts.Options {
if oo == "vad_only" {
vadOnly = true
} else {
fmt.Fprintf(os.Stderr, "Unrecognized option: %v\n", oo)
}
}
if vadOnly {
if ret := CppLoadModelVAD(opts.ModelFile); ret != 0 {
return fmt.Errorf("Failed to load Whisper VAD model")
}
return nil
}
if ret := CppLoadModel(opts.ModelFile); ret != 0 {
return fmt.Errorf("Failed to load Whisper transcription model")
}
return nil
}
func (w *Whisper) VAD(req *pb.VADRequest) (pb.VADResponse, error) {
audio := req.Audio
// We expect 0xdeadbeef to be overwritten and if we see it in a stack trace we know it wasn't
segsPtr, segsLen := uintptr(0xdeadbeef), uintptr(0xdeadbeef)
segsPtrPtr, segsLenPtr := unsafe.Pointer(&segsPtr), unsafe.Pointer(&segsLen)
if ret := CppVAD(audio, uintptr(len(audio)), segsPtrPtr, segsLenPtr); ret != 0 {
return pb.VADResponse{}, fmt.Errorf("Failed VAD")
}
// Happens when CPP vector has not had any elements pushed to it
if segsPtr == 0 {
return pb.VADResponse{
Segments: []*pb.VADSegment{},
}, nil
}
// unsafeptr warning is caused by segsPtr being on the stack and therefore being subject to stack copying, AFAICT
// however the stack shouldn't have grown between setting segsPtr and now, also the memory pointed to is allocated by C++
segs := unsafe.Slice((*float32)(unsafe.Pointer(segsPtr)), segsLen)
vadSegments := []*pb.VADSegment{}
for i := range len(segs) >> 1 {
s := segs[2*i] / 100
t := segs[2*i+1] / 100
vadSegments = append(vadSegments, &pb.VADSegment{
Start: s,
End: t,
})
}
return pb.VADResponse{
Segments: vadSegments,
}, nil
}
func (w *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
dir, err := os.MkdirTemp("", "whisper")
if err != nil {
return pb.TranscriptResult{}, err
}
defer os.RemoveAll(dir)
convertedPath := filepath.Join(dir, "converted.wav")
if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
return pb.TranscriptResult{}, err
}
// Open samples
fh, err := os.Open(convertedPath)
if err != nil {
return pb.TranscriptResult{}, err
}
defer fh.Close()
// Read samples
d := wav.NewDecoder(fh)
buf, err := d.FullPCMBuffer()
if err != nil {
return pb.TranscriptResult{}, err
}
data := buf.AsFloat32Buffer().Data
segsLen := uintptr(0xdeadbeef)
segsLenPtr := unsafe.Pointer(&segsLen)
if ret := CppTranscribe(opts.Threads, opts.Language, opts.Translate, opts.Diarize, data, uintptr(len(data)), segsLenPtr, opts.Prompt); ret != 0 {
return pb.TranscriptResult{}, fmt.Errorf("Failed Transcribe")
}
segments := []*pb.TranscriptSegment{}
text := ""
for i := range int(segsLen) {
s := CppGetSegmentStart(i)
t := CppGetSegmentEnd(i)
txt := strings.Clone(CppGetSegmentText(i))
tokens := make([]int32, CppNTokens(i))
if opts.Diarize && CppGetSegmentSpeakerTurnNext(i) {
txt += " [SPEAKER_TURN]"
}
for j := range tokens {
tokens[j] = int32(CppGetTokenID(i, j))
}
segment := &pb.TranscriptSegment{
Id: int32(i),
Text: txt,
Start: s, End: t,
Tokens: tokens,
}
segments = append(segments, segment)
text += " " + strings.TrimSpace(txt)
}
return pb.TranscriptResult{
Segments: segments,
Text: strings.TrimSpace(text),
}, nil
}


@@ -1,18 +0,0 @@
#include <cstddef>
#include <cstdint>
extern "C" {
int load_model(const char *const model_path);
int load_model_vad(const char *const model_path);
int vad(float pcmf32[], size_t pcmf32_size, float **segs_out,
size_t *segs_out_len);
int transcribe(uint32_t threads, char *lang, bool translate, bool tdrz,
float pcmf32[], size_t pcmf32_len, size_t *segs_out_len,
char *prompt);
const char *get_segment_text(int i);
int64_t get_segment_t0(int i);
int64_t get_segment_t1(int i);
int n_tokens(int i);
int32_t get_token_id(int i, int j);
bool get_segment_speaker_turn_next(int i);
}


@@ -1,11 +1,10 @@
package main package main
// Note: this is started internally by LocalAI and a server is allocated for each model // Note: this is started internally by LocalAI and a server is allocated for each model
import ( import (
"flag" "flag"
"os"
"github.com/ebitengine/purego"
grpc "github.com/mudler/LocalAI/pkg/grpc" grpc "github.com/mudler/LocalAI/pkg/grpc"
) )
@@ -13,40 +12,7 @@ var (
addr = flag.String("addr", "localhost:50051", "the address to connect to") addr = flag.String("addr", "localhost:50051", "the address to connect to")
) )
type LibFuncs struct {
FuncPtr any
Name string
}
func main() { func main() {
// Get library name from environment variable, default to fallback
libName := os.Getenv("WHISPER_LIBRARY")
if libName == "" {
libName = "./libgowhisper-fallback.so"
}
gosd, err := purego.Dlopen(libName, purego.RTLD_NOW|purego.RTLD_GLOBAL)
if err != nil {
panic(err)
}
libFuncs := []LibFuncs{
{&CppLoadModel, "load_model"},
{&CppLoadModelVAD, "load_model_vad"},
{&CppVAD, "vad"},
{&CppTranscribe, "transcribe"},
{&CppGetSegmentText, "get_segment_text"},
{&CppGetSegmentStart, "get_segment_t0"},
{&CppGetSegmentEnd, "get_segment_t1"},
{&CppNTokens, "n_tokens"},
{&CppGetTokenID, "get_token_id"},
{&CppGetSegmentSpeakerTurnNext, "get_segment_speaker_turn_next"},
}
for _, lf := range libFuncs {
purego.RegisterLibFunc(lf.FuncPtr, gosd, lf.Name)
}
flag.Parse() flag.Parse()
if err := grpc.StartServer(*addr, &Whisper{}); err != nil { if err := grpc.StartServer(*addr, &Whisper{}); err != nil {


@@ -6,14 +6,12 @@
set -e set -e
CURDIR=$(dirname "$(realpath $0)") CURDIR=$(dirname "$(realpath $0)")
REPO_ROOT="${CURDIR}/../../.."
# Create lib directory # Create lib directory
mkdir -p $CURDIR/package/lib mkdir -p $CURDIR/package/lib
cp -avf $CURDIR/whisper $CURDIR/package/ cp -avrf $CURDIR/whisper $CURDIR/package/
cp -fv $CURDIR/libgowhisper-*.so $CURDIR/package/ cp -rfv $CURDIR/run.sh $CURDIR/package/
cp -fv $CURDIR/run.sh $CURDIR/package/
# Detect architecture and copy appropriate libraries # Detect architecture and copy appropriate libraries
if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then if [ -f "/lib64/ld-linux-x86-64.so.2" ]; then
@@ -44,22 +42,11 @@ elif [ -f "/lib/ld-linux-aarch64.so.1" ]; then
cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2 cp -arfLv /lib/aarch64-linux-gnu/libdl.so.2 $CURDIR/package/lib/libdl.so.2
cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1 cp -arfLv /lib/aarch64-linux-gnu/librt.so.1 $CURDIR/package/lib/librt.so.1
cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0 cp -arfLv /lib/aarch64-linux-gnu/libpthread.so.0 $CURDIR/package/lib/libpthread.so.0
elif [ $(uname -s) = "Darwin" ]; then
echo "Detected Darwin"
else else
echo "Error: Could not detect architecture" echo "Error: Could not detect architecture"
exit 1 exit 1
fi fi
# Package GPU libraries based on BUILD_TYPE echo "Packaging completed successfully"
# The GPU library packaging script will detect BUILD_TYPE and copy appropriate GPU libraries
GPU_LIB_SCRIPT="${REPO_ROOT}/scripts/build/package-gpu-libs.sh"
if [ -f "$GPU_LIB_SCRIPT" ]; then
echo "Packaging GPU libraries for BUILD_TYPE=${BUILD_TYPE:-cpu}..."
source "$GPU_LIB_SCRIPT" "$CURDIR/package/lib"
package_gpu_libs
fi
echo "Packaging completed successfully"
ls -liah $CURDIR/package/ ls -liah $CURDIR/package/
ls -liah $CURDIR/package/lib/ ls -liah $CURDIR/package/lib/


@@ -1,52 +1,14 @@
#!/bin/bash #!/bin/bash
set -ex set -ex
# Get the absolute current dir where the script is located
CURDIR=$(dirname "$(realpath $0)") CURDIR=$(dirname "$(realpath $0)")
cd /
echo "CPU info:"
if [ "$(uname)" != "Darwin" ]; then
grep -e "model\sname" /proc/cpuinfo | head -1
grep -e "flags" /proc/cpuinfo | head -1
fi
LIBRARY="$CURDIR/libgowhisper-fallback.so"
if [ "$(uname)" != "Darwin" ]; then
if grep -q -e "\savx\s" /proc/cpuinfo ; then
echo "CPU: AVX found OK"
if [ -e $CURDIR/libgowhisper-avx.so ]; then
LIBRARY="$CURDIR/libgowhisper-avx.so"
fi
fi
if grep -q -e "\savx2\s" /proc/cpuinfo ; then
echo "CPU: AVX2 found OK"
if [ -e $CURDIR/libgowhisper-avx2.so ]; then
LIBRARY="$CURDIR/libgowhisper-avx2.so"
fi
fi
# Check avx 512
if grep -q -e "\savx512f\s" /proc/cpuinfo ; then
echo "CPU: AVX512F found OK"
if [ -e $CURDIR/libgowhisper-avx512.so ]; then
LIBRARY="$CURDIR/libgowhisper-avx512.so"
fi
fi
fi
export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$CURDIR/lib:$LD_LIBRARY_PATH
export WHISPER_LIBRARY=$LIBRARY
# If there is a lib/ld.so, use it # If there is a lib/ld.so, use it
if [ -f $CURDIR/lib/ld.so ]; then if [ -f $CURDIR/lib/ld.so ]; then
echo "Using lib/ld.so" echo "Using lib/ld.so"
echo "Using library: $LIBRARY"
exec $CURDIR/lib/ld.so $CURDIR/whisper "$@" exec $CURDIR/lib/ld.so $CURDIR/whisper "$@"
fi fi
echo "Using library: $LIBRARY"
exec $CURDIR/whisper "$@" exec $CURDIR/whisper "$@"


@@ -0,0 +1,105 @@
package main
// This is a wrapper to satisfy the gRPC service interface
// It is meant to be used by the main executable that is the server for the specific backend type (falcon, gpt3, etc)
import (
"os"
"path/filepath"
"github.com/ggerganov/whisper.cpp/bindings/go/pkg/whisper"
"github.com/go-audio/wav"
"github.com/mudler/LocalAI/pkg/grpc/base"
pb "github.com/mudler/LocalAI/pkg/grpc/proto"
"github.com/mudler/LocalAI/pkg/utils"
)
type Whisper struct {
base.SingleThread
whisper whisper.Model
}
func (sd *Whisper) Load(opts *pb.ModelOptions) error {
// Note: the Model here is a path to a directory containing the model files
w, err := whisper.New(opts.ModelFile)
sd.whisper = w
return err
}
func (sd *Whisper) AudioTranscription(opts *pb.TranscriptRequest) (pb.TranscriptResult, error) {
dir, err := os.MkdirTemp("", "whisper")
if err != nil {
return pb.TranscriptResult{}, err
}
defer os.RemoveAll(dir)
convertedPath := filepath.Join(dir, "converted.wav")
if err := utils.AudioToWav(opts.Dst, convertedPath); err != nil {
return pb.TranscriptResult{}, err
}
// Open samples
fh, err := os.Open(convertedPath)
if err != nil {
return pb.TranscriptResult{}, err
}
defer fh.Close()
// Read samples
d := wav.NewDecoder(fh)
buf, err := d.FullPCMBuffer()
if err != nil {
return pb.TranscriptResult{}, err
}
data := buf.AsFloat32Buffer().Data
// Process samples
context, err := sd.whisper.NewContext()
if err != nil {
return pb.TranscriptResult{}, err
}
context.SetThreads(uint(opts.Threads))
if opts.Language != "" {
context.SetLanguage(opts.Language)
} else {
context.SetLanguage("auto")
}
if opts.Translate {
context.SetTranslate(true)
}
if err := context.Process(data, nil, nil, nil); err != nil {
return pb.TranscriptResult{}, err
}
segments := []*pb.TranscriptSegment{}
text := ""
for {
s, err := context.NextSegment()
if err != nil {
break
}
var tokens []int32
for _, t := range s.Tokens {
tokens = append(tokens, int32(t.Id))
}
segment := &pb.TranscriptSegment{Id: int32(s.Num), Text: s.Text, Start: int64(s.Start), End: int64(s.End), Tokens: tokens}
segments = append(segments, segment)
text += s.Text
}
return pb.TranscriptResult{
Segments: segments,
Text: text,
}, nil
}

File diff suppressed because it is too large

@@ -1,190 +1,38 @@
# Python Backends for LocalAI

This directory contains Python-based AI backends for LocalAI, providing support for various AI models and hardware acceleration targets.

## Overview

The Python backends use a unified build system based on `libbackend.sh` that provides:

- **Automatic virtual environment management** with support for both `uv` and `pip`
- **Hardware-specific dependency installation** (CPU, CUDA, Intel, MLX, etc.)
- **Portable Python support** for standalone deployments
- **Consistent backend execution** across different environments

## Available Backends

### Core AI Models

- **transformers** - Hugging Face Transformers framework (PyTorch-based)
- **vllm** - High-performance LLM inference engine
- **mlx** - Apple Silicon optimized ML framework
- **exllama2** - ExLlama2 quantized models

### Audio & Speech

- **bark** - Text-to-speech synthesis
- **coqui** - Coqui TTS models
- **faster-whisper** - Fast Whisper speech recognition
- **kitten-tts** - Lightweight TTS
- **mlx-audio** - Apple Silicon audio processing
- **chatterbox** - TTS model
- **kokoro** - TTS models

### Computer Vision

- **diffusers** - Stable Diffusion and image generation
- **mlx-vlm** - Vision-language models for Apple Silicon
- **rfdetr** - Object detection models

### Specialized

- **rerankers** - Text reranking models

## Quick Start

### Prerequisites

- Python 3.10+ (default: 3.10.18)
- `uv` package manager (recommended) or `pip`
- Appropriate hardware drivers for your target (CUDA, Intel, etc.)

### Installation

Each backend can be installed individually:

```bash
# Navigate to a specific backend
cd backend/python/transformers

# Install dependencies
make transformers
# or
bash install.sh

# Run the backend
make run
# or
bash run.sh
```

### Using the Unified Build System

The `libbackend.sh` script provides consistent commands across all backends:

```bash
# Source the library in your backend script
source $(dirname $0)/../common/libbackend.sh

# Install requirements (automatically handles hardware detection)
installRequirements

# Start the backend server
startBackend $@

# Run tests
runUnittests
```

## Hardware Targets

The build system automatically detects and configures for different hardware:

- **CPU** - Standard CPU-only builds
- **CUDA** - NVIDIA GPU acceleration (supports CUDA 12/13)
- **Intel** - Intel XPU/GPU optimization
- **MLX** - Apple Silicon (M1/M2/M3) optimization
- **HIP** - AMD GPU acceleration

### Target-Specific Requirements

Backends can specify hardware-specific dependencies (see the selection sketch below):

- `requirements.txt` - Base requirements
- `requirements-cpu.txt` - CPU-specific packages
- `requirements-cublas12.txt` - CUDA 12 packages
- `requirements-cublas13.txt` - CUDA 13 packages
- `requirements-intel.txt` - Intel-optimized packages
- `requirements-mps.txt` - Apple Silicon packages
## Configuration Options
### Environment Variables
- `PYTHON_VERSION` - Python version (default: 3.10)
- `PYTHON_PATCH` - Python patch version (default: 18)
- `BUILD_TYPE` - Force specific build target
- `USE_PIP` - Use pip instead of uv (default: false)
- `PORTABLE_PYTHON` - Enable portable Python builds
- `LIMIT_TARGETS` - Restrict backend to specific targets
### Example: CUDA 12 Only Backend
```bash
# In your backend script
LIMIT_TARGETS="cublas12"
source $(dirname $0)/../common/libbackend.sh
``` ```
### Example: Intel-Optimized Backend ## Install the packages to your environment
```bash Sometimes you need to install the packages from the conda-forge channel
# In your backend script
LIMIT_TARGETS="intel" By using `conda`
source $(dirname $0)/../common/libbackend.sh ```
conda install <your-package-name>
conda install -c conda-forge <your package-name>
``` ```
## Development Or by using `pip`
### Adding a New Backend
1. Create a new directory in `backend/python/`
2. Copy the template structure from `common/template/`
3. Implement your `backend.py` with the required gRPC interface
4. Add appropriate requirements files for your target hardware
5. Use `libbackend.sh` for consistent build and execution
### Testing
```bash
# Run backend tests
make test
# or
bash test.sh
``` ```
pip install <your-package-name>
### Building
```bash
# Install dependencies
make <backend-name>
# Clean build artifacts
make clean
``` ```
## Architecture
Each backend follows a consistent structure:
```
backend-name/
├── backend.py # Main backend implementation
├── requirements.txt # Base dependencies
├── requirements-*.txt # Hardware-specific dependencies
├── install.sh # Installation script
├── run.sh # Execution script
├── test.sh # Test script
├── Makefile # Build targets
└── test.py # Unit tests
```
## Troubleshooting
### Common Issues
1. **Missing dependencies**: Ensure all requirements files are properly configured
2. **Hardware detection**: Check that `BUILD_TYPE` matches your system
3. **Python version**: Verify Python 3.10+ is available
4. **Virtual environment**: Use `ensureVenv` to create/activate environments
## Contributing
When adding new backends or modifying existing ones:
1. Follow the established directory structure
2. Use `libbackend.sh` for consistent behavior
3. Include appropriate requirements files for all target hardware
4. Add comprehensive tests
5. Update this README if adding new backend types
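
Note: step 3 of "Adding a New Backend" in the removed README above asks for a `backend.py` that implements the LocalAI gRPC interface. As a rough, non-authoritative sketch of that shape — based on the `backend_pb2_grpc.BackendServicer` usage visible in the chatterbox backend later in this diff; the `serve()` wiring, the `Health` reply payload, and the port are assumptions rather than the actual LocalAI template — a minimal servicer might look like:

```python
#!/usr/bin/env python3
# Minimal sketch of a LocalAI Python backend (hypothetical; real backends are
# generated from common/template/ and launched via libbackend.sh / run.sh).
from concurrent import futures

import grpc
import backend_pb2
import backend_pb2_grpc


class BackendServicer(backend_pb2_grpc.BackendServicer):
    def Health(self, request, context):
        # Assumed health reply shape; LocalAI polls this to see the backend is alive.
        return backend_pb2.Reply(message=bytes("OK", "utf-8"))

    def LoadModel(self, request, context):
        # Load your model here using fields from the LoadModel request.
        return backend_pb2.Result(success=True, message="Model loaded")


def serve(address="localhost:50051"):
    # Assumed address; the real backends take it from CLI args.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    backend_pb2_grpc.add_BackendServicer_to_server(BackendServicer(), server)
    server.add_insecure_port(address)
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    serve()
```

In the real backends the listen address and worker count come from the command line and `PYTHON_GRPC_MAX_WORKERS`, and `run.sh` / `startBackend` take care of launching the process.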

View File

@@ -1,23 +1,29 @@
 .PHONY: ttsbark
-ttsbark:
+ttsbark: protogen
     bash install.sh
 
 .PHONY: run
-run: ttsbark
+run: protogen
     @echo "Running bark..."
     bash run.sh
     @echo "bark run."
 
 .PHONY: test
-test: ttsbark
+test: protogen
     @echo "Testing bark..."
     bash test.sh
     @echo "bark tested."
 
+.PHONY: protogen
+protogen: backend_pb2_grpc.py backend_pb2.py
+
 .PHONY: protogen-clean
 protogen-clean:
     $(RM) backend_pb2_grpc.py backend_pb2.py
 
+backend_pb2_grpc.py backend_pb2.py:
+    python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
+
 .PHONY: clean
 clean: protogen-clean
     rm -rf venv __pycache__

View File

@@ -0,0 +1,5 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.4.1+cu118
torchaudio==2.4.1+cu118
transformers
accelerate

View File

@@ -1,5 +1,5 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.4
-torch==2.8.0+rocm6.4
-torchaudio==2.8.0+rocm6.4
+--extra-index-url https://download.pytorch.org/whl/rocm6.0
+torch==2.4.1+rocm6.0
+torchaudio==2.4.1+rocm6.0
 transformers
 accelerate

View File

@@ -1,5 +1,5 @@
 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
-intel-extension-for-pytorch==2.8.10+xpu
+intel-extension-for-pytorch==2.3.110+xpu
 torch==2.3.1+cxx11.abi
 torchaudio==2.3.1+cxx11.abi
 oneccl_bind_pt==2.3.100+xpu

View File

@@ -1,4 +1,4 @@
 bark==0.1.5
-grpcio==1.76.0
+grpcio==1.71.0
 protobuf
 certifi

View File

@@ -1,23 +1,29 @@
-.PHONY: chatterbox
-chatterbox:
+.PHONY: coqui
+coqui: protogen
     bash install.sh
 
 .PHONY: run
-run: chatterbox
+run: protogen
     @echo "Running coqui..."
     bash run.sh
     @echo "coqui run."
 
 .PHONY: test
-test: chatterbox
+test: protogen
     @echo "Testing coqui..."
     bash test.sh
     @echo "coqui tested."
 
+.PHONY: protogen
+protogen: backend_pb2_grpc.py backend_pb2.py
+
 .PHONY: protogen-clean
 protogen-clean:
     $(RM) backend_pb2_grpc.py backend_pb2.py
 
+backend_pb2_grpc.py backend_pb2.py:
+    python3 -m grpc_tools.protoc -I../.. -I./ --python_out=. --grpc_python_out=. backend.proto
+
 .PHONY: clean
 clean: protogen-clean
     rm -rf venv __pycache__

View File

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 """
-This is an extra gRPC server of LocalAI for Chatterbox TTS
+This is an extra gRPC server of LocalAI for Bark TTS
 """
 from concurrent import futures
 import time
@@ -14,98 +14,15 @@ import backend_pb2_grpc
 import torch
 import torchaudio as ta
 from chatterbox.tts import ChatterboxTTS
-from chatterbox.mtl_tts import ChatterboxMultilingualTTS
 import grpc
-import tempfile
-
-def is_float(s):
-    """Check if a string can be converted to float."""
-    try:
-        float(s)
-        return True
-    except ValueError:
-        return False
-
-def is_int(s):
-    """Check if a string can be converted to int."""
-    try:
-        int(s)
-        return True
-    except ValueError:
-        return False
-
-def split_text_at_word_boundary(text, max_length=250):
-    """
-    Split text at word boundaries without truncating words.
-    Returns a list of text chunks.
-    """
-    if not text or len(text) <= max_length:
-        return [text]
-
-    chunks = []
-    words = text.split()
-    current_chunk = ""
-
-    for word in words:
-        # Check if adding this word would exceed the limit
-        if len(current_chunk) + len(word) + 1 <= max_length:
-            if current_chunk:
-                current_chunk += " " + word
-            else:
-                current_chunk = word
-        else:
-            # If current chunk is not empty, add it to chunks
-            if current_chunk:
-                chunks.append(current_chunk)
-                current_chunk = word
-            else:
-                # If a single word is longer than max_length, we have to include it anyway
-                chunks.append(word)
-                current_chunk = ""
-
-    # Add the last chunk if it's not empty
-    if current_chunk:
-        chunks.append(current_chunk)
-
-    return chunks
-
-def merge_audio_files(audio_files, output_path, sample_rate):
-    """
-    Merge multiple audio files into a single audio file.
-    """
-    if not audio_files:
-        return
-
-    if len(audio_files) == 1:
-        # If only one file, just copy it
-        import shutil
-        shutil.copy2(audio_files[0], output_path)
-        return
-
-    # Load all audio files
-    waveforms = []
-    for audio_file in audio_files:
-        waveform, sr = ta.load(audio_file)
-        if sr != sample_rate:
-            # Resample if necessary
-            resampler = ta.transforms.Resample(sr, sample_rate)
-            waveform = resampler(waveform)
-        waveforms.append(waveform)
-
-    # Concatenate all waveforms
-    merged_waveform = torch.cat(waveforms, dim=1)
-
-    # Save the merged audio
-    ta.save(output_path, merged_waveform, sample_rate)
-
-    # Clean up temporary files
-    for audio_file in audio_files:
-        if os.path.exists(audio_file):
-            os.remove(audio_file)
-
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
 
 # If MAX_WORKERS are specified in the environment use it, otherwise default to 1
 MAX_WORKERS = int(os.environ.get('PYTHON_GRPC_MAX_WORKERS', '1'))
+COQUI_LANGUAGE = os.environ.get('COQUI_LANGUAGE', None)
 
 # Implement the BackendServicer class with the service methods
 class BackendServicer(backend_pb2_grpc.BackendServicer):
@@ -124,34 +41,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
         else:
             print("CUDA is not available", file=sys.stderr)
             device = "cpu"
-
-        mps_available = hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
-        if mps_available:
-            device = "mps"
-
         if not torch.cuda.is_available() and request.CUDA:
             return backend_pb2.Result(success=False, message="CUDA is not available")
 
-        options = request.Options
-
-        # empty dict
-        self.options = {}
-
-        # The options are a list of strings in this form optname:optvalue
-        # We are storing all the options in a dict so we can use it later when
-        # generating the images
-        for opt in options:
-            if ":" not in opt:
-                continue
-            key, value = opt.split(":")
-
-            # if value is a number, convert it to the appropriate type
-            if is_float(value):
-                value = float(value)
-            elif is_int(value):
-                value = int(value)
-            elif value.lower() in ["true", "false"]:
-                value = value.lower() == "true"
-
-            self.options[key] = value
-
         self.AudioPath = None
         if os.path.isabs(request.AudioPath):
@@ -161,14 +54,10 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
             modelFileBase = os.path.dirname(request.ModelFile)
             # modify LoraAdapter to be relative to modelFileBase
             self.AudioPath = os.path.join(modelFileBase, request.AudioPath)
 
         try:
             print("Preparing models, please wait", file=sys.stderr)
-            if "multilingual" in self.options:
-                # remove key from options
-                del self.options["multilingual"]
-                self.model = ChatterboxMultilingualTTS.from_pretrained(device=device)
-            else:
-                self.model = ChatterboxTTS.from_pretrained(device=device)
+            self.model = ChatterboxTTS.from_pretrained(device=device)
         except Exception as err:
             return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
         # Implement your logic here for the LoadModel service
@@ -177,43 +66,14 @@ class BackendServicer(backend_pb2_grpc.BackendServicer):
     def TTS(self, request, context):
         try:
-            kwargs = {}
-
-            if "language" in self.options:
-                kwargs["language_id"] = self.options["language"]
+            # Generate audio using ChatterboxTTS
             if self.AudioPath is not None:
-                kwargs["audio_prompt_path"] = self.AudioPath
-
-            # add options to kwargs
-            kwargs.update(self.options)
-
-            # Check if text exceeds 250 characters
-            # (chatterbox does not support long text)
-            # https://github.com/resemble-ai/chatterbox/issues/60
-            # https://github.com/resemble-ai/chatterbox/issues/110
-            if len(request.text) > 250:
-                # Split text at word boundaries
-                text_chunks = split_text_at_word_boundary(request.text, max_length=250)
-                print(f"Splitting text into chunks of 250 characters: {len(text_chunks)}", file=sys.stderr)
-
-                # Generate audio for each chunk
-                temp_audio_files = []
-                for i, chunk in enumerate(text_chunks):
-                    # Generate audio for this chunk
-                    wav = self.model.generate(chunk, **kwargs)
-
-                    # Create temporary file for this chunk
-                    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.wav')
-                    temp_file.close()
-                    ta.save(temp_file.name, wav, self.model.sr)
-                    temp_audio_files.append(temp_file.name)
-
-                # Merge all audio files
-                merge_audio_files(temp_audio_files, request.dst, self.model.sr)
+                wav = self.model.generate(request.text, audio_prompt_path=self.AudioPath)
             else:
-                # Generate audio using ChatterboxTTS for short text
-                wav = self.model.generate(request.text, **kwargs)
+                wav = self.model.generate(request.text)
             # Save the generated audio
             ta.save(request.dst, wav, self.model.sr)
         except Exception as err:
             return backend_pb2.Result(success=False, message=f"Unexpected {err=}, {type(err)=}")
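
For reference, the chunking behaviour of the removed `split_text_at_word_boundary()` helper above can be sanity-checked in isolation. The following standalone sketch (with a made-up sample string) condenses the same word-boundary logic and shows that no chunk exceeds the 250-character limit that chatterbox can handle:

```python
# Standalone illustration of the word-boundary splitting used by the removed
# chatterbox TTS code above; the sample text below is made up.
def split_text_at_word_boundary(text, max_length=250):
    if not text or len(text) <= max_length:
        return [text]
    chunks, current = [], ""
    for word in text.split():
        if len(current) + len(word) + 1 <= max_length:
            current = f"{current} {word}".strip()
        else:
            if current:
                chunks.append(current)
                current = word
            else:
                # A single word longer than max_length is emitted as-is.
                chunks.append(word)
                current = ""
    if current:
        chunks.append(current)
    return chunks

sample = ("word " * 120).strip()          # ~600 characters
chunks = split_text_at_word_boundary(sample, max_length=250)
assert all(len(c) <= 250 for c in chunks)  # no chunk exceeds the limit
assert " ".join(chunks) == sample          # no words are lost or truncated
print(len(chunks), [len(c) for c in chunks])
```

The removed backend then synthesizes each chunk separately and stitches the resulting WAV files back together with `merge_audio_files()`.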

View File

@@ -15,11 +15,5 @@ fi
if [ "x${BUILD_PROFILE}" == "xintel" ]; then if [ "x${BUILD_PROFILE}" == "xintel" ]; then
EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match" EXTRA_PIP_INSTALL_FLAGS+=" --upgrade --index-strategy=unsafe-first-match"
fi fi
EXTRA_PIP_INSTALL_FLAGS+=" --no-build-isolation"
if [ "x${BUILD_PROFILE}" == "xl4t12" ]; then
USE_PIP=true
fi
installRequirements installRequirements

View File

@@ -1,9 +1,5 @@
---extra-index-url https://download.pytorch.org/whl/cpu
 accelerate
-torch
-torchaudio
-numpy>=1.24.0,<1.26.0
-transformers
-# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
-chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
-#chatterbox-tts==0.1.4
+torch==2.6.0
+torchaudio==2.6.0
+transformers==4.46.3
+chatterbox-tts

View File

@@ -0,0 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/cu118
torch==2.6.0+cu118
torchaudio==2.6.0+cu118
transformers==4.46.3
chatterbox-tts
accelerate

View File

@@ -1,7 +1,5 @@
-torch
-torchaudio
-transformers
-numpy>=1.24.0,<1.26.0
-# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
-chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
+torch==2.6.0
+torchaudio==2.6.0
+transformers==4.46.3
+chatterbox-tts
 accelerate

View File

@@ -1,8 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
torchaudio
transformers
numpy>=1.24.0,<1.26.0
# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate

View File

@@ -1,8 +1,6 @@
---extra-index-url https://download.pytorch.org/whl/rocm6.4
-torch==2.9.1+rocm6.4
-torchaudio==2.9.1+rocm6.4
-transformers
-numpy>=1.24.0,<1.26.0
-# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
-chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
+--extra-index-url https://download.pytorch.org/whl/rocm6.0
+torch==2.6.0+rocm6.1
+torchaudio==2.6.0+rocm6.1
+transformers==4.46.3
+chatterbox-tts
 accelerate

View File

@@ -1,5 +0,0 @@
# Build dependencies needed for packages installed from source (e.g., git dependencies)
# When using --no-build-isolation, these must be installed in the venv first
wheel
setuptools
packaging

View File

@@ -2,11 +2,10 @@
 intel-extension-for-pytorch==2.3.110+xpu
 torch==2.3.1+cxx11.abi
 torchaudio==2.3.1+cxx11.abi
-transformers
-numpy>=1.24.0,<1.26.0
-# https://github.com/mudler/LocalAI/pull/6240#issuecomment-3329518289
-chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
+transformers==4.46.3
+chatterbox-tts
 accelerate
 oneccl_bind_pt==2.3.100+xpu
 optimum[openvino]
 setuptools
+accelerate

View File

@@ -1,7 +0,0 @@
--extra-index-url https://pypi.jetson-ai-lab.io/jp6/cu126/
torch
torchaudio
transformers
numpy>=1.24.0,<1.26.0
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate

View File

@@ -1,7 +0,0 @@
--extra-index-url https://download.pytorch.org/whl/cu130
torch
torchaudio
transformers
numpy>=1.24.0,<1.26.0
chatterbox-tts@git+https://git@github.com/mudler/chatterbox.git@faster
accelerate

View File

@@ -2,5 +2,4 @@ grpcio==1.71.0
 protobuf
 certifi
 packaging
 setuptools
-poetry

Some files were not shown because too many files have changed in this diff