mirror of
https://github.com/mudler/LocalAI.git
synced 2025-12-27 16:39:16 -05:00
* feat: split remaining backends and drop embedded backends - Drop silero-vad, huggingface, and stores backend from embedded binaries - Refactor Makefile and Dockerfile to avoid building grpc backends - Drop golang code that was used to embed backends - Simplify building by using goreleaser Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore(gallery): be specific with llama-cpp backend templates Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore(docs): update Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore(ci): minor fixes Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * chore: drop all ffmpeg references Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix: run protogen-go Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Always enable p2p mode Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Update gorelease file Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * fix(stores): do not always load Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Fix linting issues Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Simplify Signed-off-by: Ettore Di Giacinto <mudler@localai.io> * Mac OS fixup Signed-off-by: Ettore Di Giacinto <mudler@localai.io> --------- Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
70 lines
1.8 KiB
Go
70 lines
1.8 KiB
Go
package cli
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"os"
|
|
"path/filepath"
|
|
"strings"
|
|
|
|
"github.com/mudler/LocalAI/core/backend"
|
|
cliContext "github.com/mudler/LocalAI/core/cli/context"
|
|
"github.com/mudler/LocalAI/core/config"
|
|
"github.com/mudler/LocalAI/pkg/model"
|
|
"github.com/rs/zerolog/log"
|
|
)
|
|
|
|
// TTSCMD holds the command-line flags and arguments for the `tts`
// subcommand, which synthesizes speech from text via a TTS backend
// and writes the result to a wav file.
type TTSCMD struct {
	// Text is the positional input to synthesize; Run joins multiple
	// arguments with single spaces before synthesis.
	Text []string `arg:""`

	// Backend selects the TTS backend implementation (default "piper").
	Backend string `short:"b" default:"piper" help:"Backend to run the TTS model"`
	// Model is the (required) model name loaded for synthesis.
	Model string `short:"m" required:"" help:"Model name to run the TTS"`
	// Voice optionally selects a voice within the model.
	Voice string `short:"v" help:"Voice name to run the TTS"`
	// Language optionally selects the synthesis language.
	Language string `short:"l" help:"Language to use with the TTS"`
	// OutputFile, when set, is where the generated wav is moved;
	// otherwise the file stays in the system temp directory.
	OutputFile string `short:"o" type:"path" help:"The path to write the output wav file"`
	// ModelsPath is the directory containing the models.
	ModelsPath string `env:"LOCALAI_MODELS_PATH,MODELS_PATH" type:"path" default:"${basepath}/models" help:"Path containing models used for inferencing" group:"storage"`
}
|
|
|
|
func (t *TTSCMD) Run(ctx *cliContext.Context) error {
|
|
outputFile := t.OutputFile
|
|
outputDir := os.TempDir()
|
|
if outputFile != "" {
|
|
outputDir = filepath.Dir(outputFile)
|
|
}
|
|
|
|
text := strings.Join(t.Text, " ")
|
|
|
|
opts := &config.ApplicationConfig{
|
|
ModelPath: t.ModelsPath,
|
|
Context: context.Background(),
|
|
GeneratedContentDir: outputDir,
|
|
}
|
|
ml := model.NewModelLoader(opts.ModelPath, opts.SingleBackend)
|
|
|
|
defer func() {
|
|
err := ml.StopAllGRPC()
|
|
if err != nil {
|
|
log.Error().Err(err).Msg("unable to stop all grpc processes")
|
|
}
|
|
}()
|
|
|
|
options := config.BackendConfig{}
|
|
options.SetDefaults()
|
|
options.Backend = t.Backend
|
|
options.Model = t.Model
|
|
|
|
filePath, _, err := backend.ModelTTS(text, t.Voice, t.Language, ml, opts, options)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if outputFile != "" {
|
|
if err := os.Rename(filePath, outputFile); err != nil {
|
|
return err
|
|
}
|
|
fmt.Printf("Generate file %s\n", outputFile)
|
|
} else {
|
|
fmt.Printf("Generate file %s\n", filePath)
|
|
}
|
|
return nil
|
|
}
|