mirror of
https://github.com/mudler/LocalAI.git
synced 2026-05-16 03:45:20 -04:00
fix(p2p): adapt to backend changes, general improvements (#5889)
The binary is now named "llama-cpp-rpc-server" for p2p workers. We also decrease the default token rotation interval so that peer discovery is much more responsive. Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
This commit is contained in:
committed by
GitHub
parent
c717b8d800
commit
5f7ece3e94
@@ -1,6 +1,7 @@
|
||||
package worker
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -16,6 +17,10 @@ type LLamaCPP struct {
|
||||
WorkerFlags `embed:""`
|
||||
}
|
||||
|
||||
const (
|
||||
llamaCPPRPCBinaryName = "llama-cpp-rpc-server"
|
||||
)
|
||||
|
||||
func findLLamaCPPBackend(backendSystemPath string) (string, error) {
|
||||
backends, err := gallery.ListSystemBackends(backendSystemPath)
|
||||
if err != nil {
|
||||
@@ -33,12 +38,12 @@ func findLLamaCPPBackend(backendSystemPath string) (string, error) {
|
||||
}
|
||||
|
||||
if backendPath == "" {
|
||||
return "", fmt.Errorf("llama-cpp backend not found")
|
||||
return "", errors.New("llama-cpp backend not found, install it first")
|
||||
}
|
||||
|
||||
grpcProcess := filepath.Join(
|
||||
backendPath,
|
||||
"grpc-server",
|
||||
llamaCPPRPCBinaryName,
|
||||
)
|
||||
|
||||
return grpcProcess, nil
|
||||
|
||||
Reference in New Issue
Block a user