LocalAI/core/http/endpoints/localai/system.go
Ettore Di Giacinto, commit 1cdcaf0152 (2025-11-14): feat: migrate to echo and enable cancellation of non-streaming requests (#7270)

package localai

import (
	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/schema"
	"github.com/mudler/LocalAI/pkg/model"
)

// SystemInformations returns information about the running LocalAI instance.
// @Summary Show the LocalAI instance information
// @Success 200 {object} schema.SystemInformationResponse "Response"
// @Router /system [get]
func SystemInformations(ml *model.ModelLoader, appConfig *config.ApplicationConfig) echo.HandlerFunc {
	return func(c echo.Context) error {
		availableBackends := []string{}
		loadedModels := ml.ListLoadedModels()

		// Collect backends configured as external gRPC backends.
		for b := range appConfig.ExternalGRPCBackends {
			availableBackends = append(availableBackends, b)
		}

		// Collect external backends known to the model loader.
		for b := range ml.GetAllExternalBackends(nil) {
			availableBackends = append(availableBackends, b)
		}

		// Report each currently loaded model by its ID.
		sysmodels := []schema.SysInfoModel{}
		for _, m := range loadedModels {
			sysmodels = append(sysmodels, schema.SysInfoModel{ID: m.ID})
		}

		return c.JSON(200,
			schema.SystemInformationResponse{
				Backends: availableBackends,
				Models:   sysmodels,
			},
		)
	}
}
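
For context, here is a minimal sketch of how this handler could be mounted on an echo router. The route path comes from the @Router annotation above; the package name routes and the helper registerSystemRoute are hypothetical, and how LocalAI actually constructs the model.ModelLoader and config.ApplicationConfig at startup is assumed to happen elsewhere.

package routes

import (
	"github.com/labstack/echo/v4"

	"github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/http/endpoints/localai"
	"github.com/mudler/LocalAI/pkg/model"
)

// registerSystemRoute is a hypothetical helper showing where the handler
// would be wired in; ml and appConfig are assumed to be built during
// application startup.
func registerSystemRoute(e *echo.Echo, ml *model.ModelLoader, appConfig *config.ApplicationConfig) {
	// The path matches the @Router annotation on SystemInformations.
	e.GET("/system", localai.SystemInformations(ml, appConfig))
}

Once mounted, a GET /system request returns a JSON schema.SystemInformationResponse with the available backends and loaded models. Because the handler is an echo.HandlerFunc returning an error, echo's per-request context and centralized error handling apply to it, which appears to be what the commit title means by enabling cancellation of non-streaming requests.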