mirror of
https://github.com/mudler/LocalAI.git
synced 2026-03-31 13:15:51 -04:00
fix: Add vllm-omni backend to video generation model detection - Include vllm-omni in the list of backends that support FLAG_VIDEO - This allows models like vllm-omni-wan2.2-t2v to appear in the video model selector UI - Fixes issue #8659 where video generation models using vllm-omni backend were not showing in the dropdown Co-authored-by: team-coding-agent-1 <team-coding-agent-1@localai.dev>
This commit is contained in:
@@ -641,7 +641,7 @@ func (c *ModelConfig) GuessUsecases(u ModelConfigUsecase) bool {
 	}
 	if (u & FLAG_VIDEO) == FLAG_VIDEO {
-		videoBackends := []string{"diffusers", "stablediffusion"}
+		videoBackends := []string{"diffusers", "stablediffusion", "vllm-omni"}
 		if !slices.Contains(videoBackends, c.Backend) {
 			return false
 		}
Reference in New Issue
Block a user