fix: Add vllm-omni backend to video generation model detection (#8659) (#8781)

fix: Add vllm-omni backend to video generation model detection

- Include vllm-omni in the list of backends that support FLAG_VIDEO
- This allows models like vllm-omni-wan2.2-t2v to appear in the video model selector UI
- Fixes issue #8659 where video generation models using vllm-omni backend were not showing in the dropdown

Co-authored-by: team-coding-agent-1 <team-coding-agent-1@localai.dev>
This commit is contained in:
LocalAI [bot]
2026-03-05 01:04:47 +01:00
committed by GitHub
parent 3dce20b026
commit 9fc77909e0

View File

@@ -641,7 +641,7 @@ func (c *ModelConfig) GuessUsecases(u ModelConfigUsecase) bool {
}
if (u & FLAG_VIDEO) == FLAG_VIDEO {
-	videoBackends := []string{"diffusers", "stablediffusion"}
+	videoBackends := []string{"diffusers", "stablediffusion", "vllm-omni"}
if !slices.Contains(videoBackends, c.Backend) {
return false
}