* feat(vllm): expose AsyncEngineArgs via generic engine_args YAML map
LocalAI's vLLM backend wraps a small typed subset of vLLM's
AsyncEngineArgs (quantization, tensor_parallel_size, dtype, etc.).
Anything outside that subset -- pipeline/data/expert parallelism,
speculative_config, kv_transfer_config, all2all_backend, prefix
caching, chunked prefill, etc. -- requires a new protobuf field, a
Go struct field, an options.go line, and a backend.py mapping per
feature. That cadence is the bottleneck on shipping vLLM's
production feature set.
Add a generic `engine_args:` map to the model YAML that is
JSON-serialised into a new ModelOptions.EngineArgs proto field and
applied verbatim to AsyncEngineArgs at LoadModel time. Validation
is done by the Python backend via dataclasses.fields(); unknown
keys fail with the closest valid name as a hint.
dataclasses.replace() is used so vLLM's __post_init__ re-runs and
auto-converts dict values into nested config dataclasses
(CompilationConfig, AttentionConfig, ...). speculative_config and
kv_transfer_config flow through as dicts; vLLM converts them at
engine init.
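In sketch form (illustrative only, not the literal backend.py;
assume engine_args is the JSON-decoded map and that the import
path matches your vLLM version):

    import dataclasses
    import difflib

    from vllm.engine.arg_utils import AsyncEngineArgs

    def apply_engine_args(base: AsyncEngineArgs,
                          engine_args: dict) -> AsyncEngineArgs:
        # validate against the dataclass schema, not a hand-written list
        valid = {f.name for f in dataclasses.fields(AsyncEngineArgs)}
        for key in engine_args:
            if key not in valid:
                hint = difflib.get_close_matches(key, sorted(valid), n=1)
                suffix = f" (did you mean {hint[0]!r}?)" if hint else ""
                raise ValueError(f"unknown engine arg {key!r}{suffix}")
        # replace() re-invokes __init__/__post_init__, so dict values are
        # coerced into nested config dataclasses where vLLM supports it
        return dataclasses.replace(base, **engine_args)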
Operators can now write:
engine_args:
  data_parallel_size: 8
  enable_expert_parallel: true
  all2all_backend: deepep_low_latency
  speculative_config:
    method: deepseek_mtp
    num_speculative_tokens: 3
  kv_cache_dtype: fp8
without further proto/Go/Python plumbing per field.
Production defaults seeded by hooks_vllm.go: enable_prefix_caching
and enable_chunked_prefill default to true unless explicitly set.
Existing typed YAML fields (gpu_memory_utilization,
tensor_parallel_size, etc.) remain for back-compat; engine_args
overrides them when both are set.
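For instance (hypothetical snippet with the same knob set both ways):

backend: vllm
gpu_memory_utilization: 0.90   # typed field, kept for back-compat
engine_args:
  gpu_memory_utilization: 0.95 # generic map wins when both are set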
Assisted-by: Claude:claude-opus-4-7 [Claude Code]
Signed-off-by: Richard Palethorpe <io@richiejp.com>
* chore(vllm): pin cublas13 to vLLM 0.20.0 cu130 wheel
vLLM's PyPI wheel is built against CUDA 12 (libcudart.so.12) and won't
load on a cu130 host. Switch the cublas13 build to vLLM's per-tag cu130
simple-index (https://wheels.vllm.ai/0.20.0/cu130/) and pin
vllm==0.20.0. The cu130-flavoured wheel ships libcudart.so.13 and
includes the DFlash speculative-decoding method that landed in 0.20.0.
cublas13 install gets --index-strategy=unsafe-best-match so uv consults
both the cu130 index and PyPI when resolving — PyPI also publishes
vllm==0.20.0, but with cu12 binaries that error at import time.
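The effective install is equivalent to something like the following
(illustrative command, not the literal build recipe):

uv pip install \
  --index-strategy unsafe-best-match \
  --extra-index-url https://wheels.vllm.ai/0.20.0/cu130/ \
  vllm==0.20.0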
Verified: Qwen3.5-4B + z-lab/Qwen3.5-4B-DFlash loads and serves chat
completions on RTX 5070 Ti (sm_120, cu130).
Assisted-by: Claude:claude-opus-4-7 [Claude Code]
Signed-off-by: Richard Palethorpe <io@richiejp.com>
* ci(vllm): bot job to bump cublas13 vLLM wheel pin
vLLM's cu130 wheel index URL is itself version-locked
(wheels.vllm.ai/<TAG>/cu130/, no /latest/ alias upstream), so a vLLM
bump means rewriting two values atomically — the URL segment and the
version constraint. bump_deps.sh handles git-sha-in-Makefile only;
add a sibling bump_vllm_wheel.sh and a matching workflow job that
mirrors the existing matrix's PR-creation pattern.
The bumper queries /releases/latest (which excludes prereleases),
strips the leading 'v', and seds both lines unconditionally. When the
file is already on the latest tag the rewrite is a no-op and
peter-evans/create-pull-request opens no PR.
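In outline the bumper amounts to this (a sketch of the behaviour
described above; $PIN_FILE is a placeholder for wherever the index
URL and version constraint actually live):

#!/bin/bash
set -euo pipefail

# /releases/latest excludes prereleases by definition
tag=$(curl -fsSL https://api.github.com/repos/vllm-project/vllm/releases/latest | jq -r .tag_name)
version=${tag#v} # strip the leading 'v'

# rewrite both values unconditionally; a no-op when already on the
# latest tag, so create-pull-request sees no diff and opens no PR
sed -i "s|wheels\.vllm\.ai/[^/]*/cu130|wheels.vllm.ai/${version}/cu130|" "$PIN_FILE"
sed -i "s|vllm==[0-9][0-9.]*|vllm==${version}|" "$PIN_FILE"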
Assisted-by: Claude:claude-opus-4-7 [Claude Code]
Signed-off-by: Richard Palethorpe <io@richiejp.com>
* docs(vllm): document engine_args and speculative decoding
The new engine_args: map plumbs arbitrary AsyncEngineArgs through to
vLLM, but the public docs only covered the basic typed fields. Add a
short subsection in the vLLM section explaining the typed/generic
split and showing a worked DFlash speculative-decoding config, with
pointers to vLLM's SpeculativeConfig reference and z-lab's drafter
collection.
Assisted-by: Claude:claude-opus-4-7 [Claude Code]
Signed-off-by: Richard Palethorpe <io@richiejp.com>
---------
Signed-off-by: Richard Palethorpe <io@richiejp.com>
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
140 lines · 4.0 KiB · Go
package config_test

import (
	"strings"

	. "github.com/mudler/LocalAI/core/config"
	"github.com/mudler/LocalAI/core/schema"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var _ = Describe("Backend hooks and parser defaults", func() {
	Context("MatchParserDefaults", func() {
		It("matches Qwen3 family", func() {
			parsers := MatchParserDefaults("Qwen/Qwen3-8B")
			Expect(parsers).NotTo(BeNil())
			Expect(parsers["tool_parser"]).To(Equal("hermes"))
			Expect(parsers["reasoning_parser"]).To(Equal("qwen3"))
		})

		It("matches Qwen3.5 with longest-prefix-first", func() {
			parsers := MatchParserDefaults("Qwen/Qwen3.5-9B")
			Expect(parsers).NotTo(BeNil())
			Expect(parsers["tool_parser"]).To(Equal("qwen3_xml"))
		})

		It("matches Llama-3.3 not Llama-3.2", func() {
			parsers := MatchParserDefaults("meta/Llama-3.3-70B-Instruct")
			Expect(parsers).NotTo(BeNil())
			Expect(parsers["tool_parser"]).To(Equal("llama3_json"))
		})

		It("matches deepseek-r1", func() {
			parsers := MatchParserDefaults("deepseek-ai/DeepSeek-R1")
			Expect(parsers).NotTo(BeNil())
			Expect(parsers["reasoning_parser"]).To(Equal("deepseek_r1"))
			Expect(parsers["tool_parser"]).To(Equal("deepseek_v3"))
		})

		It("returns nil for unknown families", func() {
			Expect(MatchParserDefaults("acme/unknown-model-xyz")).To(BeNil())
		})
	})

	Context("Backend hook registration and execution", func() {
		It("runs registered hook for a backend", func() {
			called := false
			RegisterBackendHook("test-backend-hook", func(cfg *ModelConfig, modelPath string) {
				called = true
				cfg.Description = "modified-by-hook"
			})

			cfg := &ModelConfig{
				Backend: "test-backend-hook",
			}
			// runBackendHooks is unexported; SetDefaults is the public entry
			// point that runs any hooks registered for cfg.Backend, so
			// exercise the hook through it.
			cfg.PredictionOptions = schema.PredictionOptions{}

			cfg.SetDefaults()
			Expect(called).To(BeTrue())
			Expect(cfg.Description).To(Equal("modified-by-hook"))
		})
	})

	Context("vllmDefaults hook", func() {
		It("auto-sets parsers for known model families on vllm backend", func() {
			cfg := &ModelConfig{
				Backend: "vllm",
				PredictionOptions: schema.PredictionOptions{
					BasicModelRequest: schema.BasicModelRequest{
						Model: "Qwen/Qwen3-8B",
					},
				},
			}
			cfg.SetDefaults()

			foundTool := false
			foundReasoning := false
			for _, opt := range cfg.Options {
				if opt == "tool_parser:hermes" {
					foundTool = true
				}
				if opt == "reasoning_parser:qwen3" {
					foundReasoning = true
				}
			}
			Expect(foundTool).To(BeTrue())
			Expect(foundReasoning).To(BeTrue())
		})

		It("does not override user-set tool_parser", func() {
			cfg := &ModelConfig{
				Backend: "vllm",
				Options: []string{"tool_parser:custom"},
				PredictionOptions: schema.PredictionOptions{
					BasicModelRequest: schema.BasicModelRequest{
						Model: "Qwen/Qwen3-8B",
					},
				},
			}
			cfg.SetDefaults()

			count := 0
			for _, opt := range cfg.Options {
				if strings.HasPrefix(opt, "tool_parser:") {
					count++
				}
			}
			Expect(count).To(Equal(1))
		})

		It("seeds production engine_args defaults", func() {
			cfg := &ModelConfig{Backend: "vllm"}
			cfg.SetDefaults()

			Expect(cfg.EngineArgs).NotTo(BeNil())
			Expect(cfg.EngineArgs["enable_prefix_caching"]).To(Equal(true))
			Expect(cfg.EngineArgs["enable_chunked_prefill"]).To(Equal(true))
		})

		It("does not override user-set engine_args", func() {
			cfg := &ModelConfig{
				Backend: "vllm",
				LLMConfig: LLMConfig{
					EngineArgs: map[string]any{
						"enable_prefix_caching": false,
					},
				},
			}
			cfg.SetDefaults()

			Expect(cfg.EngineArgs["enable_prefix_caching"]).To(Equal(false))
			// enable_chunked_prefill is still seeded since the user didn't set it
			Expect(cfg.EngineArgs["enable_chunked_prefill"]).To(Equal(true))
		})
	})
})