Mirror of https://github.com/mudler/LocalAI.git (synced 2026-05-17)
* feat(llama-cpp): bump to MTP-merge SHA and document draft-mtp spec type

Update LLAMA_VERSION to 0253fb21 (post ggml-org/llama.cpp#22673 merge, 2026-05-16) to pick up Multi-Token Prediction support. No grpc-server.cpp changes are required: the existing `spec_type` option delegates to upstream's `common_speculative_types_from_names()`, which already accepts the new `draft-mtp` name. The `n_rs_seq` cparam needed by MTP is auto-derived inside `common_context_params_to_llama` from `params.speculative.need_n_rs_seq()`, and when no `draft_model` is set the upstream server builds the MTP context off the target model itself.

Docs: extend the speculative-decoding section of the model-configuration guide with the new type, both load paths (MTP head embedded in the main GGUF vs. a separate `mtp-*.gguf` sibling), the PR's recommended `spec_n_max` of 2-3, and the chained `draft-mtp,ngram-mod` recipe. Also note that the upstream `-hf` auto-discovery of `mtp-*.gguf` siblings is not wired through LocalAI's gRPC layer.

Agent guide: short note explaining that new upstream spec types are picked up automatically and that MTP needs no gRPC plumbing.

Assisted-by: Claude:claude-opus-4-7 [Claude Code]
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* feat(llama-cpp): auto-detect MTP heads and enable draft-mtp on import + load

Detect upstream's `<arch>.nextn_predict_layers` GGUF metadata key (set by `convert_hf_to_gguf.py` for Qwen3.5/3.6-family models and similar) and, when present and the user has not configured a `spec_type` explicitly, auto-append the upstream-recommended speculative-decoding tuple:

- spec_type:draft-mtp
- spec_n_max:6
- spec_p_min:0.75

The 0.75 p_min is pinned defensively because upstream marks the current default with a "change to 0.0f" TODO; locking it here keeps acceptance thresholds stable across future llama.cpp bumps.

Detection runs in two places:

- The model importer (`POST /models/import-uri`, the `/import-model` UI) range-fetches the GGUF header for HuggingFace / direct-URL imports via `gguf.ParseGGUFFileRemote`, with a 30s timeout and non-fatal error handling. OCI/Ollama URIs are skipped because the artifact is not directly streamable; the load-time hook covers them once the file is on disk.
- The llama-cpp load-time hook (`guessGGUFFromFile`) reads the local header on every model start and appends the same options if `spec_type` is not already set.

Both paths share `ApplyMTPDefaults` and respect an explicit user-set `spec_type:` / `speculative_type:`, so YAML overrides win. Ginkgo specs cover the append, preserve-user-choice, legacy-alias, and nil-safety paths.

Assisted-by: Claude:claude-opus-4-7 [Claude Code]
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

* fix(importer): resolve huggingface:// URIs before MTP header probe

`gguf.ParseGGUFFileRemote` only speaks HTTP(S), but the importer was handing it the raw `huggingface://...` URI directly (and similarly for any other custom downloader scheme). A live test against `huggingface://ggml-org/Qwen3.6-27B-MTP-GGUF/Qwen3.6-27B-MTP-Q8_0.gguf` exposed this: the probe failed with `unsupported protocol scheme "huggingface"`, was caught by the non-fatal error path, and the MTP options were silently never applied to the generated YAML.

Route every candidate URI through `downloader.URI.ResolveURL()` and require the resolved form to be HTTP(S). After the fix, the probe successfully reads `<arch>.nextn_predict_layers=1` from the real HF GGUF and the emitted ConfigFile carries spec_type:draft-mtp, spec_n_max:6, spec_p_min:0.75 as intended.
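For illustration, a minimal sketch of the emitted model YAML after a successful probe. Only the three `spec_*` keys and their values are confirmed by this change; the surrounding `name`/`backend`/`parameters` framing is LocalAI's usual model-YAML layout and is assumed here, as is the import name:

    # Sketch of an emitted ConfigFile once the MTP probe reads
    # <arch>.nextn_predict_layers from the GGUF header (framing fields assumed).
    name: qwen3.6-27b-mtp                # assumed import name
    backend: llama-cpp
    parameters:
      model: Qwen3.6-27B-MTP-Q8_0.gguf
    spec_type: draft-mtp                 # tuple auto-applied by ApplyMTPDefaults
    spec_n_max: 6
    spec_p_min: 0.75                     # pinned against upstream's "change to 0.0f" TODO

An explicit `spec_type:` (or legacy `speculative_type:`) in a hand-written YAML suppresses all three auto-applied values, per the override rule above.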
Assisted-by: Claude:claude-opus-4-7 [Claude Code]
Signed-off-by: Ettore Di Giacinto <mudler@localai.io>

---------

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
Co-authored-by: Ettore Di Giacinto <mudler@localai.io>
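For comparison, a hand-written config following the updated docs could chain MTP with the ngram modulator. This is a hedged sketch: the model name and filename are hypothetical, the comma-separated `spec_type` value mirrors the `draft-mtp,ngram-mod` recipe named above, and `spec_n_max: 3` sits at the top of the PR's recommended 2-3 range:

    # Manual speculative-decoding setup per the model-configuration guide (sketch).
    name: my-mtp-model                   # hypothetical
    backend: llama-cpp
    parameters:
      model: my-mtp-model.Q8_0.gguf      # MTP head embedded in the main GGUF
    spec_type: draft-mtp,ngram-mod       # chained recipe documented in this change
    spec_n_max: 3                        # PR recommendation for MTP is 2-3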
Makefile

LLAMA_VERSION?=0253fb21f595246f54c192fe8332f34173be251b
LLAMA_REPO?=https://github.com/ggerganov/llama.cpp

CMAKE_ARGS?=
BUILD_TYPE?=
NATIVE?=false
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh
TARGET?=--target grpc-server
JOBS?=$(shell nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 1)
ARCH?=$(shell uname -m)

# Disable shared libs: we link against static gRPC and can't mix shared and static
CMAKE_ARGS+=-DBUILD_SHARED_LIBS=OFF -DLLAMA_CURL=OFF

CURRENT_MAKEFILE_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))

ifeq ($(NATIVE),false)
	CMAKE_ARGS+=-DGGML_NATIVE=OFF -DLLAMA_OPENSSL=OFF
endif

# If build type is cublas, then we set -DGGML_CUDA=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
	CMAKE_ARGS+=-DGGML_CUDA=ON
# If build type is openblas, then we set -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# to CMAKE_ARGS automatically
else ifeq ($(BUILD_TYPE),openblas)
	CMAKE_ARGS+=-DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
# If build type is clblas (OpenCL), we set -DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
else ifeq ($(BUILD_TYPE),clblas)
	CMAKE_ARGS+=-DGGML_CLBLAST=ON -DCLBlast_DIR=/some/path
# If it's hipblas, we also have to set CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++
else ifeq ($(BUILD_TYPE),hipblas)
	ROCM_HOME ?= /opt/rocm
	ROCM_PATH ?= /opt/rocm
	export CXX=$(ROCM_HOME)/llvm/bin/clang++
	export CC=$(ROCM_HOME)/llvm/bin/clang
	AMDGPU_TARGETS?=gfx908,gfx90a,gfx942,gfx950,gfx1030,gfx1100,gfx1101,gfx1102,gfx1151,gfx1200,gfx1201
ifeq ($(strip $(AMDGPU_TARGETS)),)
	$(error AMDGPU_TARGETS is empty - set it to a comma-separated list of gfx targets, e.g. gfx1100,gfx1101)
endif
	CMAKE_ARGS+=-DGGML_HIP=ON -DAMDGPU_TARGETS=$(AMDGPU_TARGETS)
else ifeq ($(BUILD_TYPE),vulkan)
	CMAKE_ARGS+=-DGGML_VULKAN=1
else ifeq ($(OS),Darwin)
	ifeq ($(BUILD_TYPE),)
		BUILD_TYPE=metal
	endif
	ifneq ($(BUILD_TYPE),metal)
		CMAKE_ARGS+=-DGGML_METAL=OFF
	else
		CMAKE_ARGS+=-DGGML_METAL=ON
		CMAKE_ARGS+=-DGGML_METAL_EMBED_LIBRARY=ON
		CMAKE_ARGS+=-DGGML_METAL_USE_BF16=ON
		CMAKE_ARGS+=-DGGML_OPENMP=OFF
	endif
	TARGET+=--target ggml-metal
endif

ifeq ($(BUILD_TYPE),sycl_f16)
	CMAKE_ARGS+=-DGGML_SYCL=ON \
		-DCMAKE_C_COMPILER=icx \
		-DCMAKE_CXX_COMPILER=icpx \
		-DCMAKE_CXX_FLAGS="-fsycl" \
		-DGGML_SYCL_F16=ON
endif

ifeq ($(BUILD_TYPE),sycl_f32)
	CMAKE_ARGS+=-DGGML_SYCL=ON \
		-DCMAKE_C_COMPILER=icx \
		-DCMAKE_CXX_COMPILER=icpx \
		-DCMAKE_CXX_FLAGS="-fsycl"
endif

INSTALLED_PACKAGES=$(CURDIR)/../grpc/installed_packages
INSTALLED_LIB_CMAKE=$(INSTALLED_PACKAGES)/lib/cmake
ADDED_CMAKE_ARGS=-Dabsl_DIR=${INSTALLED_LIB_CMAKE}/absl \
	-DProtobuf_DIR=${INSTALLED_LIB_CMAKE}/protobuf \
	-Dutf8_range_DIR=${INSTALLED_LIB_CMAKE}/utf8_range \
	-DgRPC_DIR=${INSTALLED_LIB_CMAKE}/grpc \
	-DCMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES=${INSTALLED_PACKAGES}/include

build-llama-cpp-grpc-server:
# Conditionally build grpc for the llama backend to use if needed
ifdef BUILD_GRPC_FOR_BACKEND_LLAMA
	$(MAKE) -C ../../grpc build
	_PROTOBUF_PROTOC=${INSTALLED_PACKAGES}/bin/proto \
	_GRPC_CPP_PLUGIN_EXECUTABLE=${INSTALLED_PACKAGES}/bin/grpc_cpp_plugin \
	PATH="${INSTALLED_PACKAGES}/bin:${PATH}" \
	CMAKE_ARGS="${CMAKE_ARGS} ${ADDED_CMAKE_ARGS}" \
	LLAMA_VERSION=$(LLAMA_VERSION) \
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
else
	echo "BUILD_GRPC_FOR_BACKEND_LLAMA is not defined."
	LLAMA_VERSION=$(LLAMA_VERSION) $(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../$(VARIANT) grpc-server
endif

llama-cpp-avx2: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build purge
	$(info ${GREEN}I llama-cpp build info:avx2${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx2-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx2-build/grpc-server llama-cpp-avx2

llama-cpp-avx512: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build purge
	$(info ${GREEN}I llama-cpp build info:avx512${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=on -DGGML_FMA=on -DGGML_F16C=on" $(MAKE) VARIANT="llama-cpp-avx512-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx512-build/grpc-server llama-cpp-avx512

llama-cpp-avx: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build purge
	$(info ${GREEN}I llama-cpp build info:avx${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-avx-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-avx-build/grpc-server llama-cpp-avx

llama-cpp-fallback: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build purge
	$(info ${GREEN}I llama-cpp build info:fallback${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" $(MAKE) VARIANT="llama-cpp-fallback-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-fallback-build/grpc-server llama-cpp-fallback

llama-cpp-grpc: llama.cpp
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build
	$(MAKE) -C $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build purge
	$(info ${GREEN}I llama-cpp build info:grpc${RESET})
	CMAKE_ARGS="$(CMAKE_ARGS) -DGGML_RPC=ON -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_BMI2=off" TARGET="--target grpc-server --target rpc-server" $(MAKE) VARIANT="llama-cpp-grpc-build" build-llama-cpp-grpc-server
	cp -rfv $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/grpc-server llama-cpp-grpc

llama-cpp-rpc-server: llama-cpp-grpc
	cp -rf $(CURRENT_MAKEFILE_DIR)/../llama-cpp-grpc-build/llama.cpp/build/bin/rpc-server llama-cpp-rpc-server

llama.cpp:
	mkdir -p llama.cpp
	cd llama.cpp && \
	git init && \
	git remote add origin $(LLAMA_REPO) && \
	git fetch --all --tags && \
	git checkout -b build $(LLAMA_VERSION) && \
	git submodule update --init --recursive --depth 1 --single-branch

llama.cpp/tools/grpc-server: llama.cpp
	mkdir -p llama.cpp/tools/grpc-server
	bash prepare.sh

rebuild:
	bash prepare.sh
	rm -rf grpc-server
	$(MAKE) grpc-server

package:
	bash package.sh

purge:
	rm -rf llama.cpp/build
	rm -rf llama.cpp/tools/grpc-server
	rm -rf grpc-server

clean: purge
	rm -rf llama.cpp

grpc-server: llama.cpp llama.cpp/tools/grpc-server
	@echo "Building grpc-server with $(BUILD_TYPE) build type and $(CMAKE_ARGS)"
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
	+bash -c "source $(ONEAPI_VARS); \
	cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)"
else
	+cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release -j $(JOBS) $(TARGET)
endif
	cp llama.cpp/build/bin/grpc-server .