# mirror of https://github.com/mudler/LocalAI.git
# synced 2026-04-18 13:58:07 -04:00
# 15 lines | 643 B | YAML
---
# Patch sources for the llama-cpp backend.
# Each source declares a fork whose commits are extracted as patches
# and applied on top of upstream llama.cpp during the build.
# See scripts/patch_utils/apply_patches.sh for the generic patch engine.
#
# version_var: Makefile variable with the pinned fork commit SHA
# base_var: Makefile variable with the upstream base commit SHA
# Both are read from version_file (relative to backend dir) to compute the diff.
sources:
  - name: turboquant
    repo: https://github.com/TheTom/llama-cpp-turboquant.git
    version_var: TURBOQUANT_VERSION
    base_var: LLAMA_VERSION
    version_file: Makefile