Build llama.cpp separately

* WIP
* WIP
* WIP
* Start attaching some tests
* Add git and small fixups
* fix: correctly autoload external backends
* Try to run AIO tests
* Slightly update the Makefile help
* Adapt the auto-bumper
* Try to run Linux tests
* Add llama-cpp into the build pipelines
* Add a default capability (for CPU)
* Drop llama-cpp-specific logic from the backend loader
* Drop the gRPC install in CI for tests
* Fixups
* Pass the backends path for tests
* Build protogen at the start
* fix(tests): set the backends path consistently
* Correctly configure the backends path
* Try to build for darwin
* WIP
* Compile for Metal on arm64/darwin
* Try to run the build off cross-arch
* Add the nvidia-l4t and CPU llama-cpp backends to the backend index
* Also build darwin-x86 for llama-cpp
* Temporarily disable arm64 builds
* Test the backend build on PRs
* Fix up the build-backend reusable workflow
* Pass skip drivers
* Use crane
* Skip drivers
* Fixups
* x86 darwin
* Add a packaging step for llama.cpp
* Fixups
* Fix leftover from the bark-cpp extraction
* Try to fix the hipblas build

Signed-off-by: Ettore Di Giacinto <mudler@localai.io>
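For context, a minimal sketch of how the packaging script below can be driven, assuming it is run from the repository root after the local-ai binary has been built (the script filename here is illustrative, not taken from the repo):

    # Hypothetical invocation; IMAGE_NAME and PLATFORMARCH are the two
    # environment knobs the script below actually reads.
    IMAGE_NAME=localai/llama-cpp-darwin PLATFORMARCH=darwin/arm64 \
      bash ./package-llama-cpp-darwin.sh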
#!/bin/bash
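# Build the darwin llama.cpp backend variants and package them, together with
# the dylibs they link against, into an OCI image tarball (build/darwin.tar).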

set -ex

IMAGE_NAME="${IMAGE_NAME:-localai/llama-cpp-darwin}"

pushd backend/cpp/llama-cpp
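
# Only the fallback, gRPC, and RPC-server variants are built here; the
# AVX-specific variants below stay commented out for now.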
# make llama-cpp-avx && \
# make llama-cpp-avx2 && \
# make llama-cpp-avx512 && \
make llama-cpp-fallback && \
make llama-cpp-grpc && \
make llama-cpp-rpc-server

popd
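
# Stage everything that should end up inside the image under build/darwin.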
mkdir -p build/darwin

# cp -rf backend/cpp/llama-cpp/llama-cpp-avx build/darwin/
# cp -rf backend/cpp/llama-cpp/llama-cpp-avx2 build/darwin/
# cp -rf backend/cpp/llama-cpp/llama-cpp-avx512 build/darwin/
cp -rf backend/cpp/llama-cpp/llama-cpp-fallback build/darwin/
cp -rf backend/cpp/llama-cpp/llama-cpp-grpc build/darwin/
cp -rf backend/cpp/llama-cpp/llama-cpp-rpc-server build/darwin/
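
# Make the image self-contained: for each staged binary, copy the dynamic
# libraries it links against into build/darwin/lib.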
for file in build/darwin/*; do
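  # otool -L prints the file name on its first line and one linked-library
  # path per following line; keep just the paths, flattened onto one line.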
  LIBS="$(otool -L "$file" | awk 'NR > 1 { print $1 }' | xargs echo)"

  for lib in $LIBS; do
    mkdir -p build/darwin/lib
    # only copy libraries ending in .dylib
    if [[ "$lib" == *.dylib ]]; then
      if [ -e "$lib" ]; then
        cp -rvf "$lib" build/darwin/lib
      fi
    fi
  done
done
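
# Ship the backend's run.sh alongside the binaries.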
cp -rf backend/cpp/llama-cpp/run.sh build/darwin/

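# Target platform of the OCI image; defaults to Apple Silicon. Override via
# the environment for other targets (e.g. PLATFORMARCH=darwin/amd64).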
PLATFORMARCH="${PLATFORMARCH:-darwin/arm64}"

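# Bundle the staged tree into an OCI image tarball with LocalAI's CLI helper.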
./local-ai util create-oci-image \
  build/darwin/. \
  --output build/darwin.tar \
  --image-name "$IMAGE_NAME" \
  --platform "$PLATFORMARCH"

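# Only build/darwin.tar is kept; drop the staging directory.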
rm -rf build/darwin