Compare commits (mirror of https://github.com/mudler/LocalAI.git)

3 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 59bfc67ead | |
| | f80b6dfc2d | |
| | 4c16957448 | |
Makefile (2 changed lines)
```diff
@@ -8,7 +8,7 @@ DETECT_LIBS?=true
 # llama.cpp versions
 GOLLAMA_REPO?=https://github.com/go-skynet/go-llama.cpp
 GOLLAMA_VERSION?=2b57a8ae43e4699d3dc5d1496a1ccd42922993be
-CPPLLAMA_VERSION?=081fe431aa8fb6307145c4feb3eed4f48cab19f8
+CPPLLAMA_VERSION?=b841d0740855c5af1344a81f261139a45a2b39ee

 # gpt4all version
 GPT4ALL_REPO?=https://github.com/nomic-ai/gpt4all
```
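The Makefile change itself is only the `CPPLLAMA_VERSION` bump, which pins the llama.cpp commit the build checks out. A hedged sketch of how such a pin is typically consumed (the fetch target is not part of this diff, so the commands and destination directory below are illustrative only):

```sh
# Illustrative only: fetch llama.cpp and check out the commit pinned by CPPLLAMA_VERSION.
CPPLLAMA_VERSION=b841d0740855c5af1344a81f261139a45a2b39ee
git clone https://github.com/ggerganov/llama.cpp sources/llama.cpp   # hypothetical destination dir
git -C sources/llama.cpp checkout "$CPPLLAMA_VERSION"
```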
(second changed file; its name was not captured in this view, but the hunk below modifies the CMakeLists.txt that builds the grpc-server target)

```diff
@@ -75,11 +75,24 @@ add_library(hw_grpc_proto
     ${hw_proto_hdrs} )

 add_executable(${TARGET} grpc-server.cpp utils.hpp json.hpp)
-target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
-  absl::flags_parse
-  gRPC::${_REFLECTION}
-  gRPC::${_GRPC_GRPCPP}
-  protobuf::${_PROTOBUF_LIBPROTOBUF})
+
+# Conditionally link SYCL to grpc-server
+# https://github.com/ggerganov/llama.cpp/issues/8665
+if ( DEFINED ENV{ONEAPI_ROOT})
+  target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
+    absl::flags_parse
+    gRPC::${_REFLECTION}
+    gRPC::${_GRPC_GRPCPP}
+    protobuf::${_PROTOBUF_LIBPROTOBUF}
+    sycl)
+else()
+  target_link_libraries(${TARGET} PRIVATE common llama myclip ${CMAKE_THREAD_LIBS_INIT} absl::flags hw_grpc_proto
+    absl::flags_parse
+    gRPC::${_REFLECTION}
+    gRPC::${_GRPC_GRPCPP}
+    protobuf::${_PROTOBUF_LIBPROTOBUF})
+endif()

 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 if(TARGET BUILD_INFO)
   add_dependencies(${TARGET} BUILD_INFO)
```
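For context (not part of the change): `ONEAPI_ROOT` is exported by Intel's oneAPI environment script, so the SYCL branch above is taken only when CMake is configured from a shell that has sourced it. A minimal sketch, assuming the default oneAPI install path and a hypothetical build directory:

```sh
# Hedged sketch, not part of the diff: configure with the oneAPI environment loaded
# so that DEFINED ENV{ONEAPI_ROOT} evaluates to true at configure time.
source /opt/intel/oneapi/setvars.sh   # exports ONEAPI_ROOT (default install path assumed)
cmake -S . -B build                   # hypothetical: run from the grpc-server source dir
cmake --build build
```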
backend/cpp/llama/CMakeLists.txt.rpc-8662 (new file, 8 lines)
```diff
@@ -0,0 +1,8 @@
+# https://github.com/ggerganov/llama.cpp/issues/8665
+
+add_executable(rpc-server rpc-server.cpp)
+if ( DEFINED ENV{ONEAPI_ROOT})
+  target_link_libraries(rpc-server PRIVATE ggml llama sycl)
+else()
+  target_link_libraries(rpc-server PRIVATE ggml llama)
+endif()
```
(third changed file; its name was not captured in this view, but the hunk below is from the shell script that copies llama.cpp's llava sources into the grpc-server example and now also installs the rpc-server CMakeLists override added above)

```diff
@@ -17,4 +17,7 @@ cp -rfv llama.cpp/examples/llava/clip.h llama.cpp/examples/grpc-server/clip.h
 cp -rfv llama.cpp/examples/llava/llava.cpp llama.cpp/examples/grpc-server/llava.cpp
 echo '#include "llama.h"' > llama.cpp/examples/grpc-server/llava.h
 cat llama.cpp/examples/llava/llava.h >> llama.cpp/examples/grpc-server/llava.h
 cp -rfv llama.cpp/examples/llava/clip.cpp llama.cpp/examples/grpc-server/clip.cpp
+
+# https://github.com/ggerganov/llama.cpp/issues/8665
+cp -rfv CMakeLists.txt.rpc-8662 llama.cpp/examples/rpc/CMakeLists.txt
```
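A quick, hedged way to confirm the override landed where expected (not part of the change): after the copy step, the upstream rpc example's CMakeLists.txt should contain the `ONEAPI_ROOT` check from the file added above.

```sh
# Hedged check, not part of the diff: the copied file should now carry the SYCL conditional.
grep -n "ONEAPI_ROOT" llama.cpp/examples/rpc/CMakeLists.txt
```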