From 098c66920d7e70e0221f0ebb34bec29f84b1cfe5 Mon Sep 17 00:00:00 2001
From: Morgan Funtowicz
Date: Tue, 22 Oct 2024 15:23:16 +0200
Subject: [PATCH] feat(backend): tell cmake to build llama-common and link to
 it

---
 backends/llamacpp/CMakeLists.txt | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/backends/llamacpp/CMakeLists.txt b/backends/llamacpp/CMakeLists.txt
index 890d99da..9f08d0f3 100644
--- a/backends/llamacpp/CMakeLists.txt
+++ b/backends/llamacpp/CMakeLists.txt
@@ -6,6 +6,7 @@ set(CMAKE_CXX_STANDARD 23)
 include(FetchContent)
 
 set(LLAMA_CPP_TARGET_VERSION "b3837" CACHE STRING "Version of llama.cpp to build against")
+set(LLAMA_BUILD_COMMON ON)
 set(LLAMA_CPP_TARGET_CUDA_ARCHS "75-real;80-real;86-real;89-real;90-real" CACHE STRING "CUDA arch(s) to build")
 option(LLAMA_CPP_BUILD_OFFLINE_RUNNER "Flag to build the standalone c++ backend runner")
 option(LLAMA_CPP_BUILD_CUDA "Flag to build CUDA enabled inference through llama.cpp")
@@ -28,7 +29,7 @@ fetchcontent_declare(
         llama
         # DOWNLOAD_EXTRACT_TIMESTAMP TRUE
         GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
-        GIT_TAG b3837
+        GIT_TAG b3958
         GIT_SHALLOW FALSE
 )
 
@@ -41,7 +42,8 @@ target_link_libraries(tgi_llama_cpp_backend_impl PUBLIC fmt::fmt spdlog::spdlog
 
 if (${LLAMA_CPP_BUILD_OFFLINE_RUNNER})
     message(STATUS "Building llama.cpp offline runner")
     add_executable(tgi_llama_cpp_offline_runner offline/main.cpp)
-    target_link_libraries(tgi_llama_cpp_offline_runner tgi_llama_cpp_backend_impl)
+
+    target_link_libraries(tgi_llama_cpp_offline_runner PUBLIC tgi_llama_cpp_backend_impl llama common)
 endif ()
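
Note: the `common` target linked above is llama.cpp's helper library, which is only built when
LLAMA_BUILD_COMMON is ON; that is why the patch enables the flag before the FetchContent
declaration. The following is a minimal, hypothetical sketch of the same consumption pattern in a
standalone project (the `demo` target and main.cpp are placeholders, not part of this patch):

    cmake_minimum_required(VERSION 3.24)
    project(llama-consumer CXX)

    include(FetchContent)

    # Ask llama.cpp to also build its "common" helper library,
    # as the patch does before declaring the dependency.
    set(LLAMA_BUILD_COMMON ON)

    FetchContent_Declare(
            llama
            GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
            GIT_TAG b3958
    )
    FetchContent_MakeAvailable(llama)

    # Link both the core library and the helper library, mirroring what
    # the patch does for tgi_llama_cpp_offline_runner.
    add_executable(demo main.cpp)
    target_link_libraries(demo PRIVATE llama common)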