From f9c248657dbe3b418e97a3039a934d5aa628b777 Mon Sep 17 00:00:00 2001
From: Morgan Funtowicz
Date: Wed, 23 Oct 2024 22:11:58 +0200
Subject: [PATCH] chore(backend): minor formatting

---
 backends/llamacpp/csrc/backend.cpp | 2 +-
 backends/llamacpp/csrc/backend.hpp | 1 -
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/backends/llamacpp/csrc/backend.cpp b/backends/llamacpp/csrc/backend.cpp
index 1f6dcfae..c8806957 100644
--- a/backends/llamacpp/csrc/backend.cpp
+++ b/backends/llamacpp/csrc/backend.cpp
@@ -15,10 +15,10 @@
 #include "backend.hpp"
 
 namespace huggingface::tgi::backends::llama {
-
     std::expected<std::unique_ptr<TgiLlamaCppBackend>, TgiLlamaCppBackendError> CreateLlamaCppBackend(const std::filesystem::path& modelPath) {
         SPDLOG_DEBUG(FMT_STRING("Loading model from {}"), modelPath);
+
         llama_backend_init();
         llama_numa_init(ggml_numa_strategy::GGML_NUMA_STRATEGY_NUMACTL);
diff --git a/backends/llamacpp/csrc/backend.hpp b/backends/llamacpp/csrc/backend.hpp
index 5f356bc0..e4c31ad6 100644
--- a/backends/llamacpp/csrc/backend.hpp
+++ b/backends/llamacpp/csrc/backend.hpp
@@ -17,7 +17,6 @@ namespace huggingface::tgi::backends::llama {
         MODEL_FILE_DOESNT_EXIST = 1
     };
-
     class TgiLlamaCppBackend {
         using TokenId = llama_token;
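
For context, below is a minimal sketch of how a caller might consume the std::expected result of CreateLlamaCppBackend. Note the assumptions: the first template argument of the return type was lost in transit and is reconstructed here as std::unique_ptr<TgiLlamaCppBackend>, and the main() driver and its argument handling are purely illustrative, not part of the patch.

#include <cstdio>
#include <filesystem>

#include "backend.hpp"

int main(int argc, char **argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }
    const std::filesystem::path modelPath(argv[1]);

    // CreateLlamaCppBackend performs llama_backend_init() and llama_numa_init()
    // itself (see the first hunk above), so the caller only inspects the result.
    auto backend = huggingface::tgi::backends::llama::CreateLlamaCppBackend(modelPath);
    if (!backend) {
        // TgiLlamaCppBackendError is the error enum declared in backend.hpp;
        // MODEL_FILE_DOESNT_EXIST = 1 is the only member visible in this patch.
        std::fprintf(stderr, "failed to create backend (error %d)\n",
                     static_cast<int>(backend.error()));
        return 1;
    }

    // On success, *backend holds the constructed backend (assuming the
    // std::unique_ptr<TgiLlamaCppBackend> reconstruction noted above).
    return 0;
}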