Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-24 00:12:08 +00:00)
feat(backend): correctly load llama.cpp model from llama api and not gpt2
This commit is contained in: parent 05ad684676 · commit 0911076320
csrc/backend.cpp
@@ -2,52 +2,40 @@
 // Created by Morgan Funtowicz on 9/28/2024.
 //

-#include <arg.h>
-#include <common.h>
+#include <expected>
+#include <filesystem>
+#include <ggml.h>
+#include <llama.h>
 #include <fmt/format.h>
+#include <fmt/std.h>
 #include <spdlog/spdlog.h>
 #include "backend.hpp"

 namespace huggingface::tgi::backends::llama {

-    std::unique_ptr<huggingface::tgi::backends::llama::TgiLlamaCppBackend>
-    CreateLlamaCppBackend(std::string_view root) {
-        SPDLOG_INFO(FMT_STRING("Loading model from {}"), root);
-        gpt_init();
+    std::expected<std::unique_ptr<TgiLlamaCppBackend>, TgiLlamaCppBackendError>
+    CreateLlamaCppBackend(const std::filesystem::path& modelPath) {
+        SPDLOG_INFO(FMT_STRING("Loading model from {}"), modelPath);
+        llama_backend_init();
+        llama_numa_init(ggml_numa_strategy::GGML_NUMA_STRATEGY_NUMACTL);

-        // Fake argv
-        std::vector<std::string_view> args = {"tgi_llama_cpp_backend", "--model", root};
-        std::vector<char *> argv;
-        for (const auto &arg: args) {
-            argv.push_back(const_cast<char *>(arg.data()));
-        }
-        argv.push_back(nullptr);
-
-        // Create the GPT parameters
-        gpt_params params;
-        if (!gpt_params_parse(args.size(), argv.data(), params, LLAMA_EXAMPLE_SERVER)) {
-            throw std::runtime_error("Failed to create GPT Params from model");
+        // Load the model
+        if(!exists(modelPath)) {
+            return std::unexpected(TgiLlamaCppBackendError::MODEL_FILE_DOESNT_EXIST);
         }

-        // Create the inference engine
-        SPDLOG_INFO("Allocating llama.cpp model from gpt_params");
-        auto result = llama_init_from_gpt_params(params);
-
-        // Unpack all the inference engine components
-        auto model = result.model;
-        auto context = result.context;
-        auto loras = result.lora_adapters;
-
-        // Make sure everything is correctly initialized
-        if (model == nullptr)
-            throw std::runtime_error(fmt::format("Failed to load model from {}", root));
+        auto params = llama_model_default_params();
+        auto* model = llama_load_model_from_file(modelPath.c_str(), params);
+        auto* context = llama_new_context_with_model(model, {
+            .n_batch = 1,
+            .attention_type = llama_attention_type::LLAMA_ATTENTION_TYPE_CAUSAL,
+            .flash_attn = true,
+        });

         return std::make_unique<huggingface::tgi::backends::llama::TgiLlamaCppBackend>(model, context);
     }

-    huggingface::tgi::backends::llama::TgiLlamaCppBackend::TgiLlamaCppBackend(llama_model *const model,
-                                                                              llama_context *const ctx)
+    huggingface::tgi::backends::llama::TgiLlamaCppBackend::TgiLlamaCppBackend(llama_model *const model, llama_context *const ctx)
         : model(model), ctx(ctx), batch() {
         char modelName[128];
         llama_model_meta_val_str(model, "general.name", modelName, sizeof(modelName));
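The gist of the hunk above: the backend now drives the llama.cpp C API directly (llama_backend_init, llama_load_model_from_file, llama_new_context_with_model) instead of building a fake argv and going through the gpt_params helpers from common. For reference, a minimal standalone sketch of that loading path, reconstructed from the hunk rather than taken from the repository: starting from llama_context_default_params() instead of the brace-initializer, the null checks, the printed messages, and the cleanup calls are assumptions.

// Minimal sketch: load a GGUF model through the raw llama.cpp API (no gpt_params path).
// Field names and values mirror the diff above; everything else is illustrative.
#include <cstdio>
#include <llama.h>

int main(int argc, char** argv) {
    if (argc < 2) {
        std::fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }

    llama_backend_init();                                    // global init, replaces gpt_init()

    auto modelParams = llama_model_default_params();         // defaults; tune n_gpu_layers etc. as needed
    auto* model = llama_load_model_from_file(argv[1], modelParams);
    if (model == nullptr) {
        std::fprintf(stderr, "failed to load model from %s\n", argv[1]);
        return 1;
    }

    auto ctxParams = llama_context_default_params();         // start from defaults instead of brace-init
    ctxParams.n_batch        = 1;                             // values taken from the diff
    ctxParams.flash_attn     = true;
    ctxParams.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL;
    auto* ctx = llama_new_context_with_model(model, ctxParams);
    if (ctx == nullptr) {
        std::fprintf(stderr, "failed to create llama context\n");
        llama_free_model(model);
        return 1;
    }

    char name[128] = {0};
    llama_model_meta_val_str(model, "general.name", name, sizeof(name));
    std::printf("loaded model: %s\n", name);

    llama_free(ctx);                                          // cleanup not shown in the diff
    llama_free_model(model);
    llama_backend_free();
    return 0;
}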
csrc/backend.hpp
@@ -4,12 +4,17 @@
 #ifndef TGI_LLAMA_CPP_BACKEND_BACKEND_HPP
 #define TGI_LLAMA_CPP_BACKEND_BACKEND_HPP

+#include <filesystem>
 #include <memory>
 #include <llama.h>

 namespace huggingface::tgi::backends::llama {
     // const char* TGI_BACKEND_LLAMA_CPP_NAME = "llama.cpp";

+    enum TgiLlamaCppBackendError {
+        MODEL_FILE_DOESNT_EXIST = 1
+    };
+

     class TgiLlamaCppBackend {
     private:
@@ -23,7 +28,8 @@ namespace huggingface::tgi::backends::llama {
         void schedule();
     };

-    std::unique_ptr<TgiLlamaCppBackend> CreateLlamaCppBackend(std::string_view root);
+    std::expected<std::unique_ptr<TgiLlamaCppBackend>, TgiLlamaCppBackendError>
+    CreateLlamaCppBackend(const std::filesystem::path& root);
 }

 #endif //TGI_LLAMA_CPP_BACKEND_BACKEND_HPP
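With this header change, CreateLlamaCppBackend reports failures through std::expected (C++23) and the new TgiLlamaCppBackendError enum instead of throwing. A hedged sketch of how a caller can unpack that result against the declarations above; the helper name ReportBackendCreation and the error messages are illustrative, not part of the patch.

// Illustrative only: requires C++23 (std::expected) and the project's backend.hpp shown above.
#include <cstdio>
#include <expected>
#include <filesystem>
#include "backend.hpp"

using namespace huggingface::tgi::backends::llama;

// Hypothetical helper: obtain a backend for modelPath or explain why it could not be created.
int ReportBackendCreation(const std::filesystem::path& modelPath) {
    auto maybeBackend = CreateLlamaCppBackend(modelPath);
    if (maybeBackend.has_value()) {
        std::printf("backend ready for %s\n", modelPath.string().c_str());
        return 0;
    }
    switch (maybeBackend.error()) {
        case TgiLlamaCppBackendError::MODEL_FILE_DOESNT_EXIST:
            std::fprintf(stderr, "model file does not exist: %s\n", modelPath.string().c_str());
            break;
    }
    return 1;
}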
example main (includes ../csrc/backend.hpp)
@@ -4,6 +4,7 @@

 #include <string_view>
 #include <fmt/format.h>
+#include <fmt/std.h>
 #include <fmt/color.h>
 #include <spdlog/spdlog.h>
 #include "../csrc/backend.hpp"
@@ -16,7 +17,7 @@ int main(int argc, char** argv) {

     spdlog::set_level(spdlog::level::debug);

-    const std::string_view model_root = argv[1];
-    auto backend = huggingface::tgi::backends::llama::CreateLlamaCppBackend(model_root);
-    fmt::print(fmt::emphasis::bold | fg(fmt::color::yellow), "Successfully initialized llama.cpp model from {}\n", model_root);
+    const auto modelPath = absolute(std::filesystem::path(argv[1]));
+    if(auto backend = huggingface::tgi::backends::llama::CreateLlamaCppBackend(modelPath); backend.has_value())
+        fmt::print(fmt::emphasis::bold | fg(fmt::color::yellow), "Successfully initialized llama.cpp model from {}\n", modelPath);
 }
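A side note on the extra #include <fmt/std.h> picked up here and in backend.cpp: once the model location is a std::filesystem::path rather than a std::string_view, fmt (and therefore spdlog's SPDLOG_INFO) needs the formatters provided by <fmt/std.h> to print it. A tiny sketch of that dependency, assuming fmt >= 9; the "model.gguf" path is a placeholder.

// Without <fmt/std.h> there is no fmt formatter for std::filesystem::path and this does not compile.
#include <filesystem>
#include <fmt/format.h>
#include <fmt/std.h>   // adds formatters for std::filesystem::path and other std types

int main() {
    const auto modelPath = std::filesystem::absolute("model.gguf");   // placeholder path
    fmt::print("Loading model from {}\n", modelPath);                 // formatted via fmt/std.h
    return 0;
}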