Mirror of https://github.com/huggingface/text-generation-inference.git
Synced 2025-09-11 12:24:53 +00:00

misc(backend): indent

Commit: 104885c71c
Parent: 8d9580669d
@@ -159,6 +159,7 @@ namespace huggingface::tgi::backends::trtllm {
 
     public:
         backend_t(std::filesystem::path &engines_folder, std::filesystem::path &executor_worker_path);
+
         backend_t(std::filesystem::path &&engines_folder, std::filesystem::path &&executor_worker_path)
                 : backend_t(engines_folder, executor_worker_path) {};
 
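For readers skimming the hunk above: the second overload is a delegating constructor, so the rvalue-reference variant forwards to the lvalue one and both share a single definition. A minimal standalone sketch of the same pattern (widget_t is an illustrative stand-in, not repo code):

#include <filesystem>

// Illustrative stand-in for the two-constructor pattern above.
struct widget_t {
    explicit widget_t(std::filesystem::path &folder);  // primary constructor

    // Delegating constructor: inside the member initializer, `folder` is an
    // lvalue, so it binds to the `path &` overload; no second body is needed.
    explicit widget_t(std::filesystem::path &&folder)
            : widget_t(folder) {}
};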
@@ -171,7 +172,8 @@ namespace huggingface::tgi::backends::trtllm {
          */
         [[nodiscard("Discarded executor request_id needs to be assigned")]]
         std::expected<request_id_t, backend_error_t>
-        submit(std::span<const token_id_t> token_ids, generation_params_t generation_params, sampling_params_t sampling_params) noexcept;
+        submit(std::span<const token_id_t> token_ids, generation_params_t generation_params,
+               sampling_params_t sampling_params) noexcept;
 
         /**
          * Query the number of tokens available across all in-flight generations
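The reflowed submit declaration reports failure through std::expected instead of throwing. A hedged caller sketch, assuming C++23 <expected> and the types this header declares (track_request and report_error are hypothetical helpers, not part of the backend):

#include <expected>
#include <span>

using namespace huggingface::tgi::backends::trtllm;

void track_request(request_id_t id);              // hypothetical helper
void report_error(const backend_error_t &error);  // hypothetical helper

void enqueue(backend_t &backend, std::span<const token_id_t> prompt,
             generation_params_t generation_params, sampling_params_t sampling_params) {
    // submit is noexcept: failures come back through the expected's error
    // channel, and [[nodiscard]] forces the caller to consume the request id.
    if (auto request_id = backend.submit(prompt, generation_params, sampling_params)) {
        track_request(*request_id);
    } else {
        report_error(request_id.error());
    }
}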
@@ -198,21 +200,26 @@ namespace huggingface::tgi::backends::trtllm {
      * Create a TensorRT-LLM executor from a workspace
      */
     const auto executor_factory_initializer = [](const backend_workspace_t &workspace) -> tle::Executor {
-        return { workspace.engines_folder(), tensorrt_llm::executor::ModelType::kDECODER_ONLY, workspace.executor_config() };
+        return {workspace.engines_folder(), tensorrt_llm::executor::ModelType::kDECODER_ONLY,
+                workspace.executor_config()};
     };
 }
 
 /**
  * Helper structures to define formatting strategies for various types in the backend
  */
-template <> struct fmt::formatter<huggingface::tgi::backends::trtllm::generation_params_t>: formatter<string_view> {
-    auto format(huggingface::tgi::backends::trtllm::generation_params_t const& c, format_context& ctx) const -> format_context::iterator {
+template<>
+struct fmt::formatter<huggingface::tgi::backends::trtllm::generation_params_t> : formatter<string_view> {
+    auto format(huggingface::tgi::backends::trtllm::generation_params_t const &c,
+                format_context &ctx) const -> format_context::iterator {
         return fmt::format_to(ctx.out(), "generation_params_t{{ max_new_tokens={:d} }}", c.max_new_tokens);
     }
 };
 
-template <> struct fmt::formatter<huggingface::tgi::backends::trtllm::sampling_params_t>: formatter<string_view> {
-    auto format(huggingface::tgi::backends::trtllm::sampling_params_t const& c, format_context& ctx) const -> format_context::iterator {
+template<>
+struct fmt::formatter<huggingface::tgi::backends::trtllm::sampling_params_t> : formatter<string_view> {
+    auto format(huggingface::tgi::backends::trtllm::sampling_params_t const &c,
+                format_context &ctx) const -> format_context::iterator {
         return fmt::format_to(
             ctx.out(),
             "sampling_params_t{{ top_k={:d}, top_p={:.3f}, repetition_penalty={:.3f}, frequency_penalty={:.3f}, temperature={:.3f}, seed={:d} }}",
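The two specializations above follow fmt's standard extension pattern: inherit parse() from formatter<string_view> and provide only format(). A self-contained sketch of the same pattern with a made-up point_t (requires the fmt library; point_t is not repo code):

#include <fmt/core.h>
#include <fmt/format.h>

struct point_t { int x; int y; };

// Inheriting from formatter<string_view> reuses its parse(), so only
// format() needs to be written, as in the backend's specializations.
template <>
struct fmt::formatter<point_t> : formatter<string_view> {
    auto format(point_t const &p, format_context &ctx) const -> format_context::iterator {
        return fmt::format_to(ctx.out(), "point_t{{ x={:d}, y={:d} }}", p.x, p.y);
    }
};

int main() {
    fmt::print("{}\n", point_t{3, 4});  // prints: point_t{ x=3, y=4 }
}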
@@ -220,4 +227,5 @@ template <> struct fmt::formatter<huggingface::tgi::backends::trtllm::sampling_p
         );
     }
 };
+
 #endif