diff --git a/backends/trtllm/include/ffi.h b/backends/trtllm/include/ffi.h
index 6127d29a..c2b29500 100644
--- a/backends/trtllm/include/ffi.h
+++ b/backends/trtllm/include/ffi.h
@@ -14,11 +14,8 @@ namespace huggingface::tgi::backends {
 
 #include "backends/trtllm/src/lib.rs.h"
 
-
 namespace huggingface::tgi::backends {
 
-//    struct GenerationContext;
-
     class TensorRtLlmBackendImpl : public TensorRtLlmBackend {
     public:
         /***
@@ -28,12 +25,6 @@ namespace huggingface::tgi::backends {
          */
         TensorRtLlmBackendImpl(const std::string_view &engineFolder, const std::string_view &executorWorker);
 
-        /***
-         *
-         * @return
-         */
-        bool IsReady() const;
-
         /***
         *
         * @param tokens
diff --git a/backends/trtllm/src/ffi.cpp b/backends/trtllm/src/ffi.cpp
index e55204ab..1179fc85 100644
--- a/backends/trtllm/src/ffi.cpp
+++ b/backends/trtllm/src/ffi.cpp
@@ -20,10 +20,6 @@ huggingface::tgi::backends::TensorRtLlmBackendImpl::TensorRtLlmBackendImpl(
 ) : TensorRtLlmBackend(engineFolder, executorWorker) {}
 
-bool huggingface::tgi::backends::TensorRtLlmBackendImpl::IsReady() const {
-    return TensorRtLlmBackend::IsReady();
-}
-
 uint64_t huggingface::tgi::backends::TensorRtLlmBackendImpl::Submit(
         rust::Slice<const uint32_t> tokens, uint32_t maxNewTokens, int32_t topK, float_t topP, float_t temperature,
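
Note on the Rust side of the bridge: the generated header included above, `backends/trtllm/src/lib.rs.h`, comes from a `cxx::bridge` module defined in the backend's Rust crate, so dropping `IsReady()` from `ffi.h`/`ffi.cpp` only holds together if the matching declaration is removed from that bridge as well. The sketch below is an assumption of what the trimmed bridge could look like, not the actual `lib.rs` from this PR; in particular the `ffi` module name, the `CreateTensorRtLlmBackend` factory, and the `Submit` parameter list (only the parameters visible in this excerpt) are illustrative.

```rust
// Hypothetical sketch of the Rust half of the cxx bridge after this change.
// Only what is visible in the diff above is mirrored here; names that do not
// appear in the diff (the `ffi` module, `CreateTensorRtLlmBackend`) are
// assumptions for illustration.
#[cxx::bridge(namespace = "huggingface::tgi::backends")]
mod ffi {
    unsafe extern "C++" {
        include!("backends/trtllm/include/ffi.h");

        /// Opaque handle to the C++ wrapper declared in ffi.h.
        type TensorRtLlmBackendImpl;

        /// Assumed factory: cxx cannot bind C++ constructors directly, so a
        /// free function returning a UniquePtr is the usual pattern.
        fn CreateTensorRtLlmBackend(
            engine_folder: &str,
            executor_worker: &str,
        ) -> UniquePtr<TensorRtLlmBackendImpl>;

        // `IsReady` is no longer declared here: with the pass-through deleted
        // from ffi.h/ffi.cpp, the readiness check is gone from the FFI surface.

        /// Lists only the `Submit` parameters visible in this excerpt; the
        /// real signature continues past the truncated line above.
        fn Submit(
            self: Pin<&mut TensorRtLlmBackendImpl>,
            tokens: &[u32],
            max_new_tokens: u32,
            top_k: i32,
            top_p: f32,
            temperature: f32,
        ) -> u64;
    }
}
```

For context, cxx maps a Rust `&[u32]` parameter to `rust::Slice<const uint32_t>` on the C++ side, which is why `Submit` in `ffi.cpp` receives the token ids as a `rust::Slice` rather than a pointer/length pair.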