mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-04-22 15:32:08 +00:00
* (backend) use parking_lot crate for RwLock fairness
# Conflicts:
# backends/trtllm/src/backend.rs
* (launcher) default new server::run parameters to false for now
* (chore) fmt ... why?
* (ffi) use const for GetSamplingConfig
* (server) expose new SchedulingError
* (trt)
* (build) setup ccache if available
* (ffi) add max_new_tokens parameters
* (backend) cleanup a bit
* (backend) expose PullNewTokens
* (ffi) cleanup again
* (ffi) add missing headers imports
* (ffi) add template specialization to catch and convert to Rust Result<T, tensorrt_llm::common::TllmException> (see the sketch after this commit message)
* (looper) new looper initial implementation
* (ffi) remove narrowing type warning
* (ffi) encode the provided user prompt within each request thread
* (misc) change scope identifiers
* (backend) implement the post_processor background thread
* (misc) missing Result types for Rust
* use blocking_recv in looper to consume awaiting_requests at max before pulling in a single step
* (server) forward auth_token to server::run
* (build) fetchcontent use archives instead of git
* (ffi) fix usage of wrong vector constructor making a capacity fill call
* (ffi) missing namespace for tle::Response
* (ffi) do not use reference capture in lambda as we are not capturing anything
* (backend) refactor & cleanup
* (Dockerfile.trtllm) delete for now
* (misc) simplify [make_]move_iterator by using c++20 type inference
* (misc) no need to move for uint32_t items
* (scheduler) rework submit/pull logic
* (post) impl postprocessing
* (misc) delete backend.rs
* (misc) rerun-if-changed all the cmake modules
* (misc) move to latest trtllm
* (fix): HOPPER_SM_MAJOR is 9 not 8
* (misc): build for sm_{75,80,86,89,90} by default
* (misc): build with trtllm 0.13.0
* (misc): increase verbosity of spdlog
* (fix): do not recreate the stateful hashmap at every iteration
* (misc): update dependency in trtllm dockerfile
* (misc): update dependency in trtllm dockerfile
* (misc): disable logging in release mode
* (misc): improve trtllm download script robustness
* (fix): more fixes for Dockerfile
* misc(cuda): require 12.6
* chore(cmake): use correct policy for download_timestamp
* feat(looper): check engine and executorWorker paths exist before creating the backend
* chore(cmake): download timestamp should be before URL
* feat(looper): minor optimizations to avoid growing the containers too much
* chore(trtllm): move dockerfile to right place
* chore(trtllm): disable tokenizer parallelism by default
* chore(trtllm): fmt
* chore(trtllm): post-rebase commit
* chore(trtllm): remove unused method
* feat(trtllm): cache maxNumTokens to avoid calling JSON every time
* misc(router): remove SchedulingError
* feat(trtllm): do not tokenize twice
* Revert "chore(trtllm): remove unused method"
This reverts commit 31747163
* chore(rebase): fix invalid references
* chore(router): add python dependency
* Lint.
* Fix bad rebase
---------
Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
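Among the commits above, the template specialization that converts TensorRT-LLM exceptions into a Rust Result follows a known pattern of the cxx crate: cxx lets a bridge override its rust::behavior::trycatch customization point so that C++ exceptions become Err values on the Rust side instead of aborting the process. A minimal sketch under that assumption (the exception header path is assumed; the backend's actual specialization may differ):

// Sketch: override cxx's trycatch so a tensorrt_llm::common::TllmException
// thrown on the C++ side of the bridge reaches Rust as Err(message)
// rather than terminating the process. Assumes the FFI layer is generated
// by the cxx crate and that TllmException lives at the path below (assumption).
#include <tensorrt_llm/common/tllmException.h>

namespace rust::behavior {
    template<typename Try, typename Fail>
    static void trycatch(Try &&func, Fail &&fail) noexcept try {
        func();
    } catch (tensorrt_llm::common::TllmException &e) {
        fail(e.what());
    }
}

On the Rust side, any extern "C++" function declared in the bridge as returning Result<T> then yields Err with the exception message whenever the C++ body throws.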
60 lines
1.7 KiB
C++
//
// Created by mfuntowicz on 7/23/24.
//

#ifndef TGI_TRTLLM_BACKEND_HARDWARE_H
#define TGI_TRTLLM_BACKEND_HARDWARE_H

#include <cstdint>
#include <limits>
#include <optional>

#include <fmt/base.h>
#include <spdlog/spdlog.h>
#include <nvml.h>

namespace huggingface::hardware::cuda {

#define AMPERE_SM_MAJOR 8
#define HOPPER_SM_MAJOR 9

    /**
     * Store information about the version of the CUDA Compute Capabilities detected on the device
     */
    struct CudaComputeCapabilities {
        int32_t major;
        int32_t minor;

        [[nodiscard]] constexpr bool isPostAmpere() const { return major >= AMPERE_SM_MAJOR; }

        [[nodiscard]] constexpr bool isPostHopper() const { return major >= HOPPER_SM_MAJOR; }
    };

    /**
     * Query the CUDA compute capabilities of device 0 through NVML.
     * NVML must already be initialized; returns {0, 0} if the query fails.
     */
    inline CudaComputeCapabilities GetCudaComputeCapabilities() {
        // Get the compute capabilities of the current hardware
        nvmlDevice_t device;
        CudaComputeCapabilities capabilities{0, 0};
        if (nvmlDeviceGetHandleByIndex_v2(0, &device) == NVML_SUCCESS) {
            SPDLOG_DEBUG("Successfully acquired nvmlDevice_t = 0");
            if (nvmlDeviceGetCudaComputeCapability(device, &capabilities.major, &capabilities.minor) == NVML_SUCCESS) {
                SPDLOG_INFO("Detected sm_{:d}{:d} compute capabilities", capabilities.major, capabilities.minor);
            }
        }

        return capabilities;
    }

    /**
     * Return the number of GPUs detected, or std::nullopt if the NVML query fails.
     */
    inline std::optional<size_t> GetNumDevices() {
        uint32_t numGpus = 0;
        if (nvmlDeviceGetCount_v2(&numGpus) == NVML_SUCCESS) {
            return std::optional(numGpus);
        } else {
            return std::nullopt;
        }
    }
} // namespace huggingface::hardware::cuda

#endif //TGI_TRTLLM_BACKEND_HARDWARE_H
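For context, a hypothetical caller of the two helpers above could look like the following. Both functions assume NVML is already initialized, so the nvmlInit_v2/nvmlShutdown calls and the main wrapper are illustrative additions, not part of the backend; the header filename is also assumed:

// Sketch of a caller: initialize NVML, then query the device count and
// compute capabilities via the helpers defined in this header.
#include <cstdlib>

#include "hardware.h" // assumed filename for the header above

int main() {
    // The helpers assume NVML is initialized before they run
    if (nvmlInit_v2() != NVML_SUCCESS) return EXIT_FAILURE;

    if (const auto numDevices = huggingface::hardware::cuda::GetNumDevices(); numDevices.has_value()) {
        SPDLOG_INFO("Found {:d} CUDA device(s)", *numDevices);

        const auto capabilities = huggingface::hardware::cuda::GetCudaComputeCapabilities();
        if (!capabilities.isPostAmpere()) {
            SPDLOG_WARN("Pre-Ampere GPU detected (sm_{:d}{:d}); newer kernels may be unavailable",
                        capabilities.major, capabilities.minor);
        }
    }

    nvmlShutdown();
    return EXIT_SUCCESS;
}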