Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-22 15:32:08 +00:00.
Disable prefix caching for lora.
This commit is contained in:
parent
719d7b4d54
commit
f5ee062cbd
@@ -1500,6 +1500,10 @@ fn main() -> Result<(), LauncherError> {
         match config.head_dim {
             Some(h) if h == 64 || h == 128 || h == 256 => {
                 // std::env::set_var("ATTENTION", "flashdecoding");
+                if args.lora_adapters.is_some() {
+                    tracing::info!("Disabling prefix caching because of lora adapters");
+                    std::env::set_var("USE_PREFIX_CACHING", "0");
+                }
             }
             _ => {
                 tracing::info!("Forcing flash decoding because head dim is not supported by flashinfer, also disabling prefix caching");
Loading…
Reference in New Issue
Block a user