Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-24 00:12:08 +00:00)
Apply suggestions from code review
Co-authored-by: drbh <david.richard.holtz@gmail.com>
parent 8d0220a695
commit b80593bfa3
@@ -1498,7 +1498,6 @@ fn main() -> Result<(), LauncherError> {
    let config: Config = config.into();

    match config.head_dim {
        Some(h) if h == 64 || h == 128 || h == 256 => {
            // std::env::set_var("ATTENTION", "flashdecoding");
            if args.lora_adapters.is_some() {
                tracing::info!("Disabling prefix caching because of lora adapters");
                std::env::set_var("USE_PREFIX_CACHING", "0");
            }
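The set_var calls in this hunk only matter because the Python server process reads these variables at startup. A minimal sketch of how the server side might consume them, assuming the defaults shown here (ATTENTION and USE_PREFIX_CACHING are the real variable names from this diff; the parsing logic and default values are assumptions, not TGI's actual code):

    # Sketch only: reading the flags the launcher exports above.
    import os

    # Attention backend selected by the launcher; default is an assumption.
    ATTENTION = os.environ.get("ATTENTION", "flashinfer")

    # The launcher sets USE_PREFIX_CACHING=0 when LoRA adapters are loaded,
    # so anything other than "0" means prefix caching stays enabled.
    PREFIX_CACHING = os.environ.get("USE_PREFIX_CACHING", "1") != "0"

    if __name__ == "__main__":
        print(f"attention={ATTENTION} prefix_caching={PREFIX_CACHING}")

Passing configuration through environment variables like this keeps the Rust launcher and the Python server decoupled: the launcher can veto a feature without the server needing a matching CLI flag.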
@@ -233,7 +233,6 @@ if ATTENTION == "flashinfer":
        causal=True,
        softcap=0.0,
    ):
        # assert window_size_left == -1, "Windowing is not supported with flash infer"
        from text_generation_server.layers.attention.flashinfer import (
            prefill_with_paged_kv_state,
        )
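For readers unfamiliar with the softcap keyword visible in this hunk: a value of 0.0 disables capping, while a positive value squashes the attention logits with tanh before the softmax (the scheme popularized by Gemma 2). Below is a minimal PyTorch sketch of those semantics only; it is an illustration, not TGI's flashinfer-backed implementation, and the function name is hypothetical:

    # Hypothetical reference implementation showing what `causal` and
    # `softcap` mean; assumes query and key sequences have equal length.
    import math
    import torch

    def naive_attention(
        q: torch.Tensor,  # [seq, heads, head_dim]
        k: torch.Tensor,  # [seq, heads, head_dim]
        v: torch.Tensor,  # [seq, heads, head_dim]
        causal: bool = True,
        softcap: float = 0.0,
    ) -> torch.Tensor:
        # Scaled dot-product logits: [heads, seq_q, seq_k].
        scores = torch.einsum("qhd,khd->hqk", q, k) / math.sqrt(q.shape[-1])
        if softcap > 0.0:
            # Soft capping: bound logits to (-softcap, softcap) via tanh.
            scores = softcap * torch.tanh(scores / softcap)
        if causal:
            seq_q, seq_k = scores.shape[-2:]
            mask = torch.ones(
                seq_q, seq_k, dtype=torch.bool, device=scores.device
            ).tril()
            scores = scores.masked_fill(~mask, float("-inf"))
        probs = torch.softmax(scores, dim=-1)
        return torch.einsum("hqk,khd->qhd", probs, v)

A fused kernel such as flashinfer applies the same capping inside the kernel instead of materializing the full score matrix, which is why softcap is plumbed through as a plain keyword argument here.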