Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-09-12 04:44:52 +00:00)
Update server/text_generation_server/layers/attention/common.py
Co-authored-by: OlivierDehaene <olivier@huggingface.co>
Parent: 4b375004c9
Commit: d77f5f2eff
@@ -38,8 +38,6 @@ if ATTENTION in {"flashinfer", "flashdecoding"}:
             assert max_q is not None
             assert max_k is not None
             cu_seqlen_k = torch.zeros(shape[-1] + 1, device=device, dtype=torch.int32)
-            # if max_q is not None and max_q < 1000 and max_q > 1:
-            #     import ipdb;ipdb.set_trace()

             # cuda graphs don't like this and this is necessary to clamp within mistral
             # Although FA2 might not want the clamping
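For context, the line that survives the hunk builds the cumulative key sequence lengths that flashinfer/flashdecoding-style kernels consume. Below is a minimal sketch, assuming a per-sequence `input_lengths` tensor and a hypothetical helper name `build_cu_seqlen_k` (neither taken from common.py), of how such a tensor is typically filled with prefix sums; it is not the repository's actual implementation.

    # Minimal sketch, not the actual TGI code: illustrates how the
    # cumulative key-length tensor seen in the hunk above is usually filled.
    import torch

    def build_cu_seqlen_k(input_lengths: torch.Tensor) -> torch.Tensor:
        # input_lengths: int32 tensor of per-sequence key lengths, shape [batch].
        cu_seqlen_k = torch.zeros(
            input_lengths.shape[-1] + 1,
            device=input_lengths.device,
            dtype=torch.int32,
        )
        # Prefix sums: cu_seqlen_k[i] is the offset where sequence i's keys
        # start, and cu_seqlen_k[-1] is the total number of keys.
        cu_seqlen_k[1:] = torch.cumsum(input_lengths, dim=-1, dtype=torch.int32)
        return cu_seqlen_k

    # Example: sequences of length 5, 3 and 7 give offsets [0, 5, 8, 15].
    print(build_cu_seqlen_k(torch.tensor([5, 3, 7], dtype=torch.int32)))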