Update server/text_generation_server/layers/attention/common.py

Co-authored-by: OlivierDehaene <olivier@huggingface.co>
Author: Nicolas Patry (committed by GitHub)
Date: 2024-08-29 11:59:31 +02:00
Commit: d77f5f2eff (parent 4b375004c9)


@@ -38,8 +38,6 @@ if ATTENTION in {"flashinfer", "flashdecoding"}:
         assert max_q is not None
         assert max_k is not None
         cu_seqlen_k = torch.zeros(shape[-1] + 1, device=device, dtype=torch.int32)
-        # if max_q is not None and max_q < 1000 and max_q > 1:
-        #     import ipdb;ipdb.set_trace()
         # cuda graphs don't like this and this is necessary to clamp within mistral
         # Although FA2 might not want the clamping
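For context, the hunk above sits in the code that builds cumulative key sequence-length offsets (cu_seqlen_k) for the varlen attention kernels, where lengths are clamped so CUDA graphs keep fixed, bounded values. Below is a minimal sketch of that pattern, not the actual common.py code: the helper name build_cu_seqlen_k, the input_lengths tensor of per-request key lengths, and the clamping bound max_k are assumptions for illustration only.

import torch

def build_cu_seqlen_k(input_lengths: torch.Tensor, max_k: int) -> torch.Tensor:
    """Sketch: cumulative key-length offsets for varlen attention kernels."""
    device = input_lengths.device
    # One extra leading slot so entry 0 is 0 and entry i+1 is the total
    # key length of the first i+1 requests.
    cu_seqlen_k = torch.zeros(
        input_lengths.shape[0] + 1, device=device, dtype=torch.int32
    )
    # Clamp before the cumulative sum; per the comment in the hunk, CUDA graphs
    # need bounded values (e.g. a sliding-window limit for Mistral), even though
    # FA2 might not strictly need the clamping.
    cu_seqlen_k[1:] = torch.cumsum(
        input_lengths.clamp(max=max_k), dim=0, dtype=torch.int32
    )
    return cu_seqlen_k

# Example: three requests with key lengths 3, 5, and 9, clamped at 8.
lengths = torch.tensor([3, 5, 9], dtype=torch.int32)
print(build_cu_seqlen_k(lengths, max_k=8))  # tensor([ 0,  3,  8, 16], dtype=torch.int32)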