append to all

This commit is contained in:
OlivierDehaene 2023-04-03 18:51:20 +02:00
parent 05aee8b503
commit 0523b4891f

@@ -46,6 +46,7 @@ __all__ = [
 if FLASH_ATTENTION:
     __all__.append(FlashNeoX)
     __all__.append(FlashNeoXSharded)
+    __all__.append(FlashSantacoder)
 # The flag below controls whether to allow TF32 on matmul. This flag defaults to False
 # in PyTorch 1.12 and later.
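
For context, here is a minimal sketch of the conditional-export pattern this commit extends. The try/except import probe and the module paths are illustrative assumptions, not the repository's actual `__init__.py`; only the `__all__.append(...)` lines mirror the diff above.

```python
# Sketch of conditional model registration in __all__ (assumed layout).
# Module paths and the FLASH_ATTENTION probe are illustrative.

FLASH_ATTENTION = False
try:
    # These imports are assumed to succeed only when the flash-attention
    # CUDA kernels are installed (hypothetical module paths).
    from .flash_neox import FlashNeoX, FlashNeoXSharded
    from .flash_santacoder import FlashSantacoder

    FLASH_ATTENTION = True
except ImportError:
    pass

__all__ = [
    # ...model classes that are always exported...
]

if FLASH_ATTENTION:
    # Matching the diff, the class objects themselves are appended
    # (rather than string names), so the flash variants are only
    # registered when the kernels are actually usable.
    __all__.append(FlashNeoX)
    __all__.append(FlashNeoXSharded)
    __all__.append(FlashSantacoder)  # the line added by this commit
```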