| File | Last commit | Date |
|------|-------------|------|
| __init__.py | feat(server): flash attention v2 (#624) | 2023-07-18 16:21:18 +02:00 |
| causal_lm.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| flash_neox.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| flash_rw.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| flash_santacoder.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| galactica.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| gpt_neox.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| model.py | Merge branch 'main' into gptq-cuda-kernels | 2023-07-19 16:58:54 +02:00 |
| mpt.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| opt.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| rw.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| santacoder.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| seq2seq_lm.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |
| t5.py | support all, test llama | 2023-07-13 15:41:57 +00:00 |