File            | Last commit message                     | Last commit date
custom_modeling | Working version.                        | 2023-05-11 12:05:35 +00:00
__init__.py     | [WIP] Adding GPTQ support for llama     | 2023-05-11 12:05:35 +00:00
bloom.py        | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
causal_lm.py    | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
flash_llama.py  | Dump.                                   | 2023-05-11 12:05:35 +00:00
flash_neox.py   | feat(server): shard token decode (#303) | 2023-05-10 15:48:21 +02:00
galactica.py    | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
gpt_neox.py     | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
model.py        | feat(server): shard token decode (#303) | 2023-05-10 15:48:21 +02:00
opt.py          | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
santacoder.py   | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
seq2seq_lm.py   | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
t5.py           | feat(server): use float16 (#304)        | 2023-05-10 15:51:10 +02:00
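Most files in this listing were last touched by the float16 commit (#304), which moves model weight loading to half precision. As a rough illustration only, the sketch below shows what loading a causal LM in float16 can look like; it is a minimal example assuming a Hugging Face transformers-style loading path and a hypothetical checkpoint name, not the repository's actual loading code.

# Minimal sketch of float16 model loading, in the spirit of
# "feat(server): use float16 (#304)". The checkpoint name and the use of
# AutoModelForCausalLM here are illustrative assumptions, not this
# repository's real code path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bigscience/bloom-560m"  # hypothetical example checkpoint

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # load weights directly in half precision
)
model = model.eval().to("cuda" if torch.cuda.is_available() else "cpu")

Loading with torch_dtype=torch.float16 halves the memory footprint of the weights relative to float32, which is the usual motivation for a change like #304 on GPU inference servers.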