commit e4ad3066bc
parent 1111125092
Author: OlivierDehaene
Date:   2023-04-06 20:09:21 +02:00


@@ -39,7 +39,9 @@ class FlashLlama(FlashCausalLM):
             raise NotImplementedError("FlashLlama does not support quantization")
 
         tokenizer = LlamaTokenizer.from_pretrained(
-            model_id, revision=revision, padding_side="left",
+            model_id,
+            revision=revision,
+            padding_side="left",
         )
 
         config = AutoConfig.from_pretrained(
@@ -155,7 +157,9 @@ class FlashLlamaSharded(FlashLlama):
             raise NotImplementedError("FlashLlama does not support quantization")
 
         tokenizer = LlamaTokenizer.from_pretrained(
-            model_id, revision=revision, padding_side="left",
+            model_id,
+            revision=revision,
+            padding_side="left",
         )
 
        config = AutoConfig.from_pretrained(
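For context, both hunks only reflow the same `LlamaTokenizer.from_pretrained` call onto one keyword argument per line; behavior is unchanged. A minimal sketch of the equivalent call, assuming a `transformers` install and a hypothetical checkpoint name (the real `model_id` and `revision` are passed in by the caller of the model constructor):

```python
from transformers import LlamaTokenizer

# Hypothetical model_id and revision for illustration only; in the diff
# these values come from the FlashLlama constructor's arguments.
tokenizer = LlamaTokenizer.from_pretrained(
    "huggyllama/llama-7b",  # model_id (assumed example checkpoint)
    revision="main",         # revision (assumed)
    padding_side="left",     # pad prompts on the left, as in the diff
)
```

Splitting one argument per line with a trailing comma is a common style choice: later diffs that add or remove a single keyword touch only one line.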