From abe4e4b1ccbd83ee3652823591fd0bf0005d1f43 Mon Sep 17 00:00:00 2001
From: Dong Shin
Date: Sun, 16 Jul 2023 21:09:02 +0900
Subject: [PATCH] fix: LlamaTokenizerFast to AutoTokenizer at flash_llama.py

---
 server/text_generation_server/models/flash_llama.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py
index 417ccabb..11c77e14 100644
--- a/server/text_generation_server/models/flash_llama.py
+++ b/server/text_generation_server/models/flash_llama.py
@@ -2,8 +2,8 @@ import torch
 import torch.distributed
 
 from opentelemetry import trace
-from transformers import AutoConfig
-from transformers.models.llama import LlamaTokenizer, LlamaTokenizerFast
+from transformers import AutoConfig, AutoTokenizer
+from transformers.models.llama import LlamaTokenizer
 from typing import Optional
 
 from text_generation_server.models import FlashCausalLM
@@ -44,7 +44,7 @@ class FlashLlama(FlashCausalLM):
                 trust_remote_code=trust_remote_code,
             )
         except Exception:
-            tokenizer = LlamaTokenizerFast.from_pretrained(
+            tokenizer = AutoTokenizer.from_pretrained(
                 model_id,
                 revision=revision,
                 padding_side="left",