mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-04-19 22:02:06 +00:00
Revert the Cohere tokenizer change (for now using a revision instead).
This commit is contained in:
parent: fc7ea202c2
commit: 5e2932552c
@@ -1748,7 +1748,7 @@ pub async fn run(
     let mut tokenizer = Tokenizer::from_file(filename).ok();
     if let Some(tokenizer) = &mut tokenizer {
         if let Some(class) = &tokenizer_config.tokenizer_class {
-            if class == "LlamaTokenizer" || class == "LlamaTokenizerFast" || class == "CohereTokenizerFast"{
+            if class == "LlamaTokenizer" || class == "LlamaTokenizerFast"{
                 if let Ok(post_processor) = create_post_processor(tokenizer, &tokenizer_config) {
                     tracing::info!("Overriding LlamaTokenizer with TemplateProcessing to follow python override defined in https://github.com/huggingface/transformers/blob/4aa17d00690b7f82c95bb2949ea57e22c35b4336/src/transformers/models/llama/tokenization_llama_fast.py#L203-L205");
                     tokenizer.with_post_processor(post_processor);
Loading…
Reference in New Issue
Block a user