Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-09-11 20:34:54 +00:00.
Apply suggestions from code review
This commit is 18d978ba0f; its parent is 81d0def84a.
@ -309,7 +309,7 @@ async fn main() -> Result<(), RouterError> {
|
||||
let mut tokenizer = Tokenizer::from_file(filename).ok();
|
||||
if let Some(tokenizer) = &mut tokenizer {
|
||||
if let Some(class) = &tokenizer_config.tokenizer_class {
|
||||
if (class == "LlamaTokenizer" || class == "LlamaTokenizerFast") && (tokenizer.get_post_processor().is_none() || tokenizer.get_post_processor().unwrap().added_tokens(false) == 0) {
|
||||
if (class == "LlamaTokenizer" || class == "LlamaTokenizerFast"){
|
||||
if let Ok(post_processor) = create_post_processor(tokenizer, &tokenizer_config) {
|
||||
tracing::info!("Overriding LlamaTokenizer with TemplateProcessing to follow python override defined in https://github.com/huggingface/transformers/blob/4aa17d00690b7f82c95bb2949ea57e22c35b4336/src/transformers/models/llama/tokenization_llama_fast.py#L203-L205");
|
||||
tokenizer.with_post_processor(post_processor);
|
||||
|
Loading…
Reference in New Issue
Block a user