mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-04-22 15:32:08 +00:00
Fix test_pass_through_tokenizer (#117)
Co-authored-by: Sylwester Fraczek <sfraczek@habana.ai>
This commit is contained in:
parent
d957e32601
commit
757c12dbac
@ -10,6 +10,16 @@ from text_generation_server.utils.tokens import (
|
||||
)
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
|
||||
import pytest
|
||||
@pytest.fixture
def skip_tokenizer_env_var():
    """Set ``SKIP_TOKENIZER_IN_TGI=true`` for the duration of a test.

    Yield-style fixture: the environment variable is set before the test
    runs and the previous state is restored on teardown.

    Fixes over the original:
      * teardown no longer raises ``KeyError`` if the test itself removed
        the variable (uses ``pop(..., None)`` instead of ``del``);
      * a pre-existing value of the variable is restored instead of being
        silently discarded;
      * cleanup is wrapped in ``try/finally`` so it runs even if the
        consuming test errors out.
    """
    import os

    key = "SKIP_TOKENIZER_IN_TGI"
    # Remember the prior value (None if unset) so teardown can restore it.
    previous = os.environ.get(key)
    os.environ[key] = "true"
    try:
        yield
    finally:
        if previous is None:
            # Variable was not set before the test: remove it (tolerating
            # the case where the test already deleted it).
            os.environ.pop(key, None)
        else:
            os.environ[key] = previous
|
||||
|
||||
def test_stop_sequence_criteria():
|
||||
criteria = StopSequenceCriteria("/test;")
|
||||
|
||||
@ -71,8 +81,7 @@ def test_batch_top_tokens():
|
||||
assert topn_tok_logprobs[4] == [-1, -2, -3, -3, -4]
|
||||
|
||||
|
||||
|
||||
def test_pass_through_tokenizer():
|
||||
def test_pass_through_tokenizer(skip_tokenizer_env_var):
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
'meta-llama/Llama-2-7b-chat-hf',
|
||||
revision=None,
|
||||
|
Loading…
Reference in New Issue
Block a user