From cc929530c270d9ea540e8b1133d43432cbf5bce7 Mon Sep 17 00:00:00 2001
From: Joel Lamy-Poirier
Date: Wed, 3 May 2023 14:28:49 -0400
Subject: [PATCH] cleanup

---
 server/text_generation_server/utils/tokens.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/server/text_generation_server/utils/tokens.py b/server/text_generation_server/utils/tokens.py
index 08e090b4..6609d926 100644
--- a/server/text_generation_server/utils/tokens.py
+++ b/server/text_generation_server/utils/tokens.py
@@ -1,6 +1,5 @@
 import re
 import torch
 
-from loguru import logger
 
 from transformers import (
@@ -49,7 +48,6 @@ class NextTokenChooser:
         seed=0,
         device="cpu",
     ):
-        #logger.info(f"AAAA {watermark} {temperature} {repetition_penalty} {top_k} {top_p} {typical_p} {do_sample} {seed} {device}")
         warpers = LogitsProcessorList()
         # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
         # all samplers can be found in `generation_utils_samplers.py`