Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-09-09 19:34:53 +00:00)
Removing dead variables.

Commit: 89ff4e901a
Parent: 91e674bb85
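Every hunk below touches the same constructor shape: a sharded model resolves its rank and world size for torch distributed, picks a per-rank CUDA device, and defaults to float16. The following is a minimal sketch of that pattern, not the repo's code: ShardedModelSketch is a hypothetical stand-in, the rank is read from environment variables instead of the repo's initialize_torch_distributed() helper, and the CPU/float32 fallback is an assumption rather than something shown in this diff.

# Sketch only: ShardedModelSketch is a hypothetical stand-in for the sharded
# model classes touched in this commit (BLOOMSharded, FlashLlamaSharded, ...).
import os
from typing import Optional

import torch


class ShardedModelSketch:
    def __init__(
        self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None
    ):
        # The real classes obtain rank/world_size from initialize_torch_distributed();
        # plain environment variables keep this sketch self-contained.
        rank = int(os.environ.get("RANK", "0"))
        world_size = int(os.environ.get("WORLD_SIZE", "1"))
        self.master = rank == 0
        if torch.cuda.is_available():
            # One shard per GPU: the rank selects the local device.
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16
        else:
            # Assumed fallback; the diff only shows the CUDA branch.
            device = torch.device("cpu")
            dtype = torch.float32
        self.device, self.dtype, self.world_size = device, dtype, world_size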
@@ -72,7 +72,6 @@ class BLOOMSharded(BLOOM):
        quantize: Optional[str] = None,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16

@@ -29,7 +29,6 @@ tracer = trace.get_tracer(__name__)


class FlashLlama(FlashCausalLM):
    def __init__(self, model_id: str, revision: Optional[str] = None, quantize=False):
        self.past_pad = None
        if torch.cuda.is_available():
            device = torch.device("cuda")
            dtype = torch.float16

@@ -150,9 +149,7 @@ class FlashLlamaSharded(FlashLlama):
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
    ):
        self.past_pad = None
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16

@@ -33,9 +33,7 @@ class FlashNeoXSharded(FlashNeoX):
    def __init__(
        self, model_id: str, revision: Optional[str] = None, quantize: bool = False
    ):
        self.past_pad = None
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16

@@ -152,4 +150,4 @@ class FlashNeoXSharded(FlashNeoX):
                    else:
                        module._buffers[param_name] = tensor

-        model.post_load_weights(quantize)
+        # model.post_load_weights(quantize)

@@ -28,7 +28,6 @@ tracer = trace.get_tracer(__name__)


class FlashSantacoder(FlashCausalLM):
    def __init__(self, model_id: str, revision: Optional[str] = None, quantize=False):
        self.past_pad = None
        if torch.cuda.is_available():
            device = torch.device("cuda")
            dtype = torch.float16

@@ -173,9 +172,7 @@ class FlashSantacoderSharded(FlashSantacoder):
    def __init__(
        self, model_id: str, revision: Optional[str] = None, quantize: bool = False
    ):
        self.past_pad = None
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16

@@ -199,7 +199,6 @@ class GalacticaSharded(Galactica):
        quantize: Optional[str] = None,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16

@@ -38,7 +38,6 @@ class GPTNeoxSharded(CausalLM):
        quantize: Optional[str] = None,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16

@@ -51,7 +51,6 @@ class OPTSharded(OPT):
        self, model_id: str, revision: Optional[str] = None, quantize: bool = False
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16

@@ -38,7 +38,6 @@ class T5Sharded(Seq2SeqLM):
        quantize: Optional[str] = None,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        self.master = rank == 0
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16