Mirror of https://github.com/huggingface/text-generation-inference.git (synced 2025-04-22 15:32:08 +00:00)
Upgrading exl2. (#2415)
* Upgrading exl2.
* Fixing the other pathways.
* Fix idefics.
This commit is contained in:
parent: bae161ab84
commit: 4baa6ff59f
.gitignore (vendored): 2 lines changed
@@ -9,7 +9,7 @@ backends/client/src/v3/pb
 
 # ROCm auto-generated files
 *.hip
-server/exllamav2_kernels/exllamav2_kernels/hip/
+server/exllamav2
 server/exllama_kernels/exllama_kernels/hip/
 server/exllama_kernels/exllama_kernels/hip_func/
 *_hip.cuh
@@ -93,6 +93,7 @@
 causal-conv1d
 click
 einops
+exllamav2
 fbgemm-gpu
 flashinfer
 flash-attn
@@ -6,6 +6,7 @@ include Makefile-eetq
 include Makefile-selective-scan
 include Makefile-lorax-punica
 include Makefile-fbgemm
+include Makefile-exllamav2
 
 unit-tests:
 	pytest -s -vv -m "not private" tests
server/Makefile-exllamav2 (new file): 12 lines added
@@ -0,0 +1,12 @@
+exllamav2_commit := v0.1.8
+
+build-exllamav2:
+	git clone https://github.com/turboderp/exllamav2.git exllamav2 && \
+		cd exllamav2 && git fetch && git checkout $(exllamav2_commit) && \
+		git submodule update --init --recursive && \
+		pip install -r requirements.txt && \
+		CUDA_ARCH_LIST="8.0;9.0a" NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_90a,code=sm_90a" TORCH_CUDA_ARCH_LIST="8.0;9.0a" python setup.py build
+
+install-exllamav2: build-exllamav2
+	cd exllamav2/ && \
+		CUDA_ARCH_LIST="8.0;9.0a" NVCC_GENCODE="-gencode=arch=compute_80,code=sm_80 -gencode=arch=compute_90a,code=sm_90a" TORCH_CUDA_ARCH_LIST="8.0;9.0a" python setup.py install
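After running the new install target, a quick sanity check could look like the sketch below. It is not part of this PR and assumes the installed package exposes __version__, as upstream turboderp/exllamav2 does.

# Minimal post-install check (a sketch, not part of this PR); assumes the
# package installed by `install-exllamav2` exposes __version__.
import exllamav2

print(exllamav2.__version__)  # expected to match the pinned v0.1.8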
@@ -511,6 +511,7 @@ class CausalLM(Model):
         config_class=AutoConfig,
         batch_class=CausalLMBatch,
     ):
+        self.quantize = quantize
         self.batch_class = batch_class
         self.process_group, rank, world_size = initialize_torch_distributed()
         if torch.cuda.is_available():
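The same one-line addition is repeated across the model classes in the hunks below: each constructor now records the requested quantization scheme on the instance before weights are loaded, so callers that only hold the model object can read it back later. A simplified sketch of the pattern, using a hypothetical class name (the real classes are CausalLM, FlashCausalLM, IDEFICSSharded, IdeficsCausalLM and Seq2SeqLM):

# Hypothetical illustration of the pattern applied in this PR.
from typing import Optional


class ModelSketch:
    def __init__(self, model_id: str, quantize: Optional[str] = None):
        # Remember the requested scheme for downstream consumers (e.g. the
        # gRPC service); weight loading and device setup would follow.
        self.quantize = quantize
        self.model_id = model_id


m = ModelSketch("bigscience/bloom-560m", quantize="exl2")
print(m.quantize)  # -> exl2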
@@ -872,6 +872,7 @@ class FlashCausalLM(Model):
         head_size: Optional[int] = None,
         skip_special_tokens: bool = True,
     ):
+        self.quantize = quantize
         self.process_group, rank, world_size = initialize_torch_distributed()
         if torch.cuda.is_available():
             device = torch.device(f"cuda:{rank}")
@@ -33,6 +33,7 @@ class IDEFICSSharded(IdeficsCausalLM):
         dtype: Optional[torch.dtype] = None,
         trust_remote_code: bool = False,
     ):
+        self.quantize = quantize
         self.process_group, rank, world_size = initialize_torch_distributed()
         if torch.cuda.is_available():
             device = torch.device(f"cuda:{rank}")
@@ -580,6 +580,7 @@ class IdeficsCausalLM(Model):
         dtype: Optional[torch.dtype] = None,
         trust_remote_code: bool = False,
     ):
+        self.quantize = quantize
         from text_generation_server.models.custom_modeling.idefics_modeling import (
             IdeficsForVisionText2Text,
         )
@@ -553,6 +553,7 @@ class Seq2SeqLM(Model):
         tokenizer_class=AutoTokenizer,
         aliases=None,
     ):
+        self.quantize = quantize
         self.process_group, rank, world_size = initialize_torch_distributed()
         if torch.cuda.is_available():
             device = torch.device(f"cuda:{rank}")
@@ -50,12 +50,12 @@ class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
         self,
         model: Model,
         cache: Cache,
-        quantize: Optional[str],
         server_urls: List[str],
     ):
         self.cache = cache
         self.model = model
-        self.quantize = quantize
+        # Quantize is resolved during model loading
+        self.quantize = model.quantize
         self.server_urls = server_urls
         # For some reason, inference_mode does not work well with GLOO which we use on CPU
         if model.device.type == "cuda":
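Because every model now carries its own quantize attribute, the gRPC servicer no longer takes quantize as a constructor argument and instead reads the value that was resolved while the model loaded. A hypothetical, self-contained illustration of that design choice (not TGI code; class names are made up):

# Illustration only: the scheme requested up front may be refined during
# loading, so the serving layer trusts the model object's resolved value.
from typing import Optional


class LoadedModel:
    def __init__(self, requested_quantize: Optional[str]):
        # Pretend loading discovered an exl2-quantized checkpoint.
        self.quantize: Optional[str] = requested_quantize or "exl2"


class ServiceSketch:
    def __init__(self, model: LoadedModel):
        # Quantize is resolved during model loading; read it back here.
        self.quantize = model.quantize


service = ServiceSketch(LoadedModel(requested_quantize=None))
print(service.quantize)  # -> exl2

The serve() call site in the next hunk drops the extra argument accordingly.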
@@ -255,7 +255,7 @@ def serve(
         ],
     )
     generate_pb2_grpc.add_TextGenerationServiceServicer_to_server(
-        TextGenerationService(model, Cache(), quantize, server_urls), server
+        TextGenerationService(model, Cache(), server_urls), server
     )
     SERVICE_NAMES = (
         generate_pb2.DESCRIPTOR.services_by_name["TextGenerationService"].full_name,