Mirror of https://github.com/huggingface/text-generation-inference.git, synced 2025-04-21 14:52:20 +00:00
# What does this PR do?

Reworked the loading logic. The idea is to use cleaner loading code:

- Remove the need for `no_init_weights`
- Remove all the weird `bnb_linear`, `load_weights` and `post_load_weights` code.

New code layout:

- A new class `Weights` is in charge of loading the weights from multiple files into the appropriate (potentially sharded) tensors.
- TP layers are now "shells": they contain the code that knows which kind of sharding is needed plus the eventual `all_reduce`. They do not inherit from Linear; instead they contain some kind of Linear, which can be a FastLinear, a BnbLinear, or a GPTQ Linear next (see the sketch below).
- All modeling code is explicitly written for sharding; the process group is just a no-op for non-sharded code (which removes a lot of test cases).

---------

Co-authored-by: Ubuntu <ubuntu@ip-172-31-41-161.taildb5d.ts.net>
Co-authored-by: Ubuntu <ubuntu@ip-172-31-41-161.ec2.internal>
Co-authored-by: OlivierDehaene <olivier@huggingface.co>
Co-authored-by: OlivierDehaene <23298448+OlivierDehaene@users.noreply.github.com>
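The "shell" layer idea can be illustrated with a minimal sketch. The names below (`FastLinear`, `TensorParallelRowLinear`) and their implementations are illustrative only, assuming a row-parallel linear whose partial outputs are summed with `all_reduce`; the actual classes introduced by this PR may be organized differently.

```python
from typing import Optional

import torch
import torch.distributed as dist
from torch import nn


class FastLinear(nn.Module):
    """Plain linear over an already-loaded (possibly sharded) weight tensor."""

    def __init__(self, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
        super().__init__()
        self.weight = nn.Parameter(weight, requires_grad=False)
        self.bias = nn.Parameter(bias, requires_grad=False) if bias is not None else None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.linear(x, self.weight, self.bias)


class TensorParallelRowLinear(nn.Module):
    """Shell layer: knows the sharding scheme and holds an inner Linear.

    It does not inherit from nn.Linear; the contained module can be any
    Linear flavour (FastLinear here, a bitsandbytes or GPTQ linear elsewhere).
    """

    def __init__(self, linear: nn.Module, process_group: Optional[dist.ProcessGroup]):
        super().__init__()
        self.linear = linear
        self.process_group = process_group

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = self.linear(x)
        # With no process group (or a world size of 1) this is a no-op, so the
        # same modeling code serves both sharded and non-sharded runs.
        if self.process_group is not None and self.process_group.size() > 1:
            dist.all_reduce(out, group=self.process_group)
        return out
```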
20 lines
657 B
Python
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name="custom_kernels",
    ext_modules=[
        CUDAExtension(
            name="custom_kernels.fused_bloom_attention_cuda",
            sources=["custom_kernels/fused_bloom_attention_cuda.cu"],
            extra_compile_args=["-arch=compute_80", "-std=c++17"],
        ),
        CUDAExtension(
            name="custom_kernels.fused_attention_cuda",
            sources=["custom_kernels/fused_attention_cuda.cu"],
            extra_compile_args=["-arch=compute_80", "-std=c++17"],
        ),
    ],
    cmdclass={"build_ext": BuildExtension},
)
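For reference (not part of the file): a setup.py like this is typically built with `python setup.py build_ext --inplace` or installed with `pip install .` from the directory containing it. The `-arch=compute_80` flag targets compute capability 8.0 (Ampere, e.g. A100), so it would need adjusting for other GPU generations.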