import torch
from torch.nn import functional as F


class FastLinear(torch.nn.Module):
    def __init__(
        self,
        weight,
        bias,
    ) -> None:
        super().__init__()
        self.weight = torch.nn.Parameter(weight, requires_grad=False)
        if bias is not None:
            self.bias = torch.nn.Parameter(bias, requires_grad=False)
        else:
            self.bias = None

    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        weight = weights.get_tensor(f"{prefix}.weight")
        if bias:
            bias = weights.get_tensor(f"{prefix}.bias")
        else:
            bias = None
        return cls(weight, bias)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.linear(input, self.weight, self.bias)


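# Illustrative only: a minimal sketch of how FastLinear might be exercised on
# its own, assuming bare CPU tensors are enough for the demo. The helper below
# is hypothetical and is not called anywhere in this module.
def _fast_linear_example() -> torch.Tensor:
    # Build a 4-in / 8-out layer from bare tensors, mirroring the kind of
    # tensors FastLinear.load() would pull out of a checkpoint.
    weight = torch.randn(8, 4)
    bias = torch.zeros(8)
    layer = FastLinear(weight, bias)
    # Forward pass over a batch of 2 inputs; the output shape is (2, 8).
    return layer(torch.randn(2, 4))

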
def get_linear(weight, bias):
    # Weights that are loaded through methods that are not
    # quantization-aware are still bare tensors. We may want
    # to change this in the future.
    if isinstance(weight, torch.Tensor):
        return FastLinear(weight, bias)

    return weight.get_linear(bias)
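

# Illustrative only: get_linear() dispatches on the weight type. Bare tensors
# are wrapped in FastLinear; any other weight object is expected to expose its
# own get_linear(bias) method (as the quantization-aware weight classes do).
# The _PackedWeight class and the __main__ demo below are hypothetical
# sketches, not part of this module's API.
class _PackedWeight:
    """Toy stand-in for a quantization-aware weight object."""

    def __init__(self, weight: torch.Tensor):
        self.weight = weight

    def get_linear(self, bias):
        # A real implementation would return a quantized linear layer; this
        # sketch falls back to FastLinear just to stay runnable.
        return FastLinear(self.weight, bias)


if __name__ == "__main__":
    w, b = torch.randn(8, 4), torch.zeros(8)
    print(type(get_linear(w, b)).__name__)                 # FastLinear
    print(type(get_linear(_PackedWeight(w), b)).__name__)  # FastLinear, via _PackedWeight.get_linear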