mirror of
https://github.com/huggingface/text-generation-inference.git
synced 2025-04-19 22:02:06 +00:00
* feat: add ruff and resolve issue * fix: update client exports and adjust after rebase * fix: adjust syntax to avoid circular import * fix: adjust client ruff settings * fix: lint and refactor import check and avoid model enum as global names * fix: improve fbgemm_gpu check and lints * fix: update lints * fix: prefer comparing model enum over str * fix: adjust lints and ignore specific rules * fix: avoid unneeded quantize check
31 lines
727 B
Python
# Origin: https://github.com/predibase/lorax
|
|
# Path: lorax/server/lorax_server/adapters/config.py
|
|
# License: Apache License Version 2.0, January 2004
|
|
|
|
from abc import ABC, abstractmethod
|
|
from dataclasses import dataclass
|
|
from typing import Dict, Set, Tuple
|
|
|
|
import torch
|
|
|
|
from text_generation_server.adapters.weights import AdapterWeights
|
|
|
|
|
|
@dataclass
class ModuleMap:
    """Grouping of adapter weight tensors that belong to a single target module.

    Maps a model module (e.g. one attention projection layer) to the named
    weight tensors an adapter provides for it.
    """

    # Fully qualified name of the target module within the base model.
    module_name: str
    # Per-weight mapping: weight key -> (tensor, original parameter name).
    module_weights: Dict[str, Tuple[torch.Tensor, str]]
|
|
|
|
|
|
@dataclass
class AdapterConfig(ABC):
    """Abstract base configuration for an adapter (e.g. LoRA) applied to a model.

    Concrete subclasses describe how a particular adapter format maps its
    weights onto the base model's modules.
    """

    # Name or local path of the base model the adapter was trained against.
    base_model_name_or_path: str

    @abstractmethod
    def map_weights_for_model(
        self,
        adapter_weights: Dict[int, AdapterWeights],
        # Fix: Tuple[str] means "exactly one str"; the intended type is a
        # variable-length tuple of weight names, i.e. Tuple[str, ...].
        weight_names: Tuple[str, ...],
    ) -> Tuple[ModuleMap, Set[str]]:
        """Map raw adapter weights onto the base model's modules.

        Args:
            adapter_weights: adapter weight sets keyed by adapter index.
            weight_names: names of the base-model weights eligible for mapping.

        Returns:
            A tuple of the resulting module map and the set of weight names
            that were actually used.
        """
|