import torch

from typing import Optional

from text_generation_server.models.flash_mistral import BaseFlashMistral
from text_generation_server.models.custom_modeling.flash_mixtral_modeling import (
    MixtralConfig,
    FlashMixtralForCausalLM,
)


class FlashMixtral(BaseFlashMistral):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        super(FlashMixtral, self).__init__(
            config_cls=MixtralConfig,
            model_cls=FlashMixtralForCausalLM,
            model_id=model_id,
            revision=revision,
            quantize=quantize,
            speculator=speculator,
            dtype=dtype,
            trust_remote_code=trust_remote_code,
        )

    @property
    def supports_adapter_loading(self) -> bool:
        # Mixtral cannot inherit loading adapters from FlashMistral
        # since it does not have the same adapter layer mapping
        # TODO: implement the loading of adapters for Mixtral
        return False
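

# Usage sketch (illustrative only, not part of the upstream file). In
# text-generation-inference this class is normally selected by the server's
# model-dispatch logic rather than instantiated directly; the commented-out
# call below is a rough, hypothetical example assuming a local Mixtral
# checkpoint and a CUDA device with enough memory for the weights:
#
#     model = FlashMixtral(
#         model_id="mistralai/Mixtral-8x7B-v0.1",
#         revision=None,
#         quantize=None,
#         speculator=None,
#         dtype=torch.bfloat16,
#         trust_remote_code=False,
#     )
#     assert model.supports_adapter_loading is False  # adapters not supported yet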