vllm.lora.layers.utils

LoRAMapping dataclass

Source code in vllm/lora/layers/utils.py
@dataclass
class LoRAMapping:
    index_mapping: tuple[int, ...]
    prompt_mapping: tuple[int, ...]
    is_prefill: bool = False

    def __post_init__(self):
        self.index_mapping = tuple(self.index_mapping)
        self.prompt_mapping = tuple(self.prompt_mapping)

index_mapping instance-attribute

index_mapping: tuple[int, ...]

is_prefill class-attribute instance-attribute

is_prefill: bool = False

prompt_mapping instance-attribute

prompt_mapping: tuple[int, ...]

__init__

__init__(
    index_mapping: tuple[int, ...],
    prompt_mapping: tuple[int, ...],
    is_prefill: bool = False,
) -> None

__post_init__

__post_init__()
Source code in vllm/lora/layers/utils.py
def __post_init__(self):
    self.index_mapping = tuple(self.index_mapping)
    self.prompt_mapping = tuple(self.prompt_mapping)
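
For orientation, here is a minimal construction sketch. The interpretation of the two mappings (index_mapping holding one LoRA id per input token, prompt_mapping one LoRA id per prompt, with 0 standing for the base model) is an assumption based on the field names, and the concrete ids below are made up.

from vllm.lora.layers.utils import LoRAMapping

# Hypothetical prefill batch: three prompts with 4, 2 and 3 tokens,
# served with LoRA ids 1, 0 (base model) and 2 respectively.
mapping = LoRAMapping(
    index_mapping=[1, 1, 1, 1, 0, 0, 2, 2, 2],  # assumed: one id per input token
    prompt_mapping=[1, 0, 2],                   # assumed: one id per prompt
    is_prefill=True,
)

# __post_init__ coerces the list arguments into tuples.
assert isinstance(mapping.index_mapping, tuple)
assert isinstance(mapping.prompt_mapping, tuple)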

_fully_sharded_can_replace

_fully_sharded_can_replace(can_replace)

Decorator that adds the condition that fully sharded LoRAs are enabled; intended to wrap can_replace_layer().

Source code in vllm/lora/layers/utils.py
def _fully_sharded_can_replace(can_replace):
    """
    decorator which adds the condition of fully sharded loras
    intended to wrap can_replace_layer()
    """

    def dec(*args, **kwargs):
        return (can_replace(*args, **kwargs)
                and kwargs["lora_config"].fully_sharded_loras)

    return dec
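
A hedged sketch of how the decorated check behaves: it only passes when the base condition holds and fully_sharded_loras is enabled. The config stub and the check body below are illustrative stand-ins, not vLLM's real classes.

from vllm.lora.layers.utils import _fully_sharded_can_replace

class _StubLoRAConfig:  # illustrative stand-in for vLLM's LoRAConfig
    fully_sharded_loras = True

@_fully_sharded_can_replace
def can_replace_layer(source_layer, lora_config, packed_modules_list, model_config):
    return True  # base condition; in practice an isinstance() check on source_layer

# True: the base condition holds and fully sharded LoRA is enabled.
can_replace_layer(None, lora_config=_StubLoRAConfig(),
                  packed_modules_list=[], model_config=None)

Note that the wrapper reads kwargs["lora_config"], so lora_config must be passed as a keyword argument; passing it positionally would raise a KeyError.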

_get_lora_device

_get_lora_device(base_layer: Module) -> device

Returns the device on which to place the LoRA tensors.

Source code in vllm/lora/layers/utils.py
def _get_lora_device(base_layer: nn.Module) -> torch.device:
    # code borrowed from https://github.com/fmmoret/vllm/blob/fm-support-lora-on-quantized-models/vllm/lora/layers.py#L34
    """Returns the device for where to place the LoRA tensors."""
    # unquantizedLinear
    if hasattr(base_layer, "weight"):
        return base_layer.weight.device
    # Compressed Tensor
    elif hasattr(base_layer, "weight_packed"):
        return base_layer.weight_packed.device
    # GPTQ/AWQ
    elif hasattr(base_layer, "qweight"):
        return base_layer.qweight.device
    # HQQ marlin
    elif hasattr(base_layer, "W_q"):
        return base_layer.W_q.device
    else:
        raise ValueError(f"Unsupported base layer: {base_layer}")
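
The helper probes the attribute under which each backend stores its weights (plain, compressed-tensors, GPTQ/AWQ, or HQQ marlin layers). A small illustrative use, assuming the LoRA factors should live on the same device as the base layer; the rank and shapes are made up.

import torch
import torch.nn as nn

from vllm.lora.layers.utils import _get_lora_device

base_layer = nn.Linear(1024, 1024)     # has .weight, so the first branch applies
device = _get_lora_device(base_layer)  # -> device of base_layer.weight (CPU here)

# Hypothetical rank-16 LoRA factors allocated on the same device.
lora_a = torch.zeros(1024, 16, device=device)
lora_b = torch.zeros(16, 1024, device=device)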

_not_fully_sharded_can_replace

_not_fully_sharded_can_replace(can_replace)

Decorator that adds the condition that fully sharded LoRAs are not in use; intended to wrap can_replace_layer().

Source code in vllm/lora/layers/utils.py
def _not_fully_sharded_can_replace(can_replace):
    """
    decorator which adds the condition of not using fully sharded loras
    intended to wrap can_replace_layer()
    """

    def dec(*args, **kwargs):
        decorate = kwargs.pop("decorate") if "decorate" in kwargs else True
        condition = (not kwargs["lora_config"].fully_sharded_loras
                     if decorate else True)
        return can_replace(*args, **kwargs) and condition

    return dec
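
A hedged sketch of the extra decorate switch: with decorate left at its default of True, the check is additionally gated on fully sharded LoRA being disabled; passing decorate=False skips that gate and returns the base condition unchanged. The config stub below is illustrative.

from vllm.lora.layers.utils import _not_fully_sharded_can_replace

class _StubLoRAConfig:  # illustrative stand-in for vLLM's LoRAConfig
    fully_sharded_loras = True

@_not_fully_sharded_can_replace
def can_replace_layer(source_layer, lora_config, packed_modules_list, model_config):
    return True  # base condition

# False: fully sharded LoRA is enabled, so the default gate rejects the layer.
can_replace_layer(None, lora_config=_StubLoRAConfig(),
                  packed_modules_list=[], model_config=None)

# True: decorate=False bypasses the gate and returns the base condition.
can_replace_layer(None, lora_config=_StubLoRAConfig(),
                  packed_modules_list=[], model_config=None, decorate=False)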