
vllm.model_executor.layers.fla.ops

Modules:

chunk
chunk_delta_h
chunk_o
chunk_scaled_dot_kkt
cumsum
fused_recurrent
index
l2norm
layernorm_guard
op
solve_tril
utils
wy_fast

__all__ module-attribute

__all__ = [
    "RMSNormGated",
    "chunk_gated_delta_rule",
    "fused_recurrent_gated_delta_rule",
]

RMSNormGated

Bases: Module

Source code in vllm/model_executor/layers/fla/ops/layernorm_guard.py
class RMSNormGated(nn.Module):

    def __init__(
        self,
        hidden_size,
        eps: float = 1e-5,
        group_size: Optional[int] = None,
        norm_before_gate: bool = False,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        """If group_size is not None, we do GroupNorm with each group having group_size elements.
        group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group).
        """
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
        self.register_parameter("bias", None)
        self.group_size = group_size
        self.norm_before_gate = norm_before_gate
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.ones_(self.weight)

    def forward(self, x, z=None):
        """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))
        """
        return rmsnorm_fn(x,
                          self.weight,
                          self.bias,
                          z=z,
                          eps=self.eps,
                          group_size=self.group_size,
                          norm_before_gate=self.norm_before_gate)

eps instance-attribute

eps = eps

group_size instance-attribute

group_size = group_size

norm_before_gate instance-attribute

norm_before_gate = norm_before_gate

weight instance-attribute

weight = Parameter(empty(hidden_size, **factory_kwargs))

__init__

__init__(
    hidden_size,
    eps: float = 1e-05,
    group_size: Optional[int] = None,
    norm_before_gate: bool = False,
    device: Optional[device] = None,
    dtype: Optional[dtype] = None,
)

If group_size is not None, we do GroupNorm with each group having group_size elements. group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group).
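To make the group_size semantics concrete, here is a minimal pure-PyTorch reference sketch of grouped RMS normalization (an illustration of the behaviour described above, not the Triton kernel itself; the helper name grouped_rms_norm and its exact numerics are assumptions):

    from typing import Optional

    import torch


    def grouped_rms_norm(x: torch.Tensor,
                         weight: torch.Tensor,
                         eps: float = 1e-5,
                         group_size: Optional[int] = None) -> torch.Tensor:
        hidden = x.shape[-1]
        g = hidden if group_size is None else group_size
        assert hidden % g == 0, "hidden_size must be divisible by group_size"
        # RMS statistic is computed independently within each group of `g` channels;
        # group_size=None reduces over all `hidden` channels (a single group).
        xg = x.float().reshape(*x.shape[:-1], hidden // g, g)
        rstd = torch.rsqrt(xg.pow(2).mean(dim=-1, keepdim=True) + eps)
        # the learned per-channel weight is applied after normalization
        return ((xg * rstd).reshape_as(x) * weight.float()).to(x.dtype)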

Source code in vllm/model_executor/layers/fla/ops/layernorm_guard.py
def __init__(
    self,
    hidden_size,
    eps: float = 1e-5,
    group_size: Optional[int] = None,
    norm_before_gate: bool = False,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
):
    """If group_size is not None, we do GroupNorm with each group having group_size elements.
    group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group).
    """
    factory_kwargs = {"device": device, "dtype": dtype}
    super().__init__()
    self.eps = eps
    self.weight = nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
    self.register_parameter("bias", None)
    self.group_size = group_size
    self.norm_before_gate = norm_before_gate
    self.reset_parameters()

forward

forward(x, z=None)

If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))

Source code in vllm/model_executor/layers/fla/ops/layernorm_guard.py
def forward(self, x, z=None):
    """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))
    """
    return rmsnorm_fn(x,
                      self.weight,
                      self.bias,
                      z=z,
                      eps=self.eps,
                      group_size=self.group_size,
                      norm_before_gate=self.norm_before_gate)

reset_parameters

reset_parameters()
Source code in vllm/model_executor/layers/fla/ops/layernorm_guard.py
def reset_parameters(self):
    torch.nn.init.ones_(self.weight)
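
Putting the pieces together, a brief usage sketch of the gated path (device, dtype, and shapes are illustrative assumptions; the underlying rmsnorm_fn Triton kernel expects to run on a GPU):

    import torch

    from vllm.model_executor.layers.fla.ops import RMSNormGated

    hidden_size = 512
    norm = RMSNormGated(hidden_size,
                        eps=1e-5,
                        norm_before_gate=False,
                        device='cuda',
                        dtype=torch.bfloat16)
    x = torch.randn(2, 128, hidden_size, device='cuda', dtype=torch.bfloat16)
    z = torch.randn_like(x)   # gate branch, e.g. the output of a parallel projection
    y = norm(x, z)            # norm_before_gate=False -> computes norm(x * silu(z))
    assert y.shape == x.shape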

chunk_gated_delta_rule

chunk_gated_delta_rule(
    q: Tensor,
    k: Tensor,
    v: Tensor,
    g: Tensor,
    beta: Tensor,
    scale: float = None,
    initial_state: Tensor = None,
    output_final_state: bool = False,
    cu_seqlens: Optional[LongTensor] = None,
    head_first: bool = False,
    use_qk_l2norm_in_kernel: bool = False,
)

Parameters:

Name Type Description Default
q Tensor

queries of shape [B, T, H, K] if head_first=False else [B, H, T, K].

required
k Tensor

keys of shape [B, T, H, K] if head_first=False else [B, H, T, K].

required
v Tensor

values of shape [B, T, H, V] if head_first=False else [B, H, T, V].

required
g Tensor

(forget) gating tensor (in log space!) of shape [B, T, H] if head_first=False else [B, H, T].

required
beta Tensor

betas of shape [B, T, H] if head_first=False else [B, H, T].

required
scale Optional[float]

Scale factor for the RetNet attention scores. If not provided, it will default to 1 / sqrt(K). Default: None.

None
initial_state Optional[Tensor]

Initial state of shape [N, H, K, V] for N input sequences. For equal-length input sequences, N equals the batch size B. Default: None.

None
output_final_state Optional[bool]

Whether to output the final state of shape [N, H, K, V]. Default: False.

False
cu_seqlens LongTensor

Cumulative sequence lengths of shape [N+1] used for variable-length training, consistent with the FlashAttention API.

None
head_first Optional[bool]

Whether the inputs are in the head-first format, which is not supported for variable-length inputs. Default: False.

False

Returns:

Name Type Description
o Tensor

Outputs of shape [B, T, H, V] if head_first=False else [B, H, T, V].

final_state Tensor

Final state of shape [N, H, K, V] if output_final_state=True else None.

Examples:

    >>> import torch
    >>> import torch.nn.functional as F
    >>> from einops import rearrange
    >>> from fla.ops.gated_delta_rule import chunk_gated_delta_rule
    # inputs with equal lengths
    >>> B, T, H, K, V = 4, 2048, 4, 512, 512
    >>> q = torch.randn(B, T, H, K, dtype=torch.bfloat16, device='cuda')
    >>> k = F.normalize(torch.randn(B, T, H, K, dtype=torch.bfloat16, device='cuda'), p=2, dim=-1)
    >>> v = torch.randn(B, T, H, V, dtype=torch.bfloat16, device='cuda')
    >>> beta = torch.rand(B, T, H, dtype=torch.bfloat16, device='cuda').sigmoid()
    >>> g = F.logsigmoid(torch.rand(B, T, H, dtype=torch.bfloat16, device='cuda'))
    >>> h0 = torch.randn(B, H, K, V, dtype=torch.bfloat16, device='cuda')
    >>> o, ht = chunk_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
            output_final_state=True
        )
    # for variable-length inputs, the batch size `B` is expected to be 1 and `cu_seqlens` is required
    >>> q, k, v, beta, g = map(lambda x: rearrange(x, 'b t ... -> 1 (b t) ...'), (q, k, v, beta, g))
    # for a batch with 4 sequences, `cu_seqlens` with 5 start/end positions are expected
    >>> cu_seqlens = q.new_tensor([0, 2048, 4096, 6144, 8192], dtype=torch.long)
    >>> o_var, ht_var = chunk_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
            output_final_state=True,
            cu_seqlens=cu_seqlens
        )

Source code in vllm/model_executor/layers/fla/ops/chunk.py
@torch.compiler.disable
def chunk_gated_delta_rule(q: torch.Tensor,
                           k: torch.Tensor,
                           v: torch.Tensor,
                           g: torch.Tensor,
                           beta: torch.Tensor,
                           scale: float = None,
                           initial_state: torch.Tensor = None,
                           output_final_state: bool = False,
                           cu_seqlens: Optional[torch.LongTensor] = None,
                           head_first: bool = False,
                           use_qk_l2norm_in_kernel: bool = False):
    r"""
    Args:
        q (torch.Tensor):
            queries of shape `[B, T, H, K]` if `head_first=False` else `[B, H, T, K]`.
        k (torch.Tensor):
            keys of shape `[B, T, H, K]` if `head_first=False` else `[B, H, T, K]`.
        v (torch.Tensor):
            values of shape `[B, T, H, V]` if `head_first=False` else `[B, H, T, V]`.
        g (torch.Tensor):
            (forget) gating tensor (in log space!) of shape `[B, T, H]` if `head_first=False` else `[B, H, T]`.
        beta (torch.Tensor):
            betas of shape `[B, T, H]` if `head_first=False` else `[B, H, T]`.
        scale (Optional[float]):
            Scale factor for the RetNet attention scores.
            If not provided, it will default to `1 / sqrt(K)`. Default: `None`.
        initial_state (Optional[torch.Tensor]):
            Initial state of shape `[N, H, K, V]` for `N` input sequences.
            For equal-length input sequences, `N` equals the batch size `B`.
            Default: `None`.
        output_final_state (Optional[bool]):
            Whether to output the final state of shape `[N, H, K, V]`. Default: `False`.
        cu_seqlens (torch.LongTensor):
            Cumulative sequence lengths of shape `[N+1]` used for variable-length training,
            consistent with the FlashAttention API.
        head_first (Optional[bool]):
            Whether the inputs are in the head-first format, which is not supported for variable-length inputs.
            Default: `False`.

    Returns:
        o (torch.Tensor):
            Outputs of shape `[B, T, H, V]` if `head_first=False` else `[B, H, T, V]`.
        final_state (torch.Tensor):
            Final state of shape `[N, H, K, V]` if `output_final_state=True` else `None`.

    Examples::
        >>> import torch
        >>> import torch.nn.functional as F
        >>> from einops import rearrange
        >>> from fla.ops.gated_delta_rule import chunk_gated_delta_rule
        # inputs with equal lengths
        >>> B, T, H, K, V = 4, 2048, 4, 512, 512
        >>> q = torch.randn(B, T, H, K, dtype=torch.bfloat16, device='cuda')
        >>> k = F.normalize(torch.randn(B, T, H, K, dtype=torch.bfloat16, device='cuda'), p=2, dim=-1)
        >>> v = torch.randn(B, T, H, V, dtype=torch.bfloat16, device='cuda')
        >>> beta = torch.rand(B, T, H, dtype=torch.bfloat16, device='cuda').sigmoid()
        >>> g = F.logsigmoid(torch.rand(B, T, H, dtype=torch.bfloat16, device='cuda'))
        >>> h0 = torch.randn(B, H, K, V, dtype=torch.bfloat16, device='cuda')
        >>> o, ht = chunk_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
            output_final_state=True
        )
        # for variable-length inputs, the batch size `B` is expected to be 1 and `cu_seqlens` is required
        >>> q, k, v, beta, g = map(lambda x: rearrange(x, 'b t ... -> 1 (b t) ...'), (q, k, v, beta, g))
        # for a batch with 4 sequences, `cu_seqlens` with 5 start/end positions are expected
        >>> cu_seqlens = q.new_tensor([0, 2048, 4096, 6144, 8192], dtype=torch.long)
        >>> o_var, ht_var = chunk_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
            output_final_state=True,
            cu_seqlens=cu_seqlens
        )
    """
    assert q.dtype == k.dtype == v.dtype
    assert q.dtype != torch.float32, "ChunkGatedDeltaRuleFunction does not support float32. Please use bfloat16."
    assert len(
        beta.shape
    ) == 3, "beta must be of shape [B, T, H] if head_first=False, or [B, H, T] otherwise."

    if head_first:
        raise DeprecationWarning(
            "head_first is deprecated and will be removed in a future version. "
            "Please use head_first=False for now instead.",
            stacklevel=2)
        q, k, v, beta, g = map(
            lambda x: rearrange(x, 'b h t ... -> b t h ...'),
            (q, k, v, beta, g))
    if not head_first and q.shape[1] < q.shape[2]:
        warnings.warn(
            f"Input tensor shape suggests potential format mismatch: seq_len ({q.shape[1]}) < num_heads ({q.shape[2]}). "
            "This may indicate the inputs were passed in head-first format [B, H, T, ...] "
            "when head_first=False was specified. "
            "Please verify your input tensor format matches the expected shape [B, T, H, ...].",
            stacklevel=2)
    if cu_seqlens is not None:
        if q.shape[0] != 1:
            raise ValueError(
                f"The batch size is expected to be 1 rather than {q.shape[0]} when using `cu_seqlens`."
                f"Please flatten variable-length inputs before processing.")
        if initial_state is not None and initial_state.shape[0] != len(
                cu_seqlens) - 1:
            raise ValueError(
                f"The number of initial states is expected to be equal to the number of input sequences, "
                f"i.e., {len(cu_seqlens) - 1} rather than {initial_state.shape[0]}."
            )
    if scale is None:
        scale = k.shape[-1]**-0.5
    o, final_state = ChunkGatedDeltaRuleFunction.apply(
        q, k, v, g, beta, scale, initial_state, output_final_state, cu_seqlens,
        use_qk_l2norm_in_kernel)
    if head_first:
        o = rearrange(o, 'b t h ... -> b h t ...')
    return o, final_state
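
As a cross-check on the parameter semantics, below is a naive per-token reference of the gated delta-rule recurrence this chunked kernel computes (a sketch based on the argument descriptions above; the Triton kernel uses a chunked parallel formulation, so outputs agree only up to numerical tolerance, and this loop is far too slow for real use):

    import torch


    def naive_gated_delta_rule_reference(q, k, v, g, beta, scale=None, initial_state=None):
        # q, k: [B, T, H, K]; v: [B, T, H, V]; g, beta: [B, T, H] (g in log space)
        B, T, H, K = q.shape
        V = v.shape[-1]
        scale = K ** -0.5 if scale is None else scale
        S = (torch.zeros(B, H, K, V, dtype=torch.float32, device=q.device)
             if initial_state is None else initial_state.float().clone())
        o = torch.empty(B, T, H, V, dtype=q.dtype, device=q.device)
        for t in range(T):
            q_t = q[:, t].float() * scale                      # [B, H, K]
            k_t = k[:, t].float()                              # [B, H, K]
            v_t = v[:, t].float()                              # [B, H, V]
            S = S * g[:, t].float().exp()[..., None, None]     # forget gate (decay)
            # delta-rule update: write beta * (v - S^T k) along the key direction
            v_err = v_t - torch.einsum('bhkv,bhk->bhv', S, k_t)
            S = S + torch.einsum('bhk,bhv->bhkv', k_t, beta[:, t].float()[..., None] * v_err)
            o[:, t] = torch.einsum('bhkv,bhk->bhv', S, q_t).to(q.dtype)
        return o, S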

fused_recurrent_gated_delta_rule

fused_recurrent_gated_delta_rule(
    q: Tensor,
    k: Tensor,
    v: Tensor,
    g: Tensor,
    beta: Tensor = None,
    scale: float = None,
    initial_state: Tensor = None,
    inplace_final_state: bool = True,
    cu_seqlens: Optional[LongTensor] = None,
    ssm_state_indices: Optional[Tensor] = None,
    num_accepted_tokens: Optional[Tensor] = None,
    use_qk_l2norm_in_kernel: bool = False,
) -> tuple[Tensor, Tensor]

Parameters:

Name Type Description Default
q Tensor

queries of shape [B, T, H, K].

required
k Tensor

keys of shape [B, T, H, K].

required
v Tensor

values of shape [B, T, HV, V]. GVA is applied if HV > H.

required
g Tensor

g (decays) of shape [B, T, HV].

required
beta Tensor

betas of shape [B, T, HV].

None
scale Optional[float]

Scale factor for the RetNet attention scores. If not provided, it will default to 1 / sqrt(K). Default: None.

None
initial_state Optional[Tensor]

Initial state of shape [N, HV, K, V] for N input sequences. For equal-length input sequences, N equals the batch size B. Default: None.

None
inplace_final_state bool

Whether to store the final state in-place to save memory. Default: True.

True
cu_seqlens LongTensor

Cumulative sequence lengths of shape [N+1] used for variable-length training, consistent with the FlashAttention API.

None
ssm_state_indices Optional[Tensor]

Indices to map the input sequences to the initial/final states.

None
num_accepted_tokens Optional[Tensor]

Number of accepted tokens for each sequence during decoding.

None

Returns:

Name Type Description
o Tensor

Outputs of shape [B, T, HV, V].

final_state Tensor

Final state of shape [N, HV, K, V].

Examples:

    >>> import torch
    >>> import torch.nn.functional as F
    >>> from einops import rearrange
    >>> from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
    # inputs with equal lengths
    >>> B, T, H, HV, K, V = 4, 2048, 4, 8, 512, 512
    >>> q = torch.randn(B, T, H, K, device='cuda')
    >>> k = F.normalize(torch.randn(B, T, H, K, device='cuda'), p=2, dim=-1)
    >>> v = torch.randn(B, T, HV, V, device='cuda')
    >>> g = F.logsigmoid(torch.rand(B, T, HV, device='cuda'))
    >>> beta = torch.rand(B, T, HV, device='cuda').sigmoid()
    >>> h0 = torch.randn(B, HV, K, V, device='cuda')
    >>> o, ht = fused_recurrent_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
        )
    # for variable-length inputs, the batch size `B` is expected to be 1 and `cu_seqlens` is required
    >>> q, k, v, g, beta = map(lambda x: rearrange(x, 'b t ... -> 1 (b t) ...'), (q, k, v, g, beta))
    # for a batch with 4 sequences, `cu_seqlens` with 5 start/end positions are expected
    >>> cu_seqlens = q.new_tensor([0, 2048, 4096, 6144, 8192], dtype=torch.long)
    >>> o_var, ht_var = fused_recurrent_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
            cu_seqlens=cu_seqlens
        )
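
Complementing the example above, a short sketch of the grouped-value (GVA) path where HV > H, so each query/key head is shared by HV // H value heads (my reading of the GVA note above; shapes and the single-token step length are illustrative assumptions):

    import torch
    import torch.nn.functional as F

    from vllm.model_executor.layers.fla.ops import fused_recurrent_gated_delta_rule

    B, T, H, HV, K, V = 2, 1, 4, 8, 128, 128      # decode-style step: T == 1
    q = torch.randn(B, T, H, K, device='cuda')
    k = F.normalize(torch.randn(B, T, H, K, device='cuda'), p=2, dim=-1)
    v = torch.randn(B, T, HV, V, device='cuda')
    g = F.logsigmoid(torch.rand(B, T, HV, device='cuda'))
    beta = torch.rand(B, T, HV, device='cuda').sigmoid()
    h0 = torch.zeros(B, HV, K, V, device='cuda')   # recurrent state carried across steps

    o, ht = fused_recurrent_gated_delta_rule(
        q, k, v, g, beta,
        initial_state=h0,
        use_qk_l2norm_in_kernel=False,   # k is already L2-normalized above
    )
    # o:  [B, T, HV, V]
    # ht: [B, HV, K, V]; with inplace_final_state=True (the default) the final
    #     state is stored in place to save memory.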

Source code in vllm/model_executor/layers/fla/ops/fused_recurrent.py
def fused_recurrent_gated_delta_rule(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    g: torch.Tensor,
    beta: torch.Tensor = None,
    scale: float = None,
    initial_state: torch.Tensor = None,
    inplace_final_state: bool = True,
    cu_seqlens: Optional[torch.LongTensor] = None,
    ssm_state_indices: Optional[torch.Tensor] = None,
    num_accepted_tokens: Optional[torch.Tensor] = None,
    use_qk_l2norm_in_kernel: bool = False,
) -> tuple[torch.Tensor, torch.Tensor]:
    r"""
    Args:
        q (torch.Tensor):
            queries of shape `[B, T, H, K]`.
        k (torch.Tensor):
            keys of shape `[B, T, H, K]`.
        v (torch.Tensor):
            values of shape `[B, T, HV, V]`.
            GVA is applied if `HV > H`.
        g (torch.Tensor):
            g (decays) of shape `[B, T, HV]`.
        beta (torch.Tensor):
            betas of shape `[B, T, HV]`.
        scale (Optional[float]):
            Scale factor for the RetNet attention scores.
            If not provided, it will default to `1 / sqrt(K)`. Default: `None`.
        initial_state (Optional[torch.Tensor]):
            Initial state of shape `[N, HV, K, V]` for `N` input sequences.
            For equal-length input sequences, `N` equals the batch size `B`.
            Default: `None`.
        inplace_final_state (bool):
            Whether to store the final state in-place to save memory.
            Default: `True`.
        cu_seqlens (torch.LongTensor):
            Cumulative sequence lengths of shape `[N+1]` used for variable-length training,
            consistent with the FlashAttention API.
        ssm_state_indices (Optional[torch.Tensor]):
            Indices to map the input sequences to the initial/final states.
        num_accepted_tokens (Optional[torch.Tensor]):
            Number of accepted tokens for each sequence during decoding.

    Returns:
        o (torch.Tensor):
            Outputs of shape `[B, T, HV, V]`.
        final_state (torch.Tensor):
            Final state of shape `[N, HV, K, V]`.

    Examples::
        >>> import torch
        >>> import torch.nn.functional as F
        >>> from einops import rearrange
        >>> from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
        # inputs with equal lengths
        >>> B, T, H, HV, K, V = 4, 2048, 4, 8, 512, 512
        >>> q = torch.randn(B, T, H, K, device='cuda')
        >>> k = F.normalize(torch.randn(B, T, H, K, device='cuda'), p=2, dim=-1)
        >>> v = torch.randn(B, T, HV, V, device='cuda')
        >>> g = F.logsigmoid(torch.rand(B, T, HV, device='cuda'))
        >>> beta = torch.rand(B, T, HV, device='cuda').sigmoid()
        >>> h0 = torch.randn(B, HV, K, V, device='cuda')
        >>> o, ht = fused_recurrent_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
        )
        # for variable-length inputs, the batch size `B` is expected to be 1 and `cu_seqlens` is required
        >>> q, k, v, g, beta = map(lambda x: rearrange(x, 'b t ... -> 1 (b t) ...'), (q, k, v, g, beta))
        # for a batch with 4 sequences, `cu_seqlens` with 5 start/end positions are expected
        >>> cu_seqlens = q.new_tensor([0, 2048, 4096, 6144, 8192], dtype=torch.long)
        >>> o_var, ht_var = fused_recurrent_gated_delta_rule(
            q, k, v, g, beta,
            initial_state=h0,
            cu_seqlens=cu_seqlens
        )
    """
    if cu_seqlens is not None and q.shape[0] != 1:
        raise ValueError(
            f"The batch size is expected to be 1 rather than {q.shape[0]} when using `cu_seqlens`."
            f"Please flatten variable-length inputs before processing.")
    if scale is None:
        scale = k.shape[-1]**-0.5
    else:
        assert scale > 0, "scale must be positive"
    if beta is None:
        beta = torch.ones_like(q[..., 0])
    o, final_state = FusedRecurrentFunction.apply(
        q,
        k,
        v,
        g,
        beta,
        scale,
        initial_state,
        inplace_final_state,
        cu_seqlens,
        ssm_state_indices,
        num_accepted_tokens,
        use_qk_l2norm_in_kernel,
    )
    return o, final_state