vllm.model_executor.models.plamo2

Inference-only PLaMo2 model.

DenseMLP

Bases: Module

Source code in vllm/model_executor/models/plamo2.py
class DenseMLP(nn.Module):

    def __init__(
        self,
        config: Plamo2Config,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_up_proj = MergedColumnParallelLinear(
            self.hidden_size,
            [self.intermediate_size] * 2,
            bias=False,
            prefix=f"{prefix}.gate_up_proj",
            quant_config=quant_config,
            return_bias=False,
        )
        self.act = SiluAndMul()
        self.down_proj = RowParallelLinear(self.intermediate_size,
                                           self.hidden_size,
                                           bias=False,
                                           prefix=f"{prefix}.down_proj",
                                           quant_config=quant_config,
                                           return_bias=False)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        h = self.gate_up_proj(hidden_states)
        h = self.act(h)
        return self.down_proj(h)

act instance-attribute

act = SiluAndMul()

down_proj instance-attribute

down_proj = RowParallelLinear(
    intermediate_size,
    hidden_size,
    bias=False,
    prefix=f"{prefix}.down_proj",
    quant_config=quant_config,
    return_bias=False,
)

gate_up_proj instance-attribute

gate_up_proj = MergedColumnParallelLinear(
    hidden_size,
    [intermediate_size] * 2,
    bias=False,
    prefix=f"{prefix}.gate_up_proj",
    quant_config=quant_config,
    return_bias=False,
)

hidden_size instance-attribute

hidden_size = hidden_size

intermediate_size instance-attribute

intermediate_size = intermediate_size

__init__

__init__(
    config: Plamo2Config,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/plamo2.py
def __init__(
    self,
    config: Plamo2Config,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()
    self.hidden_size = config.hidden_size
    self.intermediate_size = config.intermediate_size
    self.gate_up_proj = MergedColumnParallelLinear(
        self.hidden_size,
        [self.intermediate_size] * 2,
        bias=False,
        prefix=f"{prefix}.gate_up_proj",
        quant_config=quant_config,
        return_bias=False,
    )
    self.act = SiluAndMul()
    self.down_proj = RowParallelLinear(self.intermediate_size,
                                       self.hidden_size,
                                       bias=False,
                                       prefix=f"{prefix}.down_proj",
                                       quant_config=quant_config,
                                       return_bias=False)

forward

forward(hidden_states: Tensor) -> Tensor
Source code in vllm/model_executor/models/plamo2.py
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    h = self.gate_up_proj(hidden_states)
    h = self.act(h)
    return self.down_proj(h)
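
Functionally, this block is a SwiGLU MLP: the fused gate_up projection output is split in half by SiluAndMul, the first half is gated through SiLU and multiplied with the second half, and the result is projected back down. A minimal single-GPU sketch of the equivalent computation in plain PyTorch (the sizes below are illustrative, not taken from a PLaMo2 checkpoint):

import torch
import torch.nn as nn
import torch.nn.functional as F

hidden_size, intermediate_size = 8, 16                      # illustrative sizes
gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)

x = torch.randn(2, hidden_size)
gate, up = gate_up_proj(x).chunk(2, dim=-1)                 # what SiluAndMul splits
y = down_proj(F.silu(gate) * up)                            # SwiGLU gating + down projection
print(y.shape)                                              # torch.Size([2, 8])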

Plamo2AttentionMixer

Bases: Module

Source code in vllm/model_executor/models/plamo2.py
class Plamo2AttentionMixer(nn.Module):

    def __init__(self,
                 *,
                 vllm_config: VllmConfig,
                 prefix: str = "",
                 **kwargs) -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = config.num_key_value_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than or equal to TP size, so we
            # partition the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = config.hidden_size_per_head
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5

        self.qkv_proj = QKVParallelLinear(
            config.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim,
                                        config.hidden_size,
                                        bias=False,
                                        quant_config=quant_config)

        self.rope_theta = config.rope_theta if hasattr(config,
                                                       "rope_theta") else 10000
        self.rope_scaling = config.rope_scaling if hasattr(
            config, "rope_scaling") else None
        max_position = config.max_position_embeddings
        if hasattr(vllm_config.model_config, "max_model_len") and isinstance(
                vllm_config.model_config.max_model_len, int):
            max_position = min(max_position,
                               vllm_config.model_config.max_model_len)

        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position,
            base=self.rope_theta,
            rope_scaling=self.rope_scaling,
        )
        self.q_norm = RMSNorm(config.hidden_size_per_head,
                              eps=config.rms_norm_eps)
        self.q_norm.weight = torch.nn.Parameter(
            torch.ones((self.num_heads, config.hidden_size_per_head)))
        set_weight_attrs(self.q_norm.weight,
                         {"weight_loader": sharded_weight_loader(0)})
        self.k_norm = RMSNorm(config.hidden_size_per_head,
                              eps=config.rms_norm_eps)
        self.k_norm.weight = torch.nn.Parameter(
            torch.ones((self.num_kv_heads, config.hidden_size_per_head)))
        # Tensor-parallelism shards the K norm weights to the tp ranks
        # in a head-wise manner. This approach does not work if there is only
        # a single KV head, as is the case for PLaMo 2-1B.
        if self.total_num_kv_heads != 1:
            set_weight_attrs(self.k_norm.weight,
                             {"weight_loader": sharded_weight_loader(0)})

        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            prefix=f"{prefix}.attn",
        )

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)

        q_shape = q.shape
        q = q.reshape(q_shape[:-1] + self.q_norm.weight.shape)
        q = self.q_norm.forward_native(q).reshape(q_shape)
        k_shape = k.shape
        k = k.reshape(k_shape[:-1] + self.k_norm.weight.shape)
        k = self.k_norm.forward_native(k).reshape(k_shape)

        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output, _ = self.o_proj(attn_output)
        return output
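
The head bookkeeping in __init__ splits query heads evenly across tensor-parallel ranks and either partitions or replicates the KV heads, depending on how num_key_value_heads compares to the TP world size. A small sketch of that arithmetic with illustrative head counts (not taken from a real PLaMo2 config):

def split_heads(total_num_heads: int, total_num_kv_heads: int, tp_size: int):
    assert total_num_heads % tp_size == 0          # query heads divide evenly
    num_heads = total_num_heads // tp_size
    if total_num_kv_heads >= tp_size:
        assert total_num_kv_heads % tp_size == 0   # partition KV heads
    else:
        assert tp_size % total_num_kv_heads == 0   # replicate KV heads
    num_kv_heads = max(1, total_num_kv_heads // tp_size)
    return num_heads, num_kv_heads

print(split_heads(32, 4, tp_size=4))   # (8, 1): one KV head per rank
print(split_heads(32, 1, tp_size=2))   # (16, 1): the single KV head is replicated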

attn instance-attribute

attn = Attention(
    num_heads,
    head_dim,
    scaling,
    num_kv_heads=num_kv_heads,
    cache_config=cache_config,
    prefix=f"{prefix}.attn",
)

head_dim instance-attribute

head_dim = hidden_size_per_head

hidden_size instance-attribute

hidden_size = hidden_size

k_norm instance-attribute

k_norm = RMSNorm(hidden_size_per_head, eps=rms_norm_eps)

kv_size instance-attribute

kv_size = num_kv_heads * head_dim

num_heads instance-attribute

num_heads = total_num_heads // tp_size

num_kv_heads instance-attribute

num_kv_heads = max(1, total_num_kv_heads // tp_size)

o_proj instance-attribute

o_proj = RowParallelLinear(
    total_num_heads * head_dim,
    hidden_size,
    bias=False,
    quant_config=quant_config,
)

q_norm instance-attribute

q_norm = RMSNorm(hidden_size_per_head, eps=rms_norm_eps)

q_size instance-attribute

q_size = num_heads * head_dim

qkv_proj instance-attribute

qkv_proj = QKVParallelLinear(
    hidden_size,
    head_dim,
    total_num_heads,
    total_num_kv_heads,
    bias=False,
    quant_config=quant_config,
)

rope_scaling instance-attribute

rope_scaling = (
    rope_scaling
    if hasattr(config, "rope_scaling")
    else None
)

rope_theta instance-attribute

rope_theta = (
    rope_theta if hasattr(config, "rope_theta") else 10000
)

rotary_emb instance-attribute

rotary_emb = get_rope(
    head_dim,
    rotary_dim=head_dim,
    max_position=max_position,
    base=rope_theta,
    rope_scaling=rope_scaling,
)

scaling instance-attribute

scaling = head_dim ** -0.5

total_num_heads instance-attribute

total_num_heads = num_attention_heads

total_num_kv_heads instance-attribute

total_num_kv_heads = num_key_value_heads

__init__

__init__(
    *, vllm_config: VllmConfig, prefix: str = "", **kwargs
) -> None
Source code in vllm/model_executor/models/plamo2.py
def __init__(self,
             *,
             vllm_config: VllmConfig,
             prefix: str = "",
             **kwargs) -> None:
    super().__init__()
    config = vllm_config.model_config.hf_config
    cache_config = vllm_config.cache_config
    quant_config = vllm_config.quant_config
    self.hidden_size = config.hidden_size
    tp_size = get_tensor_model_parallel_world_size()
    self.total_num_heads = config.num_attention_heads
    assert self.total_num_heads % tp_size == 0
    self.num_heads = self.total_num_heads // tp_size
    self.total_num_kv_heads = config.num_key_value_heads
    if self.total_num_kv_heads >= tp_size:
        # Number of KV heads is greater than or equal to TP size, so we
        # partition the KV heads across multiple tensor parallel GPUs.
        assert self.total_num_kv_heads % tp_size == 0
    else:
        # Number of KV heads is less than TP size, so we replicate
        # the KV heads across multiple tensor parallel GPUs.
        assert tp_size % self.total_num_kv_heads == 0
    self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
    self.head_dim = config.hidden_size_per_head
    self.q_size = self.num_heads * self.head_dim
    self.kv_size = self.num_kv_heads * self.head_dim
    self.scaling = self.head_dim**-0.5

    self.qkv_proj = QKVParallelLinear(
        config.hidden_size,
        self.head_dim,
        self.total_num_heads,
        self.total_num_kv_heads,
        bias=False,
        quant_config=quant_config,
    )
    self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim,
                                    config.hidden_size,
                                    bias=False,
                                    quant_config=quant_config)

    self.rope_theta = config.rope_theta if hasattr(config,
                                                   "rope_theta") else 10000
    self.rope_scaling = config.rope_scaling if hasattr(
        config, "rope_scaling") else None
    max_position = config.max_position_embeddings
    if hasattr(vllm_config.model_config, "max_model_len") and isinstance(
            vllm_config.model_config.max_model_len, int):
        max_position = min(max_position,
                           vllm_config.model_config.max_model_len)

    self.rotary_emb = get_rope(
        self.head_dim,
        rotary_dim=self.head_dim,
        max_position=max_position,
        base=self.rope_theta,
        rope_scaling=self.rope_scaling,
    )
    self.q_norm = RMSNorm(config.hidden_size_per_head,
                          eps=config.rms_norm_eps)
    self.q_norm.weight = torch.nn.Parameter(
        torch.ones((self.num_heads, config.hidden_size_per_head)))
    set_weight_attrs(self.q_norm.weight,
                     {"weight_loader": sharded_weight_loader(0)})
    self.k_norm = RMSNorm(config.hidden_size_per_head,
                          eps=config.rms_norm_eps)
    self.k_norm.weight = torch.nn.Parameter(
        torch.ones((self.num_kv_heads, config.hidden_size_per_head)))
    # Tensor-parallelism shards the K norm weights to the tp ranks
    # in a head-wise manner. This approach does not work if there is only
    # a single KV head, as is the case for PLaMo 2-1B.
    if self.total_num_kv_heads != 1:
        set_weight_attrs(self.k_norm.weight,
                         {"weight_loader": sharded_weight_loader(0)})

    self.attn = Attention(
        self.num_heads,
        self.head_dim,
        self.scaling,
        num_kv_heads=self.num_kv_heads,
        cache_config=cache_config,
        prefix=f"{prefix}.attn",
    )

forward

forward(
    positions: Tensor, hidden_states: Tensor, **kwargs
) -> Tensor
Source code in vllm/model_executor/models/plamo2.py
def forward(
    self,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    **kwargs,
) -> torch.Tensor:
    qkv, _ = self.qkv_proj(hidden_states)
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)

    q_shape = q.shape
    q = q.reshape(q_shape[:-1] + self.q_norm.weight.shape)
    q = self.q_norm.forward_native(q).reshape(q_shape)
    k_shape = k.shape
    k = k.reshape(k_shape[:-1] + self.k_norm.weight.shape)
    k = self.k_norm.forward_native(k).reshape(k_shape)

    q, k = self.rotary_emb(positions, q, k)
    attn_output = self.attn(q, k, v)
    output, _ = self.o_proj(attn_output)
    return output
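
Because the q_norm and k_norm weights are stored per head, forward temporarily reshapes the flat projections from (..., num_heads * head_dim) to (..., num_heads, head_dim), normalizes each head, and flattens back before applying RoPE. A small sketch of that reshape pattern with RMSNorm written out by hand (head counts and eps are illustrative):

import torch

num_heads, head_dim, eps = 4, 8, 1e-6
q = torch.randn(2, num_heads * head_dim)          # flat query projection
q_norm_weight = torch.ones(num_heads, head_dim)   # one norm weight row per head

q_shape = q.shape
q = q.reshape(q_shape[:-1] + q_norm_weight.shape)              # (2, 4, 8)
q = q * torch.rsqrt(q.pow(2).mean(-1, keepdim=True) + eps)     # per-head RMSNorm
q = (q * q_norm_weight).reshape(q_shape)                       # back to (2, 32)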

Plamo2Config

Bases: PretrainedConfig

Source code in vllm/model_executor/models/plamo2.py
class Plamo2Config(PretrainedConfig):  # type: ignore
    model_type: str = "plamo2"

    hidden_size: int
    num_hidden_layers: int
    rms_norm_eps: float
    # Attention
    num_attention_heads: int
    hidden_size_per_head: int
    num_key_value_heads: int
    # Mamba
    mamba_d_state: int
    mamba_d_conv: int
    mamba_num_heads: int
    mamba_step: int
    # MLP
    intermediate_size: int
    # Tokenizer
    vocab_size: int

hidden_size instance-attribute

hidden_size: int

hidden_size_per_head instance-attribute

hidden_size_per_head: int

intermediate_size instance-attribute

intermediate_size: int

mamba_d_conv instance-attribute

mamba_d_conv: int

mamba_d_state instance-attribute

mamba_d_state: int

mamba_num_heads instance-attribute

mamba_num_heads: int

mamba_step instance-attribute

mamba_step: int

model_type class-attribute instance-attribute

model_type: str = 'plamo2'

num_attention_heads instance-attribute

num_attention_heads: int

num_hidden_layers instance-attribute

num_hidden_layers: int

num_key_value_heads instance-attribute

num_key_value_heads: int

rms_norm_eps instance-attribute

rms_norm_eps: float

vocab_size instance-attribute

vocab_size: int

Plamo2Decoder

Bases: Module

Source code in vllm/model_executor/models/plamo2.py
class Plamo2Decoder(torch.nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        extra_kwargs = {"is_lora_enabled": bool(vllm_config.lora_config)}

        def get_layer(prefix: str):
            layer_idx = int(prefix.rsplit(".", 1)[1])
            return Plamo2DecoderLayer(vllm_config=vllm_config,
                                      layer_idx=layer_idx,
                                      prefix=prefix,
                                      **extra_kwargs)

        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers, get_layer, prefix=f"{prefix}.layers")

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: Optional[torch.Tensor],
        mamba_cache_params: MambaCacheParams,
        mamba2_metadata: Mamba2Metadata,
    ) -> torch.Tensor:
        mamba_cache_index = 0
        for layer in islice(self.layers, self.start_layer, self.end_layer):
            layer_mamba_cache_params = None
            if layer.is_mamba and mamba_cache_params is not None:
                layer_mamba_cache_params = mamba_cache_params.at_layer_idx(
                    mamba_cache_index)
                mamba_cache_index += 1

            hidden_states, residual = layer(
                positions=positions,
                hidden_states=hidden_states,
                residual=residual,
                mamba_cache_params=layer_mamba_cache_params,
                mamba2_metadata=mamba2_metadata,
            )
        return hidden_states, residual

__init__

__init__(
    *, vllm_config: VllmConfig, prefix: str = ""
) -> None
Source code in vllm/model_executor/models/plamo2.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
    super().__init__()
    config = vllm_config.model_config.hf_config
    extra_kwargs = {"is_lora_enabled": bool(vllm_config.lora_config)}

    def get_layer(prefix: str):
        layer_idx = int(prefix.rsplit(".", 1)[1])
        return Plamo2DecoderLayer(vllm_config=vllm_config,
                                  layer_idx=layer_idx,
                                  prefix=prefix,
                                  **extra_kwargs)

    self.start_layer, self.end_layer, self.layers = make_layers(
        config.num_hidden_layers, get_layer, prefix=f"{prefix}.layers")

forward

forward(
    positions: Tensor,
    hidden_states: Tensor,
    residual: Optional[Tensor],
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
) -> Tensor
Source code in vllm/model_executor/models/plamo2.py
def forward(
    self,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    residual: Optional[torch.Tensor],
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
) -> torch.Tensor:
    mamba_cache_index = 0
    for layer in islice(self.layers, self.start_layer, self.end_layer):
        layer_mamba_cache_params = None
        if layer.is_mamba and mamba_cache_params is not None:
            layer_mamba_cache_params = mamba_cache_params.at_layer_idx(
                mamba_cache_index)
            mamba_cache_index += 1

        hidden_states, residual = layer(
            positions=positions,
            hidden_states=hidden_states,
            residual=residual,
            mamba_cache_params=layer_mamba_cache_params,
            mamba2_metadata=mamba2_metadata,
        )
    return hidden_states, residual
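
Since only Mamba layers hold Mamba state, mamba_cache_index advances independently of the layer index: the n-th Mamba layer gets cache slot n regardless of how many attention layers sit in between. A toy illustration of that mapping (the layer pattern below is hypothetical):

layers_are_mamba = [True, True, True, False, True, True, True, False]  # hypothetical
mamba_cache_index = 0
for layer_idx, is_mamba_layer in enumerate(layers_are_mamba):
    if is_mamba_layer:
        print(f"layer {layer_idx} -> mamba cache slot {mamba_cache_index}")
        mamba_cache_index += 1
    else:
        print(f"layer {layer_idx} -> attention, no mamba cache slot")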

Plamo2DecoderLayer

Bases: Module

Source code in vllm/model_executor/models/plamo2.py
class Plamo2DecoderLayer(nn.Module):

    def __init__(self,
                 vllm_config: VllmConfig,
                 layer_idx: int,
                 prefix: str = "",
                 **kwargs) -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config

        self.is_mamba = is_mamba(config, layer_idx)
        if self.is_mamba:
            self.mixer = Plamo2MambaMixer(vllm_config=vllm_config,
                                          prefix=f"{prefix}.mixer")
        else:
            self.mixer = Plamo2AttentionMixer(vllm_config=vllm_config,
                                              prefix=f"{prefix}.mixer")

        self.mlp = DenseMLP(config=config,
                            quant_config=quant_config,
                            prefix=f"{prefix}.mlp")
        self.pre_mixer_norm = RMSNorm(config.hidden_size,
                                      eps=config.rms_norm_eps)
        self.post_mixer_norm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.pre_mlp_norm = RMSNorm(config.hidden_size,
                                    eps=config.rms_norm_eps)
        self.post_mlp_norm = RMSNorm(config.hidden_size,
                                     eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: Optional[torch.Tensor],
        mamba_cache_params: MambaCacheParams,
        mamba2_metadata: Mamba2Metadata,
        **kwargs,
    ):
        if residual is None:
            residual = hidden_states
            hidden_states = self.pre_mixer_norm(hidden_states)
        else:
            hidden_states, residual = self.pre_mixer_norm(
                hidden_states, residual)

        if self.is_mamba:
            # Plamo2MambaMixer writes output to this tensor
            output = torch.empty_like(hidden_states)
            mixer_kwargs = {
                "output": output,
                "mamba_cache_params": mamba_cache_params,
                "mamba2_metadata": mamba2_metadata,
            }
        else:
            mixer_kwargs = {
                "positions": positions,
            }
        hidden_states = self.mixer(
            hidden_states=hidden_states,
            **mixer_kwargs,
        )
        if self.is_mamba:
            hidden_states = output
        hidden_states = self.post_mixer_norm(hidden_states)
        # Fully Connected
        hidden_states, residual = self.pre_mlp_norm(hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_mlp_norm(hidden_states)
        return hidden_states, residual

is_mamba instance-attribute

is_mamba = is_mamba(config, layer_idx)

mixer instance-attribute

mixer = Plamo2MambaMixer(
    vllm_config=vllm_config, prefix=f"{prefix}.mixer"
)

mlp instance-attribute

mlp = DenseMLP(
    config=config,
    quant_config=quant_config,
    prefix=f"{prefix}.mlp",
)

post_mixer_norm instance-attribute

post_mixer_norm = RMSNorm(hidden_size, eps=rms_norm_eps)

post_mlp_norm instance-attribute

post_mlp_norm = RMSNorm(hidden_size, eps=rms_norm_eps)

pre_mixer_norm instance-attribute

pre_mixer_norm = RMSNorm(hidden_size, eps=rms_norm_eps)

pre_mlp_norm instance-attribute

pre_mlp_norm = RMSNorm(hidden_size, eps=rms_norm_eps)

__init__

__init__(
    vllm_config: VllmConfig,
    layer_idx: int,
    prefix: str = "",
    **kwargs,
) -> None
Source code in vllm/model_executor/models/plamo2.py
def __init__(self,
             vllm_config: VllmConfig,
             layer_idx: int,
             prefix: str = "",
             **kwargs) -> None:
    super().__init__()
    config = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config

    self.is_mamba = is_mamba(config, layer_idx)
    if self.is_mamba:
        self.mixer = Plamo2MambaMixer(vllm_config=vllm_config,
                                      prefix=f"{prefix}.mixer")
    else:
        self.mixer = Plamo2AttentionMixer(vllm_config=vllm_config,
                                          prefix=f"{prefix}.mixer")

    self.mlp = DenseMLP(config=config,
                        quant_config=quant_config,
                        prefix=f"{prefix}.mlp")
    self.pre_mixer_norm = RMSNorm(config.hidden_size,
                                  eps=config.rms_norm_eps)
    self.post_mixer_norm = RMSNorm(config.hidden_size,
                                   eps=config.rms_norm_eps)
    self.pre_mlp_norm = RMSNorm(config.hidden_size,
                                eps=config.rms_norm_eps)
    self.post_mlp_norm = RMSNorm(config.hidden_size,
                                 eps=config.rms_norm_eps)

forward

forward(
    positions: Tensor,
    hidden_states: Tensor,
    residual: Optional[Tensor],
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
)
Source code in vllm/model_executor/models/plamo2.py
def forward(
    self,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    residual: Optional[torch.Tensor],
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
):
    if residual is None:
        residual = hidden_states
        hidden_states = self.pre_mixer_norm(hidden_states)
    else:
        hidden_states, residual = self.pre_mixer_norm(
            hidden_states, residual)

    if self.is_mamba:
        # Plamo2MambaMixer writes output to this tensor
        output = torch.empty_like(hidden_states)
        mixer_kwargs = {
            "output": output,
            "mamba_cache_params": mamba_cache_params,
            "mamba2_metadata": mamba2_metadata,
        }
    else:
        mixer_kwargs = {
            "positions": positions,
        }
    hidden_states = self.mixer(
        hidden_states=hidden_states,
        **mixer_kwargs,
    )
    if self.is_mamba:
        hidden_states = output
    hidden_states = self.post_mixer_norm(hidden_states)
    # Fully Connected
    hidden_states, residual = self.pre_mlp_norm(hidden_states, residual)
    hidden_states = self.mlp(hidden_states)
    hidden_states = self.post_mlp_norm(hidden_states)
    return hidden_states, residual
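
Each sublayer is wrapped in a pre-norm and a post-norm, and the residual is threaded through vLLM's RMSNorm: when the norm is called with a residual, it first adds the residual and then returns both the normalized sum and the updated residual. A plain-PyTorch sketch of one block under that assumption (torch.tanh stands in for the real mixer and MLP):

import torch

def rms_norm(x, weight, residual=None, eps=1e-6):
    if residual is not None:
        x = x + residual              # fused residual add, as in vLLM's RMSNorm
        residual = x
    normed = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps) * weight
    return normed if residual is None else (normed, residual)

hidden_size = 8
w = torch.ones(hidden_size)
hidden_states = torch.randn(2, hidden_size)

residual = hidden_states                                 # first layer: residual is None
hidden_states = rms_norm(hidden_states, w)               # pre_mixer_norm
hidden_states = torch.tanh(hidden_states)                # stand-in for the mixer
hidden_states = rms_norm(hidden_states, w)               # post_mixer_norm
hidden_states, residual = rms_norm(hidden_states, w, residual=residual)  # pre_mlp_norm
hidden_states = torch.tanh(hidden_states)                # stand-in for the MLP
hidden_states = rms_norm(hidden_states, w)               # post_mlp_norm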

Plamo2ForCausalLM

Bases: Module, HasInnerState, SupportsPP, IsHybrid

Source code in vllm/model_executor/models/plamo2.py
class Plamo2ForCausalLM(torch.nn.Module, HasInnerState, SupportsPP, IsHybrid):
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        config = vllm_config.model_config.hf_config
        scheduler_config = vllm_config.scheduler_config

        self.config = config
        self.vllm_config = vllm_config
        self.model_config = vllm_config.model_config
        self.scheduler_config = scheduler_config

        # ModelConfig.get_head_size assumes head_dim is set or calculated as
        # hidden_size // num_attention_heads. However, this is not always
        # the case for PLaMo2, as indicated by the FIXME comment.
        self.config.head_dim = self.config.hidden_size_per_head

        self.model = Plamo2Model(vllm_config=vllm_config,
                                 prefix=maybe_prefix(prefix, "model"))
        self.vocab_size = self.config.vocab_size
        self.unpadded_vocab_size = self.config.vocab_size
        num_embeddings = ((self.vocab_size + 15) // 16) * 16
        self.lm_head = ParallelLMHead(
            num_embeddings,
            self.config.hidden_size,
            org_num_embeddings=self.config.vocab_size,
            padding_size=DEFAULT_VOCAB_PADDING_SIZE,
            prefix=f"{prefix}.lm_head",
        )
        if self.config.tie_word_embeddings:
            self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)

        # Used to track and store the Mamba cache between steps.
        self.mamba_cache: Optional[MambaCacheManager] = None

        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                self.config.vocab_size)
        self.sampler = get_sampler()
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors)

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.get_input_embeddings(input_ids)

    def forward(self,
                input_ids: torch.Tensor,
                positions: torch.Tensor,
                intermediate_tensors: Optional[IntermediateTensors] = None,
                inputs_embeds: Optional[torch.Tensor] = None,
                **kwargs):
        if not envs.VLLM_USE_V1:
            if self.mamba_cache is None:
                num_mamba_layers = (
                    self.model_config.get_num_layers_by_block_type(
                        self.vllm_config.parallel_config,
                        LayerBlockType.mamba))

                mamba_state_shape = self.get_mamba_state_shape_from_config(
                    self.vllm_config, use_v1=False)
                mamba_state_dtype = \
                    self.get_mamba_state_dtype_from_config(
                    self.vllm_config)
                self.mamba_cache = MambaCacheManager(self.vllm_config,
                                                     num_mamba_layers,
                                                     *mamba_state_shape,
                                                     *mamba_state_dtype)

            mamba_cache_params = self.mamba_cache.current_run_tensors(**kwargs)
        else:
            # NOTE: mamba_cache_params is not needed for v1
            mamba_cache_params = None

        hidden_states = self.model(input_ids, positions, mamba_cache_params,
                                   intermediate_tensors, inputs_embeds)
        return hidden_states

    def copy_inputs_before_cuda_graphs(self, input_buffers, **kwargs):
        return self.mamba_cache.copy_inputs_before_cuda_graphs(
            input_buffers, **kwargs)

    def get_seqlen_agnostic_capture_inputs(self, batch_size: int):
        return self.mamba_cache.get_seqlen_agnostic_capture_inputs(batch_size)

    @classmethod
    def get_mamba_state_dtype_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[torch.dtype, torch.dtype]:

        return MambaStateDtypeCalculator.mamba2_state_dtype(
            vllm_config.model_config.dtype,
            vllm_config.cache_config.mamba_cache_dtype,
            vllm_config.cache_config.mamba_ssm_cache_dtype,
        )

    @classmethod
    def get_mamba_state_shape_from_config(
        cls,
        vllm_config: "VllmConfig",
        use_v1: bool = True,
    ) -> tuple[tuple[int, int], tuple[int, int, int]]:
        """Calculate shapes for Mamba's convolutional and state caches.
        Args:
            vllm_config: vLLM config
            use_v1: Get shapes for V1 (or V0)
        Returns:
            Tuple containing:
            - conv_state_shape: Shape for convolutional state cache
            - temporal_state_shape: Shape for state space model cache
        """
        parallel_config = vllm_config.parallel_config
        hf_config = vllm_config.model_config.hf_config
        intermediate_size =\
                hf_config.mamba_num_heads * hf_config.hidden_size_per_head

        return MambaStateShapeCalculator.mamba2_state_shape(
            intermediate_size=intermediate_size,
            tp_world_size=parallel_config.tensor_parallel_size,
            n_groups=0,
            num_heads=hf_config.mamba_num_heads,
            head_dim=hf_config.hidden_size_per_head,
            state_size=hf_config.mamba_d_state,
            conv_kernel=hf_config.mamba_d_conv,
            use_v1=use_v1,
        )

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        logits = self.logits_processor(self.lm_head, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: Optional[torch.Tensor],
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:

            # Having both tie_word_embeddings=True and lm_head.weight in the
            # safetensors file causes a dict key access error.
            if name == "lm_head.weight" and self.config.tie_word_embeddings:
                assert "lm_head.weight" not in params_dict
                continue

            # Update the weight names to be compatible with the vllm version
            # of the model.
            # Do not change the order of the replacements.
            replacements = {
                # Rename incompatible weight names.
                ".A_log": ".A",
                ".B_norm_weight": ".B_norm.weight",
                ".C_norm_weight": ".C_norm.weight",
                ".dt_norm_weight": ".dt_norm.weight",
                ".q_weight": ".q_norm.weight",
                ".k_weight": ".k_norm.weight",
            }
            # Apply replacements based on the defined mappings
            for old, new in replacements.items():
                if old in name:
                    name = name.replace(old, new)

            # Reshape the in_proj weights to match the shape expected
            # by MergedColumnParallelLinear.
            # This works both for unquantized weights and
            # for quantized weights.
            # In the quantized case, the weights are already transposed.
            # Also, in addition to the quantized weights,
            # the zero points and scales have to be reshaped as well.
            # Packing should not be affected by this.
            if ".mixer.in_proj.weight" in name \
                or "mixer.in_proj.qweight" in name \
                or "mixer.in_proj.scales" in name \
                or "mixer.in_proj.qzeros" in name:
                if "mixer.in_proj.weight" in name:
                    loaded_weight = loaded_weight.transpose(0, 1)
                # for weight:
                # loaded_weight.shape[0] == self.config.hidden_size
                # for qweight:
                # loaded_weight.shape[0] == self.config.hidden_size // param.pack_factor  # noqa
                # for scales and qzeros:
                # loaded_weight.shape[0] == self.config.hidden_size // self.vllm_config.quant_config.group_size  # noqa
                loaded_weight = loaded_weight.reshape(
                    loaded_weight.shape[0], self.config.mamba_num_heads, -1)
                gate_weight, hidden_states_weight = loaded_weight.chunk(2,
                                                                        dim=-1)
                gate_weight = gate_weight.reshape(loaded_weight.shape[0], -1)
                hidden_states_weight = hidden_states_weight.reshape(
                    loaded_weight.shape[0], -1)
                loaded_weight = torch.cat([gate_weight, hidden_states_weight],
                                          dim=-1)
                if "mixer.in_proj.weight" in name:
                    loaded_weight = loaded_weight.transpose(0, 1)

            # Offset parameters are not supported by vLLM's RMSNorm yet.
            if ".pre_mixer_norm" in name:
                loaded_weight += 1.0
            elif ".post_mixer_norm" in name:
                loaded_weight += 1.0 / 5
            elif ".pre_mlp_norm" in name:
                loaded_weight += 1.0
            elif ".post_mlp_norm" in name:
                loaded_weight += 1.0 / (5**1.5)
            elif "model.norm.weight" in name:
                loaded_weight += 1.0

            # Skip layers on other devices.
            if is_pp_missing_parameter(name, self):
                continue

            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)

config instance-attribute

config = config

lm_head instance-attribute

lm_head = ParallelLMHead(
    num_embeddings,
    hidden_size,
    org_num_embeddings=vocab_size,
    padding_size=DEFAULT_VOCAB_PADDING_SIZE,
    prefix=f"{prefix}.lm_head",
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(
    unpadded_vocab_size, vocab_size
)

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    make_empty_intermediate_tensors
)

mamba_cache instance-attribute

mamba_cache: Optional[MambaCacheManager] = None

model instance-attribute

model = Plamo2Model(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "model"),
)

model_config instance-attribute

model_config = model_config

packed_modules_mapping class-attribute instance-attribute

packed_modules_mapping = {
    "qkv_proj": ["q_proj", "k_proj", "v_proj"]
}

sampler instance-attribute

sampler = get_sampler()

scheduler_config instance-attribute

scheduler_config = scheduler_config

unpadded_vocab_size instance-attribute

unpadded_vocab_size = vocab_size

vllm_config instance-attribute

vllm_config = vllm_config

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(
    *, vllm_config: VllmConfig, prefix: str = ""
) -> None
Source code in vllm/model_executor/models/plamo2.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
    super().__init__()
    config = vllm_config.model_config.hf_config
    scheduler_config = vllm_config.scheduler_config

    self.config = config
    self.vllm_config = vllm_config
    self.model_config = vllm_config.model_config
    self.scheduler_config = scheduler_config

    # ModelConfig.get_head_size assumes head_dim is set or calculated as
    # hidden_size // num_attention_heads. However, this is not always
    # the case for PLaMo2, as indicated by the FIXME comment.
    self.config.head_dim = self.config.hidden_size_per_head

    self.model = Plamo2Model(vllm_config=vllm_config,
                             prefix=maybe_prefix(prefix, "model"))
    self.vocab_size = self.config.vocab_size
    self.unpadded_vocab_size = self.config.vocab_size
    num_embeddings = ((self.vocab_size + 15) // 16) * 16
    self.lm_head = ParallelLMHead(
        num_embeddings,
        self.config.hidden_size,
        org_num_embeddings=self.config.vocab_size,
        padding_size=DEFAULT_VOCAB_PADDING_SIZE,
        prefix=f"{prefix}.lm_head",
    )
    if self.config.tie_word_embeddings:
        self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)

    # Used to track and store the Mamba cache between steps.
    self.mamba_cache: Optional[MambaCacheManager] = None

    self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                            self.config.vocab_size)
    self.sampler = get_sampler()
    self.make_empty_intermediate_tensors = (
        self.model.make_empty_intermediate_tensors)

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[Tensor]
Source code in vllm/model_executor/models/plamo2.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[torch.Tensor]:
    logits = self.logits_processor(self.lm_head, hidden_states,
                                   sampling_metadata)
    return logits

copy_inputs_before_cuda_graphs

copy_inputs_before_cuda_graphs(input_buffers, **kwargs)
Source code in vllm/model_executor/models/plamo2.py
def copy_inputs_before_cuda_graphs(self, input_buffers, **kwargs):
    return self.mamba_cache.copy_inputs_before_cuda_graphs(
        input_buffers, **kwargs)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs,
)
Source code in vllm/model_executor/models/plamo2.py
def forward(self,
            input_ids: torch.Tensor,
            positions: torch.Tensor,
            intermediate_tensors: Optional[IntermediateTensors] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            **kwargs):
    if not envs.VLLM_USE_V1:
        if self.mamba_cache is None:
            num_mamba_layers = (
                self.model_config.get_num_layers_by_block_type(
                    self.vllm_config.parallel_config,
                    LayerBlockType.mamba))

            mamba_state_shape = self.get_mamba_state_shape_from_config(
                self.vllm_config, use_v1=False)
            mamba_state_dtype = \
                self.get_mamba_state_dtype_from_config(
                self.vllm_config)
            self.mamba_cache = MambaCacheManager(self.vllm_config,
                                                 num_mamba_layers,
                                                 *mamba_state_shape,
                                                 *mamba_state_dtype)

        mamba_cache_params = self.mamba_cache.current_run_tensors(**kwargs)
    else:
        # NOTE: mamba_cache_params is not needed for v1
        mamba_cache_params = None

    hidden_states = self.model(input_ids, positions, mamba_cache_params,
                               intermediate_tensors, inputs_embeds)
    return hidden_states

get_input_embeddings

get_input_embeddings(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/plamo2.py
def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
    return self.model.get_input_embeddings(input_ids)

get_mamba_state_dtype_from_config classmethod

get_mamba_state_dtype_from_config(
    vllm_config: VllmConfig,
) -> tuple[dtype, dtype]
Source code in vllm/model_executor/models/plamo2.py
@classmethod
def get_mamba_state_dtype_from_config(
    cls,
    vllm_config: "VllmConfig",
) -> tuple[torch.dtype, torch.dtype]:

    return MambaStateDtypeCalculator.mamba2_state_dtype(
        vllm_config.model_config.dtype,
        vllm_config.cache_config.mamba_cache_dtype,
        vllm_config.cache_config.mamba_ssm_cache_dtype,
    )

get_mamba_state_shape_from_config classmethod

get_mamba_state_shape_from_config(
    vllm_config: VllmConfig, use_v1: bool = True
) -> tuple[tuple[int, int], tuple[int, int, int]]

Calculate shapes for Mamba's convolutional and state caches.

Args:
    vllm_config: vLLM config
    use_v1: Get shapes for V1 (or V0)

Returns:
    Tuple containing:
    - conv_state_shape: Shape for convolutional state cache
    - temporal_state_shape: Shape for state space model cache

Source code in vllm/model_executor/models/plamo2.py
@classmethod
def get_mamba_state_shape_from_config(
    cls,
    vllm_config: "VllmConfig",
    use_v1: bool = True,
) -> tuple[tuple[int, int], tuple[int, int, int]]:
    """Calculate shapes for Mamba's convolutional and state caches.
    Args:
        vllm_config: vLLM config
        use_v1: Get shapes for V1 (or V0)
    Returns:
        Tuple containing:
        - conv_state_shape: Shape for convolutional state cache
        - temporal_state_shape: Shape for state space model cache
    """
    parallel_config = vllm_config.parallel_config
    hf_config = vllm_config.model_config.hf_config
    intermediate_size =\
            hf_config.mamba_num_heads * hf_config.hidden_size_per_head

    return MambaStateShapeCalculator.mamba2_state_shape(
        intermediate_size=intermediate_size,
        tp_world_size=parallel_config.tensor_parallel_size,
        n_groups=0,
        num_heads=hf_config.mamba_num_heads,
        head_dim=hf_config.hidden_size_per_head,
        state_size=hf_config.mamba_d_state,
        conv_kernel=hf_config.mamba_d_conv,
        use_v1=use_v1,
    )

get_seqlen_agnostic_capture_inputs

get_seqlen_agnostic_capture_inputs(batch_size: int)
Source code in vllm/model_executor/models/plamo2.py
def get_seqlen_agnostic_capture_inputs(self, batch_size: int):
    return self.mamba_cache.get_seqlen_agnostic_capture_inputs(batch_size)

load_weights

load_weights(weights: Iterable[tuple[str, Tensor]])
Source code in vllm/model_executor/models/plamo2.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
    params_dict = dict(self.named_parameters())
    for name, loaded_weight in weights:

        # Having both tie_word_embeddings=True and lm_head.weight in the
        # safetensors file causes a dict key access error.
        if name == "lm_head.weight" and self.config.tie_word_embeddings:
            assert "lm_head.weight" not in params_dict
            continue

        # Update the weight names to be compatible with the vllm version
        # of the model.
        # Do not change the order of the replacements.
        replacements = {
            # Rename incompatible weight names.
            ".A_log": ".A",
            ".B_norm_weight": ".B_norm.weight",
            ".C_norm_weight": ".C_norm.weight",
            ".dt_norm_weight": ".dt_norm.weight",
            ".q_weight": ".q_norm.weight",
            ".k_weight": ".k_norm.weight",
        }
        # Apply replacements based on the defined mappings
        for old, new in replacements.items():
            if old in name:
                name = name.replace(old, new)

        # Reshape the in_proj weights to match the shape expected
        # by MergedColumnParallelLinear.
        # This works both for unquantized weights and
        # for quantized weights.
        # In the quantized case, the weights are already transposed.
        # Also, in addition to the quantized weights,
        # the zero points and scales have to be reshaped as well.
        # Packing should not be affected by this.
        if ".mixer.in_proj.weight" in name \
            or "mixer.in_proj.qweight" in name \
            or "mixer.in_proj.scales" in name \
            or "mixer.in_proj.qzeros" in name:
            if "mixer.in_proj.weight" in name:
                loaded_weight = loaded_weight.transpose(0, 1)
            # for weight:
            # loaded_weight.shape[0] == self.config.hidden_size
            # for qweight:
            # loaded_weight.shape[0] == self.config.hidden_size // param.pack_factor  # noqa
            # for scales and qzeros:
            # loaded_weight.shape[0] == self.config.hidden_size // self.vllm_config.quant_config.group_size  # noqa
            loaded_weight = loaded_weight.reshape(
                loaded_weight.shape[0], self.config.mamba_num_heads, -1)
            gate_weight, hidden_states_weight = loaded_weight.chunk(2,
                                                                    dim=-1)
            gate_weight = gate_weight.reshape(loaded_weight.shape[0], -1)
            hidden_states_weight = hidden_states_weight.reshape(
                loaded_weight.shape[0], -1)
            loaded_weight = torch.cat([gate_weight, hidden_states_weight],
                                      dim=-1)
            if "mixer.in_proj.weight" in name:
                loaded_weight = loaded_weight.transpose(0, 1)

        # Offset parameters are not supported by vLLM's RMSNorm yet.
        if ".pre_mixer_norm" in name:
            loaded_weight += 1.0
        elif ".post_mixer_norm" in name:
            loaded_weight += 1.0 / 5
        elif ".pre_mlp_norm" in name:
            loaded_weight += 1.0
        elif ".post_mlp_norm" in name:
            loaded_weight += 1.0 / (5**1.5)
        elif "model.norm.weight" in name:
            loaded_weight += 1.0

        # Skip layers on other devices.
        if is_pp_missing_parameter(name, self):
            continue

        param = params_dict[name]
        weight_loader = getattr(param, "weight_loader",
                                default_weight_loader)
        weight_loader(param, loaded_weight)
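
The in_proj reshape above converts the checkpoint layout, in which each Mamba head stores its gate columns next to its hidden-state columns, into the [all gates | all hidden states] layout that MergedColumnParallelLinear expects. A small demonstration of that permutation with toy dimensions (values are illustrative only):

import torch

hidden_size, num_heads, head_dim = 4, 3, 2
# After the transpose, the weight is (hidden_size, num_heads * 2 * head_dim).
w = torch.arange(hidden_size * num_heads * 2 * head_dim, dtype=torch.float32)
w = w.reshape(hidden_size, num_heads * 2 * head_dim)

w = w.reshape(hidden_size, num_heads, -1)          # (hidden, heads, 2 * head_dim)
gate, hs = w.chunk(2, dim=-1)                      # per-head split
gate = gate.reshape(hidden_size, -1)               # (hidden, heads * head_dim)
hs = hs.reshape(hidden_size, -1)
merged = torch.cat([gate, hs], dim=-1)             # [all gates | all hidden states]
print(merged.shape)                                # torch.Size([4, 12])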

sample

sample(
    logits: Optional[Tensor],
    sampling_metadata: SamplingMetadata,
) -> Optional[SamplerOutput]
Source code in vllm/model_executor/models/plamo2.py
def sample(
    self,
    logits: Optional[torch.Tensor],
    sampling_metadata: SamplingMetadata,
) -> Optional[SamplerOutput]:
    next_tokens = self.sampler(logits, sampling_metadata)
    return next_tokens
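
As a rough usage sketch, a PLaMo2 checkpoint can be run through vLLM's standard offline API; the model id and flags below are assumptions for illustration, not something this module prescribes:

from vllm import LLM, SamplingParams

# Hypothetical checkpoint id; any model whose config reports model_type "plamo2"
# should resolve to Plamo2ForCausalLM.
llm = LLM(model="pfnet/plamo-2-1b", trust_remote_code=True)
outputs = llm.generate(["The capital of Japan is"],
                       SamplingParams(temperature=0.0, max_tokens=16))
print(outputs[0].outputs[0].text)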

Plamo2MambaMixer

Bases: MambaBase, CustomOp

Source code in vllm/model_executor/models/plamo2.py
@CustomOp.register(name="plamo2_mamba_mixer")
class Plamo2MambaMixer(MambaBase, CustomOp):

    def __init__(self,
                 vllm_config: VllmConfig,
                 *,
                 prefix: str = "",
                 **kwargs) -> None:
        super().__init__()
        self.config = vllm_config.model_config.hf_config
        self.cache_config = vllm_config.cache_config
        self.model_config = vllm_config.model_config
        self.quant_config = vllm_config.quant_config
        self.hidden_size = self.config.hidden_size
        self.ssm_state_size = self.config.mamba_d_state
        self.conv_kernel_size = self.config.mamba_d_conv
        self.intermediate_size = (self.config.mamba_num_heads *
                                  self.config.hidden_size_per_head)
        self.tp_size = get_tensor_model_parallel_world_size()
        self.head_dim = self.config.hidden_size_per_head
        self.num_heads = self.config.mamba_num_heads
        self.time_step_rank = max(64, self.hidden_size // 16)
        self.conv1d = ColumnParallelLinear(
            input_size=self.conv_kernel_size,
            output_size=self.intermediate_size,
            bias=False,
            prefix=f"{prefix}.conv1d",
            return_bias=False,
        )
        # unsqueeze to fit conv1d weights shape into the linear weights shape.
        # Can't do this in `weight_loader` since it already exists in
        # `ColumnParallelLinear` and `set_weight_attrs`
        # doesn't allow overriding it
        self.conv1d.weight.data = self.conv1d.weight.data.unsqueeze(1)

        self.in_proj = MergedColumnParallelLinear(
            self.hidden_size,
            [self.intermediate_size] * 2,
            bias=False,
            quant_config=self.quant_config,
            prefix=f"{prefix}.in_proj",
            return_bias=False,
        )
        # selective projection used to make dt, B and C input dependent
        self.bcdt_proj = RowParallelLinear(
            self.intermediate_size,
            self.time_step_rank + self.ssm_state_size * 2,
            bias=False,
            quant_config=self.quant_config,
            prefix=f"{prefix}.bcdt_proj",
            return_bias=False,
        )
        # time step projection (discretization) -
        # In the forward we need to apply dt_proj without the bias,
        # as the bias is added in the selective scan kernel.
        self.dt_proj = ColumnParallelLinear(
            self.time_step_rank,
            self.num_heads,
            bias=False,
            quant_config=self.quant_config,
            prefix=f"{prefix}.dt_proj",
            return_bias=False,
        )

        self.A = nn.Parameter(
            torch.empty(
                divide(self.num_heads, self.tp_size),
                dtype=torch.float32,
            ))
        self.D = nn.Parameter(torch.ones(divide(self.num_heads, self.tp_size)))
        self.dt_bias = nn.Parameter(
            torch.ones(divide(self.num_heads, self.tp_size)))

        set_weight_attrs(self.D, {"weight_loader": sharded_weight_loader(0)})
        a_weight_loader = composed_weight_loader(
            sharded_weight_loader(0), lambda x: -torch.exp(x.float()))
        set_weight_attrs(self.A, {"weight_loader": a_weight_loader})
        set_weight_attrs(self.dt_bias,
                         {"weight_loader": sharded_weight_loader(0)})

        self.out_proj = RowParallelLinear(
            self.intermediate_size,
            self.hidden_size,
            bias=False,
            input_is_parallel=True,
            quant_config=self.quant_config,
            prefix=f"{prefix}.out_proj",
            return_bias=False,
        )
        # The activation function is fixed to SiLU.
        self.activation = "silu"

        self.dt_norm = RMSNorm(self.time_step_rank,
                               eps=self.config.rms_norm_eps)
        self.B_norm = RMSNorm(self.ssm_state_size,
                              eps=self.config.rms_norm_eps)
        self.C_norm = RMSNorm(self.ssm_state_size,
                              eps=self.config.rms_norm_eps)

        self.chunk_size = self.config.mamba_chunk_size

        if envs.VLLM_USE_V1:
            compilation_config = get_current_vllm_config().compilation_config
            if prefix in compilation_config.static_forward_context:
                raise ValueError(f"Duplicate layer name: {prefix}")
            compilation_config.static_forward_context[prefix] = self
            # The outer list is for v0 PP virtual engine. Though this code path
            # only runs for v1, we have to do this to unify with the interface
            # of Attention + v0 PP.
            # The inner tuple is (conv_state, ssm_state)
            self.kv_cache = [(torch.tensor([]), torch.tensor([]))]
            assert self.chunk_size != -1, "chunk_size must be set for v1"

        self.prefix = prefix

    def _project_ssm_parameters(self, hidden_states):
        ssm_parameters = self.bcdt_proj(hidden_states)
        B, C, time_step = torch.split(
            ssm_parameters,
            [self.ssm_state_size, self.ssm_state_size, self.time_step_rank],
            dim=-1,
        )

        # vllm._custom_ops.rms_norm requires contiguous input tensors.
        time_step = self.dt_norm(time_step.contiguous())
        B = self.B_norm(B.contiguous())
        C = self.C_norm(C.contiguous())
        dt = self.dt_proj(time_step)
        return B, C, dt

    def forward_native(
        self,
        hidden_states: torch.Tensor,
        output: torch.Tensor,
        mamba_cache_params: MambaCacheParams,
        mamba2_metadata: Mamba2Metadata,
        **kwargs,
    ):
        pass

    def forward(
        self,
        hidden_states: torch.Tensor,
        output: torch.Tensor,
        mamba_cache_params: MambaCacheParams,
        mamba2_metadata: Mamba2Metadata,
        **kwargs,
    ):
        if not envs.VLLM_USE_V1:
            CustomOp.forward(self, hidden_states, output, mamba_cache_params,
                             mamba2_metadata)
        else:
            torch.ops.vllm.plamo2_mamba_mixer(
                hidden_states,
                output,
                self.prefix,
            )

    def forward_cuda(
        self,
        hidden_states: torch.Tensor,
        output: torch.Tensor,
        mamba_cache_params: MambaCacheParams,
        mamba2_metadata: Mamba2Metadata,
        **kwargs,
    ):

        forward_context = get_forward_context()
        # mamba2_metadata contains metadata necessary for the mamba2 triton
        # kernels to operate in continuous batching and in chunked prefill
        # modes; they are computed at top-level model forward since they
        # stay the same and are reused for all mamba layers in each iteration
        attn_metadata: AttentionMetadata = forward_context.attn_metadata
        if envs.VLLM_USE_V1:
            if attn_metadata is not None:
                assert isinstance(attn_metadata, dict)
                attn_metadata = attn_metadata[self.prefix]
                mamba2_metadata = attn_metadata
                assert isinstance(attn_metadata, Mamba2AttentionMetadata)
                self_kv_cache = self.kv_cache[forward_context.virtual_engine]
                # conv_state = (..., dim, width-1) yet contiguous along 'dim'
                conv_state = self_kv_cache[0].transpose(-1, -2)
                ssm_state = self_kv_cache[1]
                state_indices_tensor = attn_metadata.state_indices_tensor
                has_initial_states_p = attn_metadata.has_initial_states_p
                prep_initial_states = attn_metadata.prep_initial_states
                chunk_size = attn_metadata.chunk_size
                seq_idx_p = attn_metadata.seq_idx_p
                chunk_indices_p = attn_metadata.chunk_indices_p
                chunk_offsets_p = attn_metadata.chunk_offsets_p
        else:
            conv_state = mamba_cache_params.conv_state
            ssm_state = mamba_cache_params.ssm_state
            state_indices_tensor = mamba_cache_params.state_indices_tensor
            has_initial_states_p = mamba2_metadata.has_initial_states
            prep_initial_states = mamba2_metadata.prep_initial_states
            chunk_size = mamba2_metadata.chunk_size
            seq_idx_p = mamba2_metadata.seq_idx
            chunk_indices_p = mamba2_metadata.chunk_indices
            chunk_offsets_p = mamba2_metadata.chunk_offsets

        # 1. Gated MLP's linear projection
        projected_states = self.in_proj(hidden_states)
        gate, hidden_states = projected_states.chunk(2, dim=-1)

        # 2. Convolution sequence transformation
        conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0),
                                               self.conv1d.weight.size(2))

        if envs.VLLM_USE_V1 and attn_metadata is None:
            # V1 profile run
            hidden_states = (hidden_states.transpose(0, 1).clone().transpose(
                0, 1)).contiguous()
            output[:] = self.out_proj(hidden_states)
            return

        num_prefills = attn_metadata.num_prefills  # request count
        num_decodes = attn_metadata.num_decode_tokens  # token count (=request)
        num_prefill_tokens = attn_metadata.num_prefill_tokens  # token count
        has_prefill = num_prefills > 0
        has_decode = num_decodes > 0
        num_actual_tokens = num_prefill_tokens + num_decodes

        # NOTE: V0 puts prefill before decode, V1 puts decode before prefill
        # Separate prefill and decode by splitting varlen input
        # Split along token dimension
        if envs.VLLM_USE_V1:
            hidden_states_d, hidden_states_p = torch.split(
                hidden_states[:num_actual_tokens],
                [num_decodes, num_prefill_tokens],
                dim=0,
            )
            gate_d, gate_p = torch.split(gate[:num_actual_tokens],
                                         [num_decodes, num_prefill_tokens],
                                         dim=0)
            # Split along batch dimension
            state_indices_tensor_d, state_indices_tensor_p = torch.split(
                state_indices_tensor,
                [num_decodes, num_prefills],
                dim=0,
            )
            query_start_loc_p = (
                attn_metadata.query_start_loc[-num_prefills - 1:] -
                num_decodes if has_prefill else None)
        else:
            hidden_states_p, hidden_states_d = torch.split(
                hidden_states,
                [num_prefill_tokens, num_decodes],
                dim=0,
            )
            gate_p, gate_d = torch.split(gate,
                                         [num_prefill_tokens, num_decodes],
                                         dim=0)
            # Split along batch dimension
            state_indices_tensor_p, state_indices_tensor_d = torch.split(
                state_indices_tensor,
                [num_prefills, num_decodes],
                dim=0,
            )
            query_start_loc_p = (attn_metadata.query_start_loc[:num_prefills +
                                                               1]
                                 if has_prefill else None)

        # Preallocate output tensor to avoid memcpy cost for merging prefill
        # and decode outputs
        preallocated_ssm_out = torch.empty(
            [
                num_prefill_tokens + num_decodes,
                (self.num_heads // self.tp_size) * self.head_dim
            ],
            dtype=hidden_states.dtype,
            device=hidden_states.device,
        )
        if envs.VLLM_USE_V1:
            preallocated_ssm_out_d, preallocated_ssm_out_p = torch.split(
                preallocated_ssm_out,
                [num_decodes, num_prefill_tokens],
                dim=0,
            )
        else:
            preallocated_ssm_out_p, preallocated_ssm_out_d = torch.split(
                preallocated_ssm_out,
                [num_prefill_tokens, num_decodes],
                dim=0,
            )

        # Process prefill requests
        if has_prefill:
            # 2. Convolution sequence transformation
            # - "cache_indices" updates the conv_state cache in positions
            #   pointed to by "state_indices_tensor"
            x = hidden_states_p.transpose(
                0, 1)  # this is the form that causal-conv sees
            if mamba2_metadata.cu_seqlen is None:
                mamba2_metadata = update_metadata(x, query_start_loc_p,
                                                  mamba2_metadata)
            hidden_states_p = causal_conv1d_fn(
                x,
                conv_weights,
                self.conv1d.bias,
                activation=self.activation,
                conv_states=conv_state,
                has_initial_state=has_initial_states_p,
                cache_indices=state_indices_tensor_p,
                metadata=mamba2_metadata,
                query_start_loc=query_start_loc_p)
            hidden_states_p = hidden_states_p.transpose(0, 1)
            hidden_states_p = hidden_states_p[:num_prefill_tokens]
            # In some instances, the following `bcdt_proj` op
            # requires contiguous inputs
            # (e.g. if the Marlin kernel is used).
            hidden_states_p = hidden_states_p.contiguous()

            B, C, dt = self._project_ssm_parameters(hidden_states_p)

            # 3. State Space Model sequence transformation
            initial_states = None
            if has_initial_states_p is not None and prep_initial_states:
                # making a copy of the states
                if envs.VLLM_USE_V1:
                    initial_states = torch.where(
                        has_initial_states_p[:, None, None, None],
                        ssm_state[state_indices_tensor_p], 0)
                else:
                    initial_states = torch.where(
                        has_initial_states_p[:num_prefills, None, None, None],
                        ssm_state[state_indices_tensor_p], 0)
            varlen_state = mamba_chunk_scan_combined(
                hidden_states_p.view(1, num_prefill_tokens,
                                     self.num_heads // self.tp_size,
                                     self.head_dim),
                dt.unsqueeze(0),
                self.A,
                B.view(1, num_prefill_tokens, 1, -1),
                C.view(1, num_prefill_tokens, 1, -1),
                chunk_size=chunk_size,
                D=self.D,
                z=gate_p.view(1, num_prefill_tokens,
                              self.num_heads // self.tp_size, self.head_dim),
                dt_bias=self.dt_bias,
                seq_idx=seq_idx_p,
                chunk_indices=chunk_indices_p,
                chunk_offsets=chunk_offsets_p,
                cu_seqlens=query_start_loc_p,
                initial_states=initial_states,
                return_varlen_states=True,
                return_final_states=False,
                dt_softplus=True,
                dt_limit=(0.0, float("inf")),
                out=preallocated_ssm_out_p.view(1, num_prefill_tokens, -1,
                                                self.head_dim),
                state_dtype=ssm_state.dtype,
            )

            # update ssm states
            # - varlen state is a (batch, nheads, headdim, dstate) tensor
            ssm_state[state_indices_tensor_p] = varlen_state

        # Process decode requests
        if has_decode:
            # 2. Convolution sequence transformation
            hidden_states_d = causal_conv1d_update(
                hidden_states_d,
                conv_state,
                conv_weights,
                self.conv1d.bias,
                self.activation,
                conv_state_indices=state_indices_tensor_d)

            B, C, dt = self._project_ssm_parameters(hidden_states_d)

            # 3. State Space Model sequence transformation
            A = self.A[:, None, ...][:, :,
                                     None].expand(-1, self.head_dim,
                                                  self.config.mamba_d_state)
            dt = dt[:, :, None].expand(-1, -1, self.head_dim)
            dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
            D = self.D[:, None, ...].expand(-1, self.head_dim)
            B = B.unsqueeze(1)
            C = C.unsqueeze(1)
            hidden_states_d = hidden_states_d.view(
                -1, self.num_heads // self.tp_size, self.head_dim)

            # - the hidden states are reshaped into (bs, num_heads, head_dim)
            # - mamba_cache_params.ssm_state's slots will be selected
            #   using state_indices_tensor_d

            # NOTE: final output is an in-place update of out tensor
            selective_state_update(
                ssm_state,
                hidden_states_d,
                dt,
                A,
                B,
                C,
                D,
                z=gate_d.reshape(num_decodes, -1, self.head_dim),
                dt_bias=dt_bias,
                dt_softplus=True,
                state_batch_indices=state_indices_tensor_d,
                out=preallocated_ssm_out_d.view(num_decodes, -1,
                                                self.head_dim),
            )

        # 4. Final linear projection
        output[:num_actual_tokens] = self.out_proj(preallocated_ssm_out)

    def get_state_dtype(self) -> tuple[torch.dtype, torch.dtype]:
        assert self.model_config is not None
        assert self.cache_config is not None
        return MambaStateDtypeCalculator.mamba2_state_dtype(
            self.model_config.dtype,
            self.cache_config.mamba_cache_dtype,
            self.cache_config.mamba_ssm_cache_dtype,
        )

    def get_state_shape(self) -> tuple[tuple[int, ...], tuple[int, ...]]:
        return MambaStateShapeCalculator.mamba2_state_shape(
            intermediate_size=self.intermediate_size,
            tp_world_size=get_tensor_model_parallel_world_size(),
            n_groups=0,
            num_heads=self.num_heads,
            head_dim=self.head_dim,
            state_size=self.ssm_state_size,
            conv_kernel=self.conv_kernel_size,
        )

    @property
    def mamba_type(self) -> str:
        return "mamba2"

    def get_attn_backend(self) -> type["AttentionBackend"]:
        from vllm.v1.attention.backends.mamba2_attn import (
            Mamba2AttentionBackend)
        return Mamba2AttentionBackend

A instance-attribute

A = Parameter(
    empty(divide(num_heads, tp_size), dtype=float32)
)

B_norm instance-attribute

B_norm = RMSNorm(ssm_state_size, eps=rms_norm_eps)

C_norm instance-attribute

C_norm = RMSNorm(ssm_state_size, eps=rms_norm_eps)

D instance-attribute

D = Parameter(ones(divide(num_heads, tp_size)))

activation instance-attribute

activation = 'silu'

bcdt_proj instance-attribute

bcdt_proj = RowParallelLinear(
    intermediate_size,
    time_step_rank + ssm_state_size * 2,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.bcdt_proj",
    return_bias=False,
)

cache_config instance-attribute

cache_config = cache_config

chunk_size instance-attribute

chunk_size = mamba_chunk_size

config instance-attribute

config = hf_config

conv1d instance-attribute

conv1d = ColumnParallelLinear(
    input_size=conv_kernel_size,
    output_size=intermediate_size,
    bias=False,
    prefix=f"{prefix}.conv1d",
    return_bias=False,
)

conv_kernel_size instance-attribute

conv_kernel_size = mamba_d_conv

dt_bias instance-attribute

dt_bias = Parameter(ones(divide(num_heads, tp_size)))

dt_norm instance-attribute

dt_norm = RMSNorm(time_step_rank, eps=rms_norm_eps)

dt_proj instance-attribute

dt_proj = ColumnParallelLinear(
    time_step_rank,
    num_heads,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.dt_proj",
    return_bias=False,
)

head_dim instance-attribute

head_dim = hidden_size_per_head

hidden_size instance-attribute

hidden_size = hidden_size

in_proj instance-attribute

in_proj = MergedColumnParallelLinear(
    hidden_size,
    [intermediate_size] * 2,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.in_proj",
    return_bias=False,
)

intermediate_size instance-attribute

intermediate_size = mamba_num_heads * hidden_size_per_head

kv_cache instance-attribute

kv_cache = [(tensor([]), tensor([]))]

mamba_type property

mamba_type: str

model_config instance-attribute

model_config = model_config

num_heads instance-attribute

num_heads = mamba_num_heads

out_proj instance-attribute

out_proj = RowParallelLinear(
    intermediate_size,
    hidden_size,
    bias=False,
    input_is_parallel=True,
    quant_config=quant_config,
    prefix=f"{prefix}.out_proj",
    return_bias=False,
)

prefix instance-attribute

prefix = prefix

quant_config instance-attribute

quant_config = quant_config

ssm_state_size instance-attribute

ssm_state_size = mamba_d_state

time_step_rank instance-attribute

time_step_rank = max(64, hidden_size // 16)

tp_size instance-attribute

tp_size = get_tensor_model_parallel_world_size()

__init__

__init__(
    vllm_config: VllmConfig, *, prefix: str = "", **kwargs
) -> None
Source code in vllm/model_executor/models/plamo2.py
def __init__(self,
             vllm_config: VllmConfig,
             *,
             prefix: str = "",
             **kwargs) -> None:
    super().__init__()
    self.config = vllm_config.model_config.hf_config
    self.cache_config = vllm_config.cache_config
    self.model_config = vllm_config.model_config
    self.quant_config = vllm_config.quant_config
    self.hidden_size = self.config.hidden_size
    self.ssm_state_size = self.config.mamba_d_state
    self.conv_kernel_size = self.config.mamba_d_conv
    self.intermediate_size = (self.config.mamba_num_heads *
                              self.config.hidden_size_per_head)
    self.tp_size = get_tensor_model_parallel_world_size()
    self.head_dim = self.config.hidden_size_per_head
    self.num_heads = self.config.mamba_num_heads
    self.time_step_rank = max(64, self.hidden_size // 16)
    self.conv1d = ColumnParallelLinear(
        input_size=self.conv_kernel_size,
        output_size=self.intermediate_size,
        bias=False,
        prefix=f"{prefix}.conv1d",
        return_bias=False,
    )
    # unsqueeze to fit conv1d weights shape into the linear weights shape.
    # Can't do this in `weight_loader` since it already exists in
    # `ColumnParallelLinear` and `set_weight_attrs`
    # doesn't allow to override it
    self.conv1d.weight.data = self.conv1d.weight.data.unsqueeze(1)

    self.in_proj = MergedColumnParallelLinear(
        self.hidden_size,
        [self.intermediate_size] * 2,
        bias=False,
        quant_config=self.quant_config,
        prefix=f"{prefix}.in_proj",
        return_bias=False,
    )
    # selective projection used to make dt, B and C input dependent
    self.bcdt_proj = RowParallelLinear(
        self.intermediate_size,
        self.time_step_rank + self.ssm_state_size * 2,
        bias=False,
        quant_config=self.quant_config,
        prefix=f"{prefix}.bcdt_proj",
        return_bias=False,
    )
    # time step projection (discretization) -
    # In the forward we need to apply dt_proj without the bias,
    # as the bias is added in the selective scan kernel.
    self.dt_proj = ColumnParallelLinear(
        self.time_step_rank,
        self.num_heads,
        bias=False,
        quant_config=self.quant_config,
        prefix=f"{prefix}.dt_proj",
        return_bias=False,
    )

    self.A = nn.Parameter(
        torch.empty(
            divide(self.num_heads, self.tp_size),
            dtype=torch.float32,
        ))
    self.D = nn.Parameter(torch.ones(divide(self.num_heads, self.tp_size)))
    self.dt_bias = nn.Parameter(
        torch.ones(divide(self.num_heads, self.tp_size)))

    set_weight_attrs(self.D, {"weight_loader": sharded_weight_loader(0)})
    a_weight_loader = composed_weight_loader(
        sharded_weight_loader(0), lambda x: -torch.exp(x.float()))
    set_weight_attrs(self.A, {"weight_loader": a_weight_loader})
    set_weight_attrs(self.dt_bias,
                     {"weight_loader": sharded_weight_loader(0)})

    self.out_proj = RowParallelLinear(
        self.intermediate_size,
        self.hidden_size,
        bias=False,
        input_is_parallel=True,
        quant_config=self.quant_config,
        prefix=f"{prefix}.out_proj",
        return_bias=False,
    )
    # The activation function is fixed to SiLU.
    self.activation = "silu"

    self.dt_norm = RMSNorm(self.time_step_rank,
                           eps=self.config.rms_norm_eps)
    self.B_norm = RMSNorm(self.ssm_state_size,
                          eps=self.config.rms_norm_eps)
    self.C_norm = RMSNorm(self.ssm_state_size,
                          eps=self.config.rms_norm_eps)

    self.chunk_size = self.config.mamba_chunk_size

    if envs.VLLM_USE_V1:
        compilation_config = get_current_vllm_config().compilation_config
        if prefix in compilation_config.static_forward_context:
            raise ValueError(f"Duplicate layer name: {prefix}")
        compilation_config.static_forward_context[prefix] = self
        # The outer list is for v0 PP virtual engine. Though this code path
        # only runs for v1, we have to do this to unify with the interface
        # of Attention + v0 PP.
        # The inner tuple is (conv_state, ssm_state)
        self.kv_cache = [(torch.tensor([]), torch.tensor([]))]
        assert self.chunk_size != -1, "chunk_size must be set for v1"

    self.prefix = prefix
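
The composed weight loader attached to A above maps each loaded value x to -exp(x.float()). A minimal sketch of that transform, assuming the checkpoint stores per-head log values (the usual Mamba convention); the numbers below are made up and are not from a PLaMo2 checkpoint:

# Hedged sketch (not from plamo2.py): the effect of the composed A weight
# loader above, which maps loaded values x to -exp(x.float()).
import torch

loaded_A = torch.tensor([0.0, 0.5, 1.0])  # made-up per-head values
A = -torch.exp(loaded_A.float())
print(A)  # tensor([-1.0000, -1.6487, -2.7183])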

_project_ssm_parameters

_project_ssm_parameters(hidden_states)
Source code in vllm/model_executor/models/plamo2.py
def _project_ssm_parameters(self, hidden_states):
    ssm_parameters = self.bcdt_proj(hidden_states)
    B, C, time_step = torch.split(
        ssm_parameters,
        [self.ssm_state_size, self.ssm_state_size, self.time_step_rank],
        dim=-1,
    )

    # vllm._custom_ops.rms_norm requires contiguous input tensors.
    time_step = self.dt_norm(time_step.contiguous())
    B = self.B_norm(B.contiguous())
    C = self.C_norm(C.contiguous())
    dt = self.dt_proj(time_step)
    return B, C, dt
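
_project_ssm_parameters splits the bcdt_proj output into B, C, and the time step before normalizing each part and projecting dt. A hedged shape sketch of that split, using assumed sizes (ssm_state_size and time_step_rank below are illustrative, not taken from a real PLaMo2 config):

# Hedged shape sketch of the torch.split performed above.
import torch

ssm_state_size = 64    # stands in for config.mamba_d_state
time_step_rank = 128   # stands in for max(64, hidden_size // 16)
num_tokens = 10

# bcdt_proj output width is 2 * ssm_state_size + time_step_rank.
ssm_parameters = torch.randn(num_tokens, ssm_state_size * 2 + time_step_rank)
B, C, time_step = torch.split(
    ssm_parameters,
    [ssm_state_size, ssm_state_size, time_step_rank],
    dim=-1,
)
print(B.shape, C.shape, time_step.shape)
# torch.Size([10, 64]) torch.Size([10, 64]) torch.Size([10, 128])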

forward

forward(
    hidden_states: Tensor,
    output: Tensor,
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
)
Source code in vllm/model_executor/models/plamo2.py
def forward(
    self,
    hidden_states: torch.Tensor,
    output: torch.Tensor,
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
):
    if not envs.VLLM_USE_V1:
        CustomOp.forward(self, hidden_states, output, mamba_cache_params,
                         mamba2_metadata)
    else:
        torch.ops.vllm.plamo2_mamba_mixer(
            hidden_states,
            output,
            self.prefix,
        )

forward_cuda

forward_cuda(
    hidden_states: Tensor,
    output: Tensor,
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
)
Source code in vllm/model_executor/models/plamo2.py
def forward_cuda(
    self,
    hidden_states: torch.Tensor,
    output: torch.Tensor,
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
):

    forward_context = get_forward_context()
    # mamba2_metadata contains metadata necessary for the mamba2 triton
    # kernels to operate in continuous batching and in chunked prefill
    # modes; they are computed at the top-level model forward since they
    # stay the same and are reused for all mamba layers in the same iteration
    attn_metadata: AttentionMetadata = forward_context.attn_metadata
    if envs.VLLM_USE_V1:
        if attn_metadata is not None:
            assert isinstance(attn_metadata, dict)
            attn_metadata = attn_metadata[self.prefix]
            mamba2_metadata = attn_metadata
            assert isinstance(attn_metadata, Mamba2AttentionMetadata)
            self_kv_cache = self.kv_cache[forward_context.virtual_engine]
            # conv_state = (..., dim, width-1) yet contiguous along 'dim'
            conv_state = self_kv_cache[0].transpose(-1, -2)
            ssm_state = self_kv_cache[1]
            state_indices_tensor = attn_metadata.state_indices_tensor
            has_initial_states_p = attn_metadata.has_initial_states_p
            prep_initial_states = attn_metadata.prep_initial_states
            chunk_size = attn_metadata.chunk_size
            seq_idx_p = attn_metadata.seq_idx_p
            chunk_indices_p = attn_metadata.chunk_indices_p
            chunk_offsets_p = attn_metadata.chunk_offsets_p
    else:
        conv_state = mamba_cache_params.conv_state
        ssm_state = mamba_cache_params.ssm_state
        state_indices_tensor = mamba_cache_params.state_indices_tensor
        has_initial_states_p = mamba2_metadata.has_initial_states
        prep_initial_states = mamba2_metadata.prep_initial_states
        chunk_size = mamba2_metadata.chunk_size
        seq_idx_p = mamba2_metadata.seq_idx
        chunk_indices_p = mamba2_metadata.chunk_indices
        chunk_offsets_p = mamba2_metadata.chunk_offsets

    # 1. Gated MLP's linear projection
    projected_states = self.in_proj(hidden_states)
    gate, hidden_states = projected_states.chunk(2, dim=-1)

    # 2. Convolution sequence transformation
    conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0),
                                           self.conv1d.weight.size(2))

    if envs.VLLM_USE_V1 and attn_metadata is None:
        # V1 profile run
        hidden_states = (hidden_states.transpose(0, 1).clone().transpose(
            0, 1)).contiguous()
        output[:] = self.out_proj(hidden_states)
        return

    num_prefills = attn_metadata.num_prefills  # request count
    num_decodes = attn_metadata.num_decode_tokens  # token count (=request)
    num_prefill_tokens = attn_metadata.num_prefill_tokens  # token count
    has_prefill = num_prefills > 0
    has_decode = num_decodes > 0
    num_actual_tokens = num_prefill_tokens + num_decodes

    # NOTE: V0 puts prefill before decode, V1 puts decode before prefill
    # Separate prefill and decode by splitting varlen input
    # Split along token dimension
    if envs.VLLM_USE_V1:
        hidden_states_d, hidden_states_p = torch.split(
            hidden_states[:num_actual_tokens],
            [num_decodes, num_prefill_tokens],
            dim=0,
        )
        gate_d, gate_p = torch.split(gate[:num_actual_tokens],
                                     [num_decodes, num_prefill_tokens],
                                     dim=0)
        # Split along batch dimension
        state_indices_tensor_d, state_indices_tensor_p = torch.split(
            state_indices_tensor,
            [num_decodes, num_prefills],
            dim=0,
        )
        query_start_loc_p = (
            attn_metadata.query_start_loc[-num_prefills - 1:] -
            num_decodes if has_prefill else None)
    else:
        hidden_states_p, hidden_states_d = torch.split(
            hidden_states,
            [num_prefill_tokens, num_decodes],
            dim=0,
        )
        gate_p, gate_d = torch.split(gate,
                                     [num_prefill_tokens, num_decodes],
                                     dim=0)
        # Split along batch dimension
        state_indices_tensor_p, state_indices_tensor_d = torch.split(
            state_indices_tensor,
            [num_prefills, num_decodes],
            dim=0,
        )
        query_start_loc_p = (attn_metadata.query_start_loc[:num_prefills +
                                                           1]
                             if has_prefill else None)

    # Preallocate output tensor to avoid memcpy cost for merging prefill
    # and decode outputs
    preallocated_ssm_out = torch.empty(
        [
            num_prefill_tokens + num_decodes,
            (self.num_heads // self.tp_size) * self.head_dim
        ],
        dtype=hidden_states.dtype,
        device=hidden_states.device,
    )
    if envs.VLLM_USE_V1:
        preallocated_ssm_out_d, preallocated_ssm_out_p = torch.split(
            preallocated_ssm_out,
            [num_decodes, num_prefill_tokens],
            dim=0,
        )
    else:
        preallocated_ssm_out_p, preallocated_ssm_out_d = torch.split(
            preallocated_ssm_out,
            [num_prefill_tokens, num_decodes],
            dim=0,
        )

    # Process prefill requests
    if has_prefill:
        # 2. Convolution sequence transformation
        # - "cache_indices" updates the conv_state cache in positions
        #   pointed to by "state_indices_tensor"
        x = hidden_states_p.transpose(
            0, 1)  # this is the form that causal-conv sees
        if mamba2_metadata.cu_seqlen is None:
            mamba2_metadata = update_metadata(x, query_start_loc_p,
                                              mamba2_metadata)
        hidden_states_p = causal_conv1d_fn(
            x,
            conv_weights,
            self.conv1d.bias,
            activation=self.activation,
            conv_states=conv_state,
            has_initial_state=has_initial_states_p,
            cache_indices=state_indices_tensor_p,
            metadata=mamba2_metadata,
            query_start_loc=query_start_loc_p)
        hidden_states_p = hidden_states_p.transpose(0, 1)
        hidden_states_p = hidden_states_p[:num_prefill_tokens]
        # In some instances, the following `bcdt_proj` op
        # requires contiguous inputs
        # (e.g. if the Marlin kernel is used).
        hidden_states_p = hidden_states_p.contiguous()

        B, C, dt = self._project_ssm_parameters(hidden_states_p)

        # 3. State Space Model sequence transformation
        initial_states = None
        if has_initial_states_p is not None and prep_initial_states:
            # making a copy of the states
            if envs.VLLM_USE_V1:
                initial_states = torch.where(
                    has_initial_states_p[:, None, None, None],
                    ssm_state[state_indices_tensor_p], 0)
            else:
                initial_states = torch.where(
                    has_initial_states_p[:num_prefills, None, None, None],
                    ssm_state[state_indices_tensor_p], 0)
        varlen_state = mamba_chunk_scan_combined(
            hidden_states_p.view(1, num_prefill_tokens,
                                 self.num_heads // self.tp_size,
                                 self.head_dim),
            dt.unsqueeze(0),
            self.A,
            B.view(1, num_prefill_tokens, 1, -1),
            C.view(1, num_prefill_tokens, 1, -1),
            chunk_size=chunk_size,
            D=self.D,
            z=gate_p.view(1, num_prefill_tokens,
                          self.num_heads // self.tp_size, self.head_dim),
            dt_bias=self.dt_bias,
            seq_idx=seq_idx_p,
            chunk_indices=chunk_indices_p,
            chunk_offsets=chunk_offsets_p,
            cu_seqlens=query_start_loc_p,
            initial_states=initial_states,
            return_varlen_states=True,
            return_final_states=False,
            dt_softplus=True,
            dt_limit=(0.0, float("inf")),
            out=preallocated_ssm_out_p.view(1, num_prefill_tokens, -1,
                                            self.head_dim),
            state_dtype=ssm_state.dtype,
        )

        # update ssm states
        # - varlen state is a (batch, nheads, headdim, dstate) tensor
        ssm_state[state_indices_tensor_p] = varlen_state

    # Process decode requests
    if has_decode:
        # 2. Convolution sequence transformation
        hidden_states_d = causal_conv1d_update(
            hidden_states_d,
            conv_state,
            conv_weights,
            self.conv1d.bias,
            self.activation,
            conv_state_indices=state_indices_tensor_d)

        B, C, dt = self._project_ssm_parameters(hidden_states_d)

        # 3. State Space Model sequence transformation
        A = self.A[:, None, ...][:, :,
                                 None].expand(-1, self.head_dim,
                                              self.config.mamba_d_state)
        dt = dt[:, :, None].expand(-1, -1, self.head_dim)
        dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
        D = self.D[:, None, ...].expand(-1, self.head_dim)
        B = B.unsqueeze(1)
        C = C.unsqueeze(1)
        hidden_states_d = hidden_states_d.view(
            -1, self.num_heads // self.tp_size, self.head_dim)

        # - the hidden states are reshaped into (bs, num_heads, head_dim)
        # - mamba_cache_params.ssm_state's slots will be selected
        #   using state_indices_tensor_d

        # NOTE: final output is an in-place update of out tensor
        selective_state_update(
            ssm_state,
            hidden_states_d,
            dt,
            A,
            B,
            C,
            D,
            z=gate_d.reshape(num_decodes, -1, self.head_dim),
            dt_bias=dt_bias,
            dt_softplus=True,
            state_batch_indices=state_indices_tensor_d,
            out=preallocated_ssm_out_d.view(num_decodes, -1,
                                            self.head_dim),
        )

    # 4. Final linear projection
    output[:num_actual_tokens] = self.out_proj(preallocated_ssm_out)
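
forward_cuda splits the flattened token batch into decode and prefill segments, and the note in the code points out that V0 and V1 order them differently. A hedged illustration of that ordering difference; the counts and tensors are made up:

# V1 batches place decode tokens first, V0 places prefill tokens first.
import torch

num_prefill_tokens, num_decodes = 7, 3
hidden_states = torch.arange(10, dtype=torch.float32).unsqueeze(-1)  # 10 tokens

# V1 layout: [decode tokens | prefill tokens]
hidden_states_d, hidden_states_p = torch.split(
    hidden_states, [num_decodes, num_prefill_tokens], dim=0)

# V0 layout: [prefill tokens | decode tokens]
hidden_states_p_v0, hidden_states_d_v0 = torch.split(
    hidden_states, [num_prefill_tokens, num_decodes], dim=0)

print(hidden_states_d.shape, hidden_states_p.shape)
# torch.Size([3, 1]) torch.Size([7, 1])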

forward_native

forward_native(
    hidden_states: Tensor,
    output: Tensor,
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
)
Source code in vllm/model_executor/models/plamo2.py
def forward_native(
    self,
    hidden_states: torch.Tensor,
    output: torch.Tensor,
    mamba_cache_params: MambaCacheParams,
    mamba2_metadata: Mamba2Metadata,
    **kwargs,
):
    pass

get_attn_backend

get_attn_backend() -> type[AttentionBackend]
Source code in vllm/model_executor/models/plamo2.py
def get_attn_backend(self) -> type["AttentionBackend"]:
    from vllm.v1.attention.backends.mamba2_attn import (
        Mamba2AttentionBackend)
    return Mamba2AttentionBackend

get_state_dtype

get_state_dtype() -> tuple[dtype, dtype]
Source code in vllm/model_executor/models/plamo2.py
def get_state_dtype(self) -> tuple[torch.dtype, torch.dtype]:
    assert self.model_config is not None
    assert self.cache_config is not None
    return MambaStateDtypeCalculator.mamba2_state_dtype(
        self.model_config.dtype,
        self.cache_config.mamba_cache_dtype,
        self.cache_config.mamba_ssm_cache_dtype,
    )

get_state_shape

get_state_shape() -> tuple[
    tuple[int, ...], tuple[int, ...]
]
Source code in vllm/model_executor/models/plamo2.py
def get_state_shape(self) -> tuple[tuple[int, ...], tuple[int, ...]]:
    return MambaStateShapeCalculator.mamba2_state_shape(
        intermediate_size=self.intermediate_size,
        tp_world_size=get_tensor_model_parallel_world_size(),
        n_groups=0,
        num_heads=self.num_heads,
        head_dim=self.head_dim,
        state_size=self.ssm_state_size,
        conv_kernel=self.conv_kernel_size,
    )

Plamo2Model

Bases: Module

Source code in vllm/model_executor/models/plamo2.py
@support_torch_compile
class Plamo2Model(torch.nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()

        config = vllm_config.model_config.hf_config

        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.org_vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
            prefix=f"{prefix}.embed_tokens",
        )
        self.make_empty_intermediate_tensors = (
            make_empty_intermediate_tensors_factory(
                ["hidden_states", "residual"], config.hidden_size))
        self.layers = Plamo2Decoder(vllm_config=vllm_config,
                                    prefix=f"{prefix}.layers")
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.embed_tokens(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        mamba_cache_params: MambaCacheParams,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.get_input_embeddings(input_ids)
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]

        if not envs.VLLM_USE_V1:
            attn_metadata: AttentionMetadata = get_forward_context(
            ).attn_metadata
            mamba2_metadata = prepare_mamba2_metadata(
                chunk_size=self.config.mamba_chunk_size,
                attn_metadata=attn_metadata,
            )
        else:
            # v1 gets mamba2_metadata from forward_context
            mamba2_metadata = None

        hidden_states, residual = self.layers(
            positions=positions,
            hidden_states=hidden_states,
            residual=residual,
            mamba_cache_params=mamba_cache_params,
            mamba2_metadata=mamba2_metadata,
        )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors({
                "hidden_states": hidden_states,
                "residual": residual
            })
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states

config instance-attribute

config = config

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size,
    hidden_size,
    org_num_embeddings=vocab_size,
    prefix=f"{prefix}.embed_tokens",
)

layers instance-attribute

layers = Plamo2Decoder(
    vllm_config=vllm_config, prefix=f"{prefix}.layers"
)

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    make_empty_intermediate_tensors_factory(
        ["hidden_states", "residual"], hidden_size
    )
)

norm instance-attribute

norm = RMSNorm(hidden_size, eps=rms_norm_eps)

org_vocab_size instance-attribute

org_vocab_size = vocab_size

padding_idx instance-attribute

padding_idx = pad_token_id

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/plamo2.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()

    config = vllm_config.model_config.hf_config

    self.config = config
    self.padding_idx = config.pad_token_id
    self.vocab_size = config.vocab_size
    self.org_vocab_size = config.vocab_size

    self.embed_tokens = VocabParallelEmbedding(
        self.vocab_size,
        config.hidden_size,
        org_num_embeddings=config.vocab_size,
        prefix=f"{prefix}.embed_tokens",
    )
    self.make_empty_intermediate_tensors = (
        make_empty_intermediate_tensors_factory(
            ["hidden_states", "residual"], config.hidden_size))
    self.layers = Plamo2Decoder(vllm_config=vllm_config,
                                prefix=f"{prefix}.layers")
    self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    mamba_cache_params: MambaCacheParams,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
) -> Tensor
Source code in vllm/model_executor/models/plamo2.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    mamba_cache_params: MambaCacheParams,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    if get_pp_group().is_first_rank:
        if inputs_embeds is not None:
            hidden_states = inputs_embeds
        else:
            hidden_states = self.get_input_embeddings(input_ids)
        residual = None
    else:
        assert intermediate_tensors is not None
        hidden_states = intermediate_tensors["hidden_states"]
        residual = intermediate_tensors["residual"]

    if not envs.VLLM_USE_V1:
        attn_metadata: AttentionMetadata = get_forward_context(
        ).attn_metadata
        mamba2_metadata = prepare_mamba2_metadata(
            chunk_size=self.config.mamba_chunk_size,
            attn_metadata=attn_metadata,
        )
    else:
        # v1 gets mamba2_metadata from forward_context
        mamba2_metadata = None

    hidden_states, residual = self.layers(
        positions=positions,
        hidden_states=hidden_states,
        residual=residual,
        mamba_cache_params=mamba_cache_params,
        mamba2_metadata=mamba2_metadata,
    )
    if not get_pp_group().is_last_rank:
        return IntermediateTensors({
            "hidden_states": hidden_states,
            "residual": residual
        })
    hidden_states, _ = self.norm(hidden_states, residual)
    return hidden_states

get_input_embeddings

get_input_embeddings(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/plamo2.py
def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
    return self.embed_tokens(input_ids)

is_mamba

is_mamba(config: Plamo2Config, i: int) -> bool
Source code in vllm/model_executor/models/plamo2.py
def is_mamba(config: Plamo2Config, i: int) -> bool:
    assert config.mamba_step > 1

    if config.num_hidden_layers <= (config.mamba_step // 2):
        # use attention in last layer
        return i != config.num_hidden_layers - 1
    return (i % config.mamba_step) != (config.mamba_step // 2)
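
A hedged worked example of the layer pattern implied by is_mamba, using assumed config values (mamba_step=8, num_hidden_layers=16); the helper below simply mirrors the function above for those values:

mamba_step = 8
num_hidden_layers = 16

def is_mamba_layer(i: int) -> bool:
    # same rule as is_mamba above, with the assumed config values inlined
    if num_hidden_layers <= mamba_step // 2:
        return i != num_hidden_layers - 1
    return (i % mamba_step) != (mamba_step // 2)

print(["mamba" if is_mamba_layer(i) else "attn" for i in range(num_hidden_layers)])
# attention lands at indices 4 and 12; all other layers are Mamba mixers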

plamo2_mamba_mixer

plamo2_mamba_mixer(
    hidden_states: Tensor, output: Tensor, layer_name: str
) -> None
Source code in vllm/model_executor/models/plamo2.py
def plamo2_mamba_mixer(
    hidden_states: torch.Tensor,
    output: torch.Tensor,
    layer_name: str,
) -> None:
    forward_context: ForwardContext = get_forward_context()
    self = forward_context.no_compile_layers[layer_name]
    self.forward_cuda(hidden_states=hidden_states,
                      output=output,
                      mamba_cache_params=None,
                      mamba2_metadata=None)
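
In the V1 path, forward_cuda is reached through this module-level op rather than a direct method call: each mixer registers itself under its prefix at construction time, and the op resolves that prefix back to the layer instance via the forward context. A hedged toy sketch of the same name-based dispatch pattern; ToyMixer, layer_registry, and toy_mamba_mixer are illustrative names, not vLLM internals:

import torch

layer_registry: dict[str, "ToyMixer"] = {}

class ToyMixer:
    def __init__(self, prefix: str) -> None:
        # register the layer under its prefix, rejecting duplicates
        if prefix in layer_registry:
            raise ValueError(f"Duplicate layer name: {prefix}")
        layer_registry[prefix] = self
        self.prefix = prefix

    def forward_cuda(self, hidden_states: torch.Tensor,
                     output: torch.Tensor) -> None:
        output[:] = hidden_states  # placeholder for the real mixer compute

def toy_mamba_mixer(hidden_states: torch.Tensor, output: torch.Tensor,
                    layer_name: str) -> None:
    # look the layer up by name, then run it with an in-place output update
    layer_registry[layer_name].forward_cuda(hidden_states, output)

mixer = ToyMixer("model.layers.0.mixer")
x = torch.randn(4, 8)
out = torch.empty_like(x)
toy_mamba_mixer(x, out, "model.layers.0.mixer")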

plamo2_mamba_mixer_fake

plamo2_mamba_mixer_fake(
    hidden_states: Tensor, output: Tensor, layer_name: str
) -> None
Source code in vllm/model_executor/models/plamo2.py
def plamo2_mamba_mixer_fake(
    hidden_states: torch.Tensor,
    output: torch.Tensor,
    layer_name: str,
) -> None:
    return