vllm.transformers_utils.configs.midashenglm

DashengConfig

Bases: PretrainedConfig

Source code in vllm/transformers_utils/configs/midashenglm.py
class DashengConfig(PretrainedConfig):
    model_type = "midashenglm_dasheng_encoder"

    def __init__(
        self,
        embed_dim: int = 768,
        outputdim: int = 527,
        patch_size: Union[int, tuple[int, int]] = 16,
        patch_stride: Union[int, tuple[int, int]] = 16,
        input_channels: int = 1,
        target_length: int = 1012,
        depth: int = 12,
        num_heads: int = 12,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        init_values: Optional[float] = None,
        drop_rate: float = 0.0,
        attn_drop_rate: float = 0.0,
        f_min: float = 0.0,
        f_max: float = 8000.0,
        center: bool = True,
        win_length: int = 512,
        hop_length: int = 160,
        sample_rate: int = 16000,
        n_fft: int = 512,
        n_mels: int = 64,
        **kwargs,
    ):
        self.embed_dim = embed_dim
        self.outputdim = outputdim
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        self.input_channels = input_channels
        self.target_length = target_length
        self.depth = depth
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.init_values = init_values
        self.drop_rate = drop_rate
        self.attn_drop_rate = attn_drop_rate
        self.f_min = f_min
        self.f_max = f_max
        self.center = center
        self.win_length = win_length
        self.hop_length = hop_length
        self.sample_rate = sample_rate
        self.n_fft = n_fft
        self.n_mels = n_mels
        super().__init__(**kwargs)
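
A minimal usage sketch (not part of the vLLM source, and assuming vLLM is installed so the module import resolves): constructing the encoder config with defaults and with a few overridden audio front-end settings. The override values are illustrative only.

from vllm.transformers_utils.configs.midashenglm import DashengConfig

# Default encoder configuration: 768-dim embeddings, 12 layers, 12 heads.
cfg = DashengConfig()
assert cfg.embed_dim == 768 and cfg.depth == 12

# Override a few mel-spectrogram settings; unlisted fields keep their defaults.
cfg_32k = DashengConfig(sample_rate=32000, f_max=16000.0, n_mels=128)
assert cfg_32k.n_mels == 128 and cfg_32k.win_length == 512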

attn_drop_rate instance-attribute

attn_drop_rate = attn_drop_rate

center instance-attribute

center = center

depth instance-attribute

depth = depth

drop_rate instance-attribute

drop_rate = drop_rate

embed_dim instance-attribute

embed_dim = embed_dim

f_max instance-attribute

f_max = f_max

f_min instance-attribute

f_min = f_min

hop_length instance-attribute

hop_length = hop_length

init_values instance-attribute

init_values = init_values

input_channels instance-attribute

input_channels = input_channels

mlp_ratio instance-attribute

mlp_ratio = mlp_ratio

model_type class-attribute instance-attribute

model_type = 'midashenglm_dasheng_encoder'

n_fft instance-attribute

n_fft = n_fft

n_mels instance-attribute

n_mels = n_mels

num_heads instance-attribute

num_heads = num_heads

outputdim instance-attribute

outputdim = outputdim

patch_size instance-attribute

patch_size = patch_size

patch_stride instance-attribute

patch_stride = patch_stride

qkv_bias instance-attribute

qkv_bias = qkv_bias

sample_rate instance-attribute

sample_rate = sample_rate

target_length instance-attribute

target_length = target_length

win_length instance-attribute

win_length = win_length

__init__

__init__(
    embed_dim: int = 768,
    outputdim: int = 527,
    patch_size: Union[int, tuple[int, int]] = 16,
    patch_stride: Union[int, tuple[int, int]] = 16,
    input_channels: int = 1,
    target_length: int = 1012,
    depth: int = 12,
    num_heads: int = 12,
    mlp_ratio: float = 4.0,
    qkv_bias: bool = True,
    init_values: Optional[float] = None,
    drop_rate: float = 0.0,
    attn_drop_rate: float = 0.0,
    f_min: float = 0.0,
    f_max: float = 8000.0,
    center: bool = True,
    win_length: int = 512,
    hop_length: int = 160,
    sample_rate: int = 16000,
    n_fft: int = 512,
    n_mels: int = 64,
    **kwargs,
)
Source code in vllm/transformers_utils/configs/midashenglm.py
def __init__(
    self,
    embed_dim: int = 768,
    outputdim: int = 527,
    patch_size: Union[int, tuple[int, int]] = 16,
    patch_stride: Union[int, tuple[int, int]] = 16,
    input_channels: int = 1,
    target_length: int = 1012,
    depth: int = 12,
    num_heads: int = 12,
    mlp_ratio: float = 4.0,
    qkv_bias: bool = True,
    init_values: Optional[float] = None,
    drop_rate: float = 0.0,
    attn_drop_rate: float = 0.0,
    f_min: float = 0.0,
    f_max: float = 8000.0,
    center: bool = True,
    win_length: int = 512,
    hop_length: int = 160,
    sample_rate: int = 16000,
    n_fft: int = 512,
    n_mels: int = 64,
    **kwargs,
):
    self.embed_dim = embed_dim
    self.outputdim = outputdim
    self.patch_size = patch_size
    self.patch_stride = patch_stride
    self.input_channels = input_channels
    self.target_length = target_length
    self.depth = depth
    self.num_heads = num_heads
    self.mlp_ratio = mlp_ratio
    self.qkv_bias = qkv_bias
    self.init_values = init_values
    self.drop_rate = drop_rate
    self.attn_drop_rate = attn_drop_rate
    self.f_min = f_min
    self.f_max = f_max
    self.center = center
    self.win_length = win_length
    self.hop_length = hop_length
    self.sample_rate = sample_rate
    self.n_fft = n_fft
    self.n_mels = n_mels
    super().__init__(**kwargs)
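
As the signature indicates, patch_size and patch_stride accept either a single int or a 2-tuple of ints; the constructor simply stores whichever form is passed. A short illustrative sketch (the tuple values are arbitrary, not recommended settings):

from vllm.transformers_utils.configs.midashenglm import DashengConfig

# A scalar uses the same value for both patch dimensions ...
square = DashengConfig(patch_size=16, patch_stride=16)
assert square.patch_size == 16

# ... while a 2-tuple spells the two dimensions out explicitly. The config
# does no validation here; it just records the value as given.
rect = DashengConfig(patch_size=(64, 4), patch_stride=(64, 4))
assert rect.patch_size == (64, 4)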

MiDashengLMConfig

Bases: PretrainedConfig

Source code in vllm/transformers_utils/configs/midashenglm.py
class MiDashengLMConfig(PretrainedConfig):
    model_type = "midashenglm"

    def __init__(
        self,
        audio_encoder_config: Optional[dict] = None,
        subsample_factor: int = 5,
        text_config: Optional[dict] = None,
        audio_token_id: Optional[int] = None,
        **kwargs,
    ):
        self.audio_encoder_config = DashengConfig(
            **(audio_encoder_config or {}))
        self.subsample_factor = subsample_factor
        self.text_config = (Qwen2_5OmniTextConfig(
            **text_config) if text_config else Qwen2_5OmniTextConfig())
        self.text_config.rope_scaling = None  # uses_mrope is false
        self.audio_token_id = audio_token_id
        super().__init__(**kwargs)
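
A hedged construction sketch (not from the vLLM source) showing how the nested audio config is built: a plain dict passed as audio_encoder_config is expanded into a DashengConfig, with omitted fields falling back to the DashengConfig defaults. The field values and the audio_token_id are illustrative placeholders.

from vllm.transformers_utils.configs.midashenglm import (
    DashengConfig,
    MiDashengLMConfig,
)

cfg = MiDashengLMConfig(
    audio_encoder_config={"embed_dim": 1024, "depth": 24, "num_heads": 16},
    subsample_factor=5,
    audio_token_id=151646,  # illustrative placeholder, not the real token id
)

# The dict is wrapped in a DashengConfig; unspecified fields keep defaults.
assert isinstance(cfg.audio_encoder_config, DashengConfig)
assert cfg.audio_encoder_config.embed_dim == 1024
assert cfg.audio_encoder_config.mlp_ratio == 4.0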

audio_encoder_config instance-attribute

audio_encoder_config = DashengConfig(
    **(audio_encoder_config or {})
)

audio_token_id instance-attribute

audio_token_id = audio_token_id

model_type class-attribute instance-attribute

model_type = 'midashenglm'

subsample_factor instance-attribute

subsample_factor = subsample_factor

text_config instance-attribute

text_config = (
    Qwen2_5OmniTextConfig(**text_config)
    if text_config
    else Qwen2_5OmniTextConfig()
)

__init__

__init__(
    audio_encoder_config: Optional[dict] = None,
    subsample_factor: int = 5,
    text_config: Optional[dict] = None,
    audio_token_id: Optional[int] = None,
    **kwargs,
)
Source code in vllm/transformers_utils/configs/midashenglm.py
def __init__(
    self,
    audio_encoder_config: Optional[dict] = None,
    subsample_factor: int = 5,
    text_config: Optional[dict] = None,
    audio_token_id: Optional[int] = None,
    **kwargs,
):
    self.audio_encoder_config = DashengConfig(
        **(audio_encoder_config or {}))
    self.subsample_factor = subsample_factor
    self.text_config = (Qwen2_5OmniTextConfig(
        **text_config) if text_config else Qwen2_5OmniTextConfig())
    self.text_config.rope_scaling = None  # uses_mrope is false
    self.audio_token_id = audio_token_id
    super().__init__(**kwargs)
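
Note that the constructor unconditionally clears rope_scaling on the text config, since this model does not use M-RoPE. A small sketch of that behavior (assuming a transformers version that provides Qwen2_5OmniTextConfig; the text_config values are illustrative, not model defaults):

from vllm.transformers_utils.configs.midashenglm import MiDashengLMConfig

cfg = MiDashengLMConfig(
    text_config={"hidden_size": 2048, "num_hidden_layers": 24},
)

# Whatever the underlying Qwen2_5OmniTextConfig would default to, the wrapper
# sets rope_scaling to None because uses_mrope is false for this model.
assert cfg.text_config.rope_scaling is None
assert cfg.text_config.hidden_size == 2048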