
vllm.config

Modules:

cache
compilation
device
kv_events
kv_transfer
load
lora
model
multimodal
observability
parallel
pooler
scheduler
speculative
speech_to_text
structured_outputs
utils

Utility functions for vLLM config dataclasses.


BlockSize module-attribute

BlockSize = Literal[1, 8, 16, 32, 64, 128]

CacheDType module-attribute

CacheDType = Literal[
    "auto",
    "bfloat16",
    "fp8",
    "fp8_e4m3",
    "fp8_e5m2",
    "fp8_inc",
]

ConfigType module-attribute

ConfigType = type[DataclassInstance]

ConvertOption module-attribute

ConvertOption = Literal['auto', ConvertType]

DetailedTraceModules module-attribute

DetailedTraceModules = Literal['model', 'worker', 'all']

Device module-attribute

Device = Literal['auto', 'cuda', 'cpu', 'tpu', 'xpu']

DistributedExecutorBackend module-attribute

DistributedExecutorBackend = Literal[
    "ray", "mp", "uni", "external_launcher"
]

HfOverrides module-attribute

HfOverrides = Union[
    dict[str, Any],
    Callable[[PretrainedConfig], PretrainedConfig],
]

LogprobsMode module-attribute

LogprobsMode = Literal[
    "raw_logits",
    "raw_logprobs",
    "processed_logits",
    "processed_logprobs",
]

MMCacheType module-attribute

MMCacheType = Literal['shm', 'lru']

MMEncoderTPMode module-attribute

MMEncoderTPMode = Literal['weights', 'data']

MambaDType module-attribute

MambaDType = Literal['auto', 'float32']

ModelDType module-attribute

ModelDType = Literal[
    "auto",
    "half",
    "float16",
    "bfloat16",
    "float",
    "float32",
]

ModelImpl module-attribute

ModelImpl = Literal[
    "auto", "vllm", "transformers", "terratorch"
]

PrefixCachingHashAlgo module-attribute

PrefixCachingHashAlgo = Literal['sha256', 'sha256_cbor']

RunnerOption module-attribute

RunnerOption = Literal[
    "auto", "generate", "pooling", "draft"
]

RunnerType module-attribute

RunnerType = Literal['generate', 'pooling', 'draft']

SchedulerPolicy module-attribute

SchedulerPolicy = Literal['fcfs', 'priority']

TaskOption module-attribute

TaskOption = Literal[
    "auto",
    "generate",
    "embedding",
    "embed",
    "classify",
    "score",
    "reward",
    "transcription",
    "draft",
]

TokenizerMode module-attribute

TokenizerMode = Literal["auto", "slow", "mistral", "custom"]
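
These aliases are plain typing.Literal types, so the set of allowed values can be inspected at runtime with typing.get_args. A minimal sketch, assuming vLLM is installed so the aliases are importable from vllm.config (it mirrors the validation pattern used later in CacheConfig._verify_cache_dtype):

from typing import get_args

from vllm.config import CacheDType, ModelDType  # re-exported Literal aliases

def check_cache_dtype(value: str) -> None:
    # Reject values outside the CacheDType literal.
    if value not in get_args(CacheDType):
        raise ValueError(f"Unknown kv cache dtype: {value}")

check_cache_dtype("fp8_e4m3")  # accepted
print(get_args(ModelDType))    # ('auto', 'half', 'float16', 'bfloat16', 'float', 'float32')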

__all__ module-attribute

__all__ = [
    "BlockSize",
    "CacheConfig",
    "CacheDType",
    "MambaDType",
    "PrefixCachingHashAlgo",
    "CompilationConfig",
    "CompilationLevel",
    "CUDAGraphMode",
    "PassConfig",
    "Device",
    "DeviceConfig",
    "KVEventsConfig",
    "KVTransferConfig",
    "LoadConfig",
    "LoRAConfig",
    "ConvertOption",
    "HfOverrides",
    "LogprobsMode",
    "ModelConfig",
    "ModelDType",
    "ModelImpl",
    "RunnerOption",
    "TaskOption",
    "TokenizerMode",
    "iter_architecture_defaults",
    "try_match_architecture_defaults",
    "MMCacheType",
    "MMEncoderTPMode",
    "MultiModalConfig",
    "DetailedTraceModules",
    "ObservabilityConfig",
    "DistributedExecutorBackend",
    "EPLBConfig",
    "ParallelConfig",
    "PoolerConfig",
    "RunnerType",
    "SchedulerConfig",
    "SchedulerPolicy",
    "SpeculativeConfig",
    "SpeechToTextConfig",
    "StructuredOutputsConfig",
    "ConfigType",
    "SupportsMetricsInfo",
    "config",
    "get_attr_docs",
    "is_init_field",
    "update_config",
    "VllmConfig",
    "get_cached_compilation_config",
    "get_current_vllm_config",
    "set_current_vllm_config",
    "get_layers_from_vllm_config",
]

CUDAGraphMode

Bases: Enum

Constants for the cudagraph mode in CompilationConfig. The subset NONE, PIECEWISE, and FULL are also treated as concrete runtime modes for cudagraph runtime dispatching.

Source code in vllm/config/compilation.py
class CUDAGraphMode(enum.Enum):
    """ Constants for the cudagraph mode in CompilationConfig.
    Meanwhile, the subset enum `NONE`, `PIECEWISE` and `FULL` are also
    treated as concrete runtime mode for cudagraph runtime dispatching.
    """
    NONE = 0
    PIECEWISE = 1
    FULL = 2
    FULL_DECODE_ONLY = (FULL, NONE)
    FULL_AND_PIECEWISE = (FULL, PIECEWISE)

    def decode_mode(self) -> 'CUDAGraphMode':
        return CUDAGraphMode(self.value[0]) if \
            self.separate_routine() else self

    def mixed_mode(self) -> 'CUDAGraphMode':
        return CUDAGraphMode(self.value[1]) if \
            self.separate_routine() else self

    def requires_piecewise_compilation(self) -> bool:
        return (self.decode_mode() == CUDAGraphMode.PIECEWISE
                or self.mixed_mode() == CUDAGraphMode.PIECEWISE)

    def max_cudagraph_mode(self) -> 'CUDAGraphMode':
        return CUDAGraphMode(max(
            self.value)) if self.separate_routine() else self

    def has_full_cudagraphs(self) -> bool:
        return self.max_cudagraph_mode() == CUDAGraphMode.FULL

    def has_piecewise_cudagraphs(self) -> bool:
        return self.requires_piecewise_compilation()

    def separate_routine(self) -> bool:
        return isinstance(self.value, tuple)

    def valid_runtime_modes(self) -> bool:
        return self in [
            CUDAGraphMode.NONE, CUDAGraphMode.PIECEWISE, CUDAGraphMode.FULL
        ]

    def __str__(self) -> str:
        return self.name

FULL class-attribute instance-attribute

FULL = 2

FULL_AND_PIECEWISE class-attribute instance-attribute

FULL_AND_PIECEWISE = (FULL, PIECEWISE)

FULL_DECODE_ONLY class-attribute instance-attribute

FULL_DECODE_ONLY = (FULL, NONE)

NONE class-attribute instance-attribute

NONE = 0

PIECEWISE class-attribute instance-attribute

PIECEWISE = 1

__str__

__str__() -> str
Source code in vllm/config/compilation.py
def __str__(self) -> str:
    return self.name

decode_mode

decode_mode() -> CUDAGraphMode
Source code in vllm/config/compilation.py
def decode_mode(self) -> 'CUDAGraphMode':
    return CUDAGraphMode(self.value[0]) if \
        self.separate_routine() else self

has_full_cudagraphs

has_full_cudagraphs() -> bool
Source code in vllm/config/compilation.py
def has_full_cudagraphs(self) -> bool:
    return self.max_cudagraph_mode() == CUDAGraphMode.FULL

has_piecewise_cudagraphs

has_piecewise_cudagraphs() -> bool
Source code in vllm/config/compilation.py
def has_piecewise_cudagraphs(self) -> bool:
    return self.requires_piecewise_compilation()

max_cudagraph_mode

max_cudagraph_mode() -> CUDAGraphMode
Source code in vllm/config/compilation.py
def max_cudagraph_mode(self) -> 'CUDAGraphMode':
    return CUDAGraphMode(max(
        self.value)) if self.separate_routine() else self

mixed_mode

mixed_mode() -> CUDAGraphMode
Source code in vllm/config/compilation.py
def mixed_mode(self) -> 'CUDAGraphMode':
    return CUDAGraphMode(self.value[1]) if \
        self.separate_routine() else self

requires_piecewise_compilation

requires_piecewise_compilation() -> bool
Source code in vllm/config/compilation.py
def requires_piecewise_compilation(self) -> bool:
    return (self.decode_mode() == CUDAGraphMode.PIECEWISE
            or self.mixed_mode() == CUDAGraphMode.PIECEWISE)

separate_routine

separate_routine() -> bool
Source code in vllm/config/compilation.py
def separate_routine(self) -> bool:
    return isinstance(self.value, tuple)

valid_runtime_modes

valid_runtime_modes() -> bool
Source code in vllm/config/compilation.py
def valid_runtime_modes(self) -> bool:
    return self in [
        CUDAGraphMode.NONE, CUDAGraphMode.PIECEWISE, CUDAGraphMode.FULL
    ]
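
As an illustration of how the helpers above interact, a small sketch (assuming vLLM is installed; CUDAGraphMode is re-exported from vllm.config, as listed in __all__):

from vllm.config import CUDAGraphMode

mode = CUDAGraphMode.FULL_AND_PIECEWISE   # value is the tuple (FULL, PIECEWISE)
assert mode.separate_routine()            # tuple-valued modes use separate routines
assert mode.decode_mode() == CUDAGraphMode.FULL
assert mode.mixed_mode() == CUDAGraphMode.PIECEWISE
assert mode.max_cudagraph_mode() == CUDAGraphMode.FULL
assert mode.has_full_cudagraphs() and mode.has_piecewise_cudagraphs()
assert not mode.valid_runtime_modes()     # only NONE, PIECEWISE and FULL are runtime modes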

CacheConfig

Configuration for the KV cache.

Source code in vllm/config/cache.py
@config
@dataclass
class CacheConfig:
    """Configuration for the KV cache."""

    block_size: SkipValidation[BlockSize] = None  # type: ignore
    """Size of a contiguous cache block in number of tokens. On CUDA devices,
    only block sizes up to 32 are supported.

    This config has no static default. If left unspecified by the user, it will
    be set in `Platform.check_and_update_config()` based on the current
    platform."""
    gpu_memory_utilization: float = 0.9
    """The fraction of GPU memory to be used for the model executor, which can
    range from 0 to 1. For example, a value of 0.5 would imply 50% GPU memory
    utilization. If unspecified, will use the default value of 0.9. This is a
    per-instance limit, and only applies to the current vLLM instance. It does
    not matter if you have another vLLM instance running on the same GPU. For
    example, if you have two vLLM instances running on the same GPU, you can
    set the GPU memory utilization to 0.5 for each instance."""
    swap_space: float = 4
    """Size of the CPU swap space per GPU (in GiB)."""
    cache_dtype: CacheDType = "auto"
    """Data type for kv cache storage. If "auto", will use model data type.
    CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. ROCm (AMD GPU) supports
    fp8 (=fp8_e4m3). Intel Gaudi (HPU) supports fp8 (using fp8_inc).
    Some models (namely DeepSeekV3.2) default to fp8, set to bfloat16 to use
    bfloat16 instead, this is an invalid option for models that do not default
    to fp8.
    """
    is_attention_free: bool = False
    """Whether the model is attention-free. This is primarily set in
    `ModelConfig` and that value should be manually duplicated here."""
    num_gpu_blocks_override: Optional[int] = None
    """Number of GPU blocks to use. This overrides the profiled `num_gpu_blocks`
    if specified. Does nothing if `None`. Used for testing preemption."""
    sliding_window: Optional[int] = None
    """Sliding window size for the KV cache. This is primarily set in
    `ModelConfig` and that value should be manually duplicated here."""
    enable_prefix_caching: Optional[bool] = None
    """Whether to enable prefix caching. Enabled by default for V1."""
    prefix_caching_hash_algo: PrefixCachingHashAlgo = "sha256"
    """Set the hash algorithm for prefix caching:\n
    - "sha256" uses Pickle for object serialization before hashing.\n
    - "sha256_cbor" provides a reproducible, cross-language compatible hash. It
    serializes objects using canonical CBOR and hashes them with SHA-256."""
    cpu_offload_gb: float = 0
    """The space in GiB to offload to CPU, per GPU. Default is 0, which means
    no offloading. Intuitively, this argument can be seen as a virtual way to
    increase the GPU memory size. For example, if you have one 24 GB GPU and
    set this to 10, virtually you can think of it as a 34 GB GPU. Then you can
    load a 13B model with BF16 weight, which requires at least 26GB GPU memory.
    Note that this requires fast CPU-GPU interconnect, as part of the model is
    loaded from CPU memory to GPU memory on the fly in each model forward pass.
    """
    calculate_kv_scales: bool = False
    """This enables dynamic calculation of `k_scale` and `v_scale` when
    kv_cache_dtype is fp8. If `False`, the scales will be loaded from the model
    checkpoint if available. Otherwise, the scales will default to 1.0."""
    cpu_kvcache_space_bytes: Optional[int] = None
    """(CPU backend only) CPU key-value cache space."""
    mamba_page_size_padded: Optional[int] = None
    """ Optional override for mamba page size; used by hybrid mamba/attention
    models to ensure exact alignment with attention page size."""

    mamba_cache_dtype: MambaDType = "auto"
    """The data type to use for the Mamba cache (both the conv as well as the
    ssm state). If set to 'auto', the data type will be inferred from the model
    config."""
    mamba_ssm_cache_dtype: MambaDType = "auto"
    """The data type to use for the Mamba cache (ssm state only, conv state will
    still be controlled by mamba_cache_dtype). If set to 'auto', the data type
    for the ssm state will be determined by mamba_cache_dtype."""

    # Will be set after profiling.
    num_gpu_blocks: Optional[int] = field(default=None, init=False)
    """The number of blocks to allocate for GPU memory."""
    num_cpu_blocks: Optional[int] = field(default=None, init=False)
    """The number of blocks to allocate for CPU memory."""

    kv_sharing_fast_prefill: bool = False
    """This feature is work in progress and no prefill optimization takes place
    with this flag enabled currently.

    In some KV sharing setups, e.g. YOCO (https://arxiv.org/abs/2405.05254),
    some layers can skip tokens corresponding to prefill. This flag enables
    attention metadata for eligible layers to be overridden with metadata
    necessary for implementing this optimization in some models (e.g. Gemma3n)
    """

    kv_cache_memory_bytes: Optional[int] = None
    """Size of KV Cache per GPU in bytes. By default, this is set to None
    and vllm can automatically infer the kv cache size based on
    gpu_memory_utilization. However, users may want to manually specify
    the kv cache memory size. kv_cache_memory_bytes allows more fine-grain
    control of how much memory gets used when compared with using
    gpu_memory_memory_utilization. Note that kv_cache_memory_bytes
    (when not-None) ignores gpu_memory_utilization"""

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        factors: list[Any] = []
        factors.append(self.cache_dtype)
        factors.append(self.mamba_cache_dtype)
        factors.append(self.mamba_ssm_cache_dtype)
        # `cpu_offload_gb` does not use `torch.compile` yet.
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self) -> None:
        self.swap_space_bytes = self.swap_space * GiB_bytes

        self._verify_cache_dtype()
        self._verify_prefix_caching()

    def metrics_info(self):
        # convert cache_config to dict(key: str, value: str) for prometheus
        # metrics info
        return {key: str(value) for key, value in self.__dict__.items()}

    @model_validator(mode='after')
    def _verify_args(self) -> Self:
        if self.cpu_offload_gb < 0:
            raise ValueError("CPU offload space must be non-negative"
                             f", but got {self.cpu_offload_gb}")

        if self.gpu_memory_utilization > 1.0:
            raise ValueError(
                "GPU memory utilization must be less than 1.0. Got "
                f"{self.gpu_memory_utilization}.")

        return self

    def _verify_cache_dtype(self) -> None:
        if self.cache_dtype == "auto":
            pass
        elif self.cache_dtype in get_args(CacheDType):
            if self.cache_dtype.startswith("fp8"):
                logger.info(
                    "Using fp8 data type to store kv cache. It reduces the GPU "
                    "memory footprint and boosts the performance. "
                    "Meanwhile, it may cause accuracy drop without a proper "
                    "scaling factor.")
        else:
            raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")

    def _verify_prefix_caching(self) -> None:
        if not self.enable_prefix_caching:
            return

        if self.sliding_window is not None and not envs.VLLM_USE_V1:
            raise NotImplementedError(
                "Prefix caching is not supported with sliding window. "
                "Run with --disable-sliding-window to use prefix caching.")

        if (self.enable_prefix_caching and self.prefix_caching_hash_algo
                not in get_args(PrefixCachingHashAlgo)):
            raise ValueError(
                "Unknown prefix caching hash algorithm: "
                f"{self.prefix_caching_hash_algo}. Must be one of "
                f"{get_args(PrefixCachingHashAlgo)}.")

    def verify_with_parallel_config(
        self,
        parallel_config: ParallelConfig,
    ) -> None:
        total_cpu_memory = get_cpu_memory()
        # FIXME(woosuk): Here, it is assumed that the GPUs in a tensor parallel
        # group are in the same node. However, the GPUs may span multiple nodes.
        num_gpus_per_node = parallel_config.tensor_parallel_size
        cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node

        msg = (f"{cpu_memory_usage / GiB_bytes:.2f} GiB out of the "
               f"{total_cpu_memory / GiB_bytes:.2f} GiB total CPU memory "
               "is allocated for the swap space.")
        if cpu_memory_usage > 0.7 * total_cpu_memory:
            raise ValueError("Too large swap space. " + msg)
        elif cpu_memory_usage > 0.4 * total_cpu_memory:
            logger.warning("Possibly too large swap space. %s", msg)

block_size class-attribute instance-attribute

block_size: SkipValidation[BlockSize] = None

Size of a contiguous cache block in number of tokens. On CUDA devices, only block sizes up to 32 are supported.

This config has no static default. If left unspecified by the user, it will be set in Platform.check_and_update_config() based on the current platform.

cache_dtype class-attribute instance-attribute

cache_dtype: CacheDType = 'auto'

Data type for KV cache storage. If "auto", the model data type will be used. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. ROCm (AMD GPU) supports fp8 (=fp8_e4m3). Intel Gaudi (HPU) supports fp8 (via fp8_inc). Some models (namely DeepSeekV3.2) default to fp8; set this to bfloat16 to use bfloat16 instead. bfloat16 is an invalid option for models that do not default to fp8.

calculate_kv_scales class-attribute instance-attribute

calculate_kv_scales: bool = False

This enables dynamic calculation of k_scale and v_scale when kv_cache_dtype is fp8. If False, the scales will be loaded from the model checkpoint if available. Otherwise, the scales will default to 1.0.

cpu_kvcache_space_bytes class-attribute instance-attribute

cpu_kvcache_space_bytes: Optional[int] = None

(CPU backend only) CPU key-value cache space.

cpu_offload_gb class-attribute instance-attribute

cpu_offload_gb: float = 0

The space in GiB to offload to CPU, per GPU. Default is 0, which means no offloading. Intuitively, this argument can be seen as a virtual way to increase the GPU memory size. For example, if you have one 24 GB GPU and set this to 10, you can virtually think of it as a 34 GB GPU. Then you can load a 13B model with BF16 weights, which requires at least 26 GB of GPU memory. Note that this requires a fast CPU-GPU interconnect, as part of the model is loaded from CPU memory to GPU memory on the fly in each model forward pass.
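
To make the arithmetic in the example above concrete, a rough back-of-the-envelope sketch (the 13B parameter count and BF16 width come from the example; real memory use also includes activations and the KV cache):

params = 13e9            # 13B parameters
bytes_per_param = 2      # BF16 weights are 2 bytes each
weights_gb = params * bytes_per_param / 1e9
print(weights_gb)        # 26.0 -> about 26 GB just for the weights

gpu_gb = 24              # physical GPU memory
cpu_offload_gb = 10      # value given to CacheConfig.cpu_offload_gb
print(gpu_gb + cpu_offload_gb)  # 34 -> "virtual" capacity large enough for the weights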

enable_prefix_caching class-attribute instance-attribute

enable_prefix_caching: Optional[bool] = None

Whether to enable prefix caching. Enabled by default for V1.

gpu_memory_utilization class-attribute instance-attribute

gpu_memory_utilization: float = 0.9

The fraction of GPU memory to be used for the model executor, which can range from 0 to 1. For example, a value of 0.5 would imply 50% GPU memory utilization. If unspecified, will use the default value of 0.9. This is a per-instance limit, and only applies to the current vLLM instance. It does not matter if you have another vLLM instance running on the same GPU. For example, if you have two vLLM instances running on the same GPU, you can set the GPU memory utilization to 0.5 for each instance.

is_attention_free class-attribute instance-attribute

is_attention_free: bool = False

Whether the model is attention-free. This is primarily set in ModelConfig and that value should be manually duplicated here.

kv_cache_memory_bytes class-attribute instance-attribute

kv_cache_memory_bytes: Optional[int] = None

Size of the KV cache per GPU in bytes. By default, this is set to None and vLLM will automatically infer the KV cache size based on gpu_memory_utilization. However, users may want to manually specify the KV cache memory size. kv_cache_memory_bytes allows more fine-grained control over how much memory is used compared with gpu_memory_utilization. Note that kv_cache_memory_bytes (when not None) ignores gpu_memory_utilization.

kv_sharing_fast_prefill class-attribute instance-attribute

kv_sharing_fast_prefill: bool = False

This feature is a work in progress; currently, no prefill optimization takes place when this flag is enabled.

In some KV sharing setups, e.g. YOCO (https://arxiv.org/abs/2405.05254), some layers can skip tokens corresponding to prefill. This flag allows the attention metadata for eligible layers to be overridden with the metadata necessary to implement this optimization in some models (e.g. Gemma3n).

mamba_cache_dtype class-attribute instance-attribute

mamba_cache_dtype: MambaDType = 'auto'

The data type to use for the Mamba cache (both the conv as well as the ssm state). If set to 'auto', the data type will be inferred from the model config.

mamba_page_size_padded class-attribute instance-attribute

mamba_page_size_padded: Optional[int] = None

Optional override for mamba page size; used by hybrid mamba/attention models to ensure exact alignment with attention page size.

mamba_ssm_cache_dtype class-attribute instance-attribute

mamba_ssm_cache_dtype: MambaDType = 'auto'

The data type to use for the Mamba cache (ssm state only, conv state will still be controlled by mamba_cache_dtype). If set to 'auto', the data type for the ssm state will be determined by mamba_cache_dtype.

num_cpu_blocks class-attribute instance-attribute

num_cpu_blocks: Optional[int] = field(
    default=None, init=False
)

The number of blocks to allocate for CPU memory.

num_gpu_blocks class-attribute instance-attribute

num_gpu_blocks: Optional[int] = field(
    default=None, init=False
)

The number of blocks to allocate for GPU memory.

num_gpu_blocks_override class-attribute instance-attribute

num_gpu_blocks_override: Optional[int] = None

Number of GPU blocks to use. This overrides the profiled num_gpu_blocks if specified. Does nothing if None. Used for testing preemption.

prefix_caching_hash_algo class-attribute instance-attribute

prefix_caching_hash_algo: PrefixCachingHashAlgo = 'sha256'

Set the hash algorithm for prefix caching:

  • "sha256" uses Pickle for object serialization before hashing.

  • "sha256_cbor" provides a reproducible, cross-language compatible hash. It serializes objects using canonical CBOR and hashes them with SHA-256.

sliding_window class-attribute instance-attribute

sliding_window: Optional[int] = None

Sliding window size for the KV cache. This is primarily set in ModelConfig and that value should be manually duplicated here.

swap_space class-attribute instance-attribute

swap_space: float = 4

Size of the CPU swap space per GPU (in GiB).

__post_init__

__post_init__() -> None
Source code in vllm/config/cache.py
def __post_init__(self) -> None:
    self.swap_space_bytes = self.swap_space * GiB_bytes

    self._verify_cache_dtype()
    self._verify_prefix_caching()

_verify_args

_verify_args() -> Self
Source code in vllm/config/cache.py
@model_validator(mode='after')
def _verify_args(self) -> Self:
    if self.cpu_offload_gb < 0:
        raise ValueError("CPU offload space must be non-negative"
                         f", but got {self.cpu_offload_gb}")

    if self.gpu_memory_utilization > 1.0:
        raise ValueError(
            "GPU memory utilization must be less than 1.0. Got "
            f"{self.gpu_memory_utilization}.")

    return self

_verify_cache_dtype

_verify_cache_dtype() -> None
Source code in vllm/config/cache.py
def _verify_cache_dtype(self) -> None:
    if self.cache_dtype == "auto":
        pass
    elif self.cache_dtype in get_args(CacheDType):
        if self.cache_dtype.startswith("fp8"):
            logger.info(
                "Using fp8 data type to store kv cache. It reduces the GPU "
                "memory footprint and boosts the performance. "
                "Meanwhile, it may cause accuracy drop without a proper "
                "scaling factor.")
    else:
        raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")

_verify_prefix_caching

_verify_prefix_caching() -> None
Source code in vllm/config/cache.py
def _verify_prefix_caching(self) -> None:
    if not self.enable_prefix_caching:
        return

    if self.sliding_window is not None and not envs.VLLM_USE_V1:
        raise NotImplementedError(
            "Prefix caching is not supported with sliding window. "
            "Run with --disable-sliding-window to use prefix caching.")

    if (self.enable_prefix_caching and self.prefix_caching_hash_algo
            not in get_args(PrefixCachingHashAlgo)):
        raise ValueError(
            "Unknown prefix caching hash algorithm: "
            f"{self.prefix_caching_hash_algo}. Must be one of "
            f"{get_args(PrefixCachingHashAlgo)}.")

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/cache.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    factors: list[Any] = []
    factors.append(self.cache_dtype)
    factors.append(self.mamba_cache_dtype)
    factors.append(self.mamba_ssm_cache_dtype)
    # `cpu_offload_gb` does not use `torch.compile` yet.
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

metrics_info

metrics_info()
Source code in vllm/config/cache.py
def metrics_info(self):
    # convert cache_config to dict(key: str, value: str) for prometheus
    # metrics info
    return {key: str(value) for key, value in self.__dict__.items()}

verify_with_parallel_config

verify_with_parallel_config(
    parallel_config: ParallelConfig,
) -> None
Source code in vllm/config/cache.py
def verify_with_parallel_config(
    self,
    parallel_config: ParallelConfig,
) -> None:
    total_cpu_memory = get_cpu_memory()
    # FIXME(woosuk): Here, it is assumed that the GPUs in a tensor parallel
    # group are in the same node. However, the GPUs may span multiple nodes.
    num_gpus_per_node = parallel_config.tensor_parallel_size
    cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node

    msg = (f"{cpu_memory_usage / GiB_bytes:.2f} GiB out of the "
           f"{total_cpu_memory / GiB_bytes:.2f} GiB total CPU memory "
           "is allocated for the swap space.")
    if cpu_memory_usage > 0.7 * total_cpu_memory:
        raise ValueError("Too large swap space. " + msg)
    elif cpu_memory_usage > 0.4 * total_cpu_memory:
        logger.warning("Possibly too large swap space. %s", msg)
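
Putting the fields above together, a minimal construction sketch (assuming vLLM is installed; the field values are illustrative, not recommendations):

from vllm.config import CacheConfig

cache_config = CacheConfig(
    block_size=16,                # tokens per contiguous KV cache block
    gpu_memory_utilization=0.8,   # fraction of GPU memory for this instance
    swap_space=2,                 # GiB of CPU swap space per GPU
    cache_dtype="fp8",            # fp8 (=fp8_e4m3) KV cache storage
)

print(cache_config.swap_space_bytes)  # derived in __post_init__ from swap_space
print(cache_config.compute_hash())    # md5 over the compilation-relevant factors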

CompilationConfig

Configuration for compilation. It has three parts:

  • Top-level compilation control: level, debug_dump_path, cache_dir, backend, custom_ops, splitting_ops.
  • CudaGraph capture: use_cudagraph, cudagraph_mode, cudagraph_capture_sizes, cudagraph_num_of_warmups, cudagraph_copy_inputs, full_cuda_graph.
  • Inductor compilation: use_inductor, compile_sizes, inductor_compile_config, inductor_passes, and custom inductor passes.

Why we have different sizes for cudagraph and inductor:

  • cudagraph: a cudagraph captured for a specific size can only be used for that size. We need to capture all the sizes we want to use.
  • inductor: a graph compiled by Inductor for a general shape can be used for different sizes. Inductor can also compile for specific sizes, where it has more information to optimize the graph with fully static shapes. However, we find the general-shape compilation sufficient for most cases. It might be beneficial to compile for certain small batch sizes, where Inductor is good at optimizing.

Source code in vllm/config/compilation.py
@config
@dataclass
class CompilationConfig:
    """Configuration for compilation. It has three parts:

    - Top-level Compilation control:
        - [`level`][vllm.config.CompilationConfig.level]
        - [`debug_dump_path`][vllm.config.CompilationConfig.debug_dump_path]
        - [`cache_dir`][vllm.config.CompilationConfig.cache_dir]
        - [`backend`][vllm.config.CompilationConfig.backend]
        - [`custom_ops`][vllm.config.CompilationConfig.custom_ops]
        - [`splitting_ops`][vllm.config.CompilationConfig.splitting_ops]
    - CudaGraph capture:
        - [`use_cudagraph`][vllm.config.CompilationConfig.use_cudagraph]
        - [`cudagraph_mode`][vllm.config.CompilationConfig.cudagraph_mode]
        - [`cudagraph_capture_sizes`]
        [vllm.config.CompilationConfig.cudagraph_capture_sizes]
        - [`cudagraph_num_of_warmups`]
        [vllm.config.CompilationConfig.cudagraph_num_of_warmups]
        - [`cudagraph_copy_inputs`]
        [vllm.config.CompilationConfig.cudagraph_copy_inputs]
        - [`full_cuda_graph`][vllm.config.CompilationConfig.full_cuda_graph]
    - Inductor compilation:
        - [`use_inductor`][vllm.config.CompilationConfig.use_inductor]
        - [`compile_sizes`][vllm.config.CompilationConfig.compile_sizes]
        - [`inductor_compile_config`]
        [vllm.config.CompilationConfig.inductor_compile_config]
        - [`inductor_passes`][vllm.config.CompilationConfig.inductor_passes]
        - custom inductor passes

    Why we have different sizes for cudagraph and inductor:
    - cudagraph: a cudagraph captured for a specific size can only be used
        for the same size. We need to capture all the sizes we want to use.
    - inductor: a graph compiled by inductor for a general shape can be used
        for different sizes. Inductor can also compile for specific sizes,
        where it can have more information to optimize the graph with fully
        static shapes. However, we find the general shape compilation is
        sufficient for most cases. It might be beneficial to compile for
        certain small batchsizes, where inductor is good at optimizing.
    """
    # Top-level Compilation control
    level: Optional[int] = None
    """The level of compilation:

    - None: If None, we will select the default compilation level.
      For V1 engine this is 3, for V0 engine this is 0.
    - 0: no compilation.
    - 1: dynamo as is.
    - 2: dynamo once.
    - 3: piecewise compilation."""
    debug_dump_path: Optional[Path] = None
    """The path to dump the debug information."""
    cache_dir: str = ""
    """The directory to store the compiled graph, to accelerate Inductor
    compilation. By default, it will use model-related information to generate
    a cache directory."""
    backend: str = ""
    """The backend for compilation. It needs to be a string:

    - "" (empty string): use the default backend.
    - "eager"/"openxla"/...: use the specified backend registered in PyTorch.
    - "full.module.name": a qualified name which can be used to import the

    backend function.
    We use string to avoid serialization issues when using compilation in a
    distributed setting. When the compilation level is 1 or 2, the backend is
    used for the compilation directly (it sees the whole graph). When the
    compilation level is 3, the backend is used for the piecewise compilation
    (it sees a part of the graph)."""
    custom_ops: list[str] = field(default_factory=list)
    """Fine-grained control over which custom ops to enable/disable. Use 'all'
    to enable all, 'none' to disable all. Also specify a list of custom op
    names to enable (prefixed with a '+'), or disable (prefixed with a '-').
    Examples:

    - 'all,-op1' to enable all except op1
    - 'none,+op1,+op2' to enable only op1 and op2

    By default, all custom ops are enabled when running without Inductor and
    disabled when running with Inductor: level>=PIECEWISE and use_inductor=True.
    Inductor generates (fused) Triton kernels for disabled custom ops."""
    splitting_ops: Optional[list[str]] = None
    """A list of ops to split the full graph into subgraphs, used in piecewise
    compilation."""

    # Inductor capture
    use_inductor: bool = True
    """Whether to use inductor compilation:

    - False: inductor compilation is not used. graph runs in eager
        (custom_ops enabled by default).
    - True: inductor compilation is used (custom_ops disabled by default).
        One graph for symbolic shape and one graph per size in compile_sizes
        are compiled using configurations in inductor_compile_config.

    This setting is ignored if level<PIECEWISE."""
    compile_sizes: Optional[list[Union[int, str]]] = None
    """Sizes to compile for inductor. In addition
    to integers, it also supports "cudagraph_capture_sizes" to
    specify the sizes for cudagraph capture."""
    inductor_compile_config: dict = field(default_factory=dict)
    """Additional configurations for inductor.
    - None: use default configurations."""
    inductor_passes: dict[str, str] = field(default_factory=dict)
    """Additional passes for inductor. It is a dictionary
    from pass name to pass function qualified name. We use function
    name because the config uses JSON format. If we pass the config
    from Python, functions can also be passed directly via Python object
    constructor, e.g. `CompilationConfig(inductor_passes={"a": func})`."""

    # CudaGraph compilation
    cudagraph_mode: Optional[CUDAGraphMode] = None
    """
    The mode of the cudagraph:

    - NONE, no cudagraph capture.
    - PIECEWISE.
    - FULL.
    - FULL_DECODE_ONLY.
    - FULL_AND_PIECEWISE. (v1 default)

    PIECEWISE mode build piecewise cudagraph only, keeping the cudagraph
    incompatible ops (i.e. some attention ops) outside the cudagraph
    for general flexibility.

    FULL mode: Capture full cudagraph for all batches. Can be good for small
    models or workloads with small prompts; not supported by many backends.
    Generally for performance FULL_AND_PIECEWISE is better.

    FULL_DECODE_ONLY mode: Capture full cudagraph for decode batches only.
    Mixed prefill-decode batches are run without cudagraphs. Can be good for
    decode instances in a P/D setup where prefill is not as important so we
    can save some memory.

    FULL_AND_PIECEWISE mode: Capture full cudagraph for decode batches and
    piecewise cudagraph for prefill and mixed prefill-decode batches.
    This is the most performant mode for most models and is the default.

    Currently, the cudagraph mode is only used for the v1 engine.
    Note that the cudagraph logic is generally orthogonal to the 
    compilation logic. While piecewise cudagraphs require piecewise 
    compilation (level=PIECEWISE and non-empty splitting_ops), full
    cudagraphs are supported with and without compilation.

    Warning: This flag is new and subject to change in addition 
    more modes may be added.
    """
    use_cudagraph: bool = True
    """Whether to use cudagraph inside compilation.
    - False: cudagraph inside compilation is not used.
    - True: cudagraph inside compilation is used. It requires
        that all input buffers have fixed addresses, and all
        splitting ops write their outputs to input buffers.
    In the vLLM V1 Engine, this flag only applies for
    CompilationLevel.PIECEWISE (aka -O3).
    Note that this is orthogonal to the cudagraph capture logic
    outside of compilation.
    Warning: This flag is deprecated and will be removed in the next major or
    minor release, i.e. v0.11.0 or v1.0.0. Please use cudagraph_mode=PIECEWISE
    instead.
    """
    cudagraph_num_of_warmups: int = 0
    """Number of warmup runs for cudagraph.
    It means the first several runs will be treated as warmup runs.
    Only after that, the execution will be recorded, and the recorded
    cudagraph will be used for subsequent runs."""
    cudagraph_capture_sizes: Optional[list[int]] = None
    """Sizes to capture cudagraph.
    - None (default): capture sizes are inferred from vllm config.
    - list[int]: capture sizes are specified as given."""
    cudagraph_copy_inputs: bool = False
    """Whether to copy input tensors for
    cudagraph. If the caller can guarantee that the same input buffers
    are always used, it can set this to False. Otherwise, it should
    set this to True, and the compiler will copy the input to an
    internally managed buffer. Default is False. 
    Note that this flag is only effective when cudagraph_mode is PIECEWISE.
    """
    full_cuda_graph: Optional[bool] = False
    """whether to use a full cuda graph for the entire forward pass rather than
    splitting certain operations such as attention into subgraphs. Thus this
    flag cannot be used together with splitting_ops. This may provide
    performance benefits for smaller models.
    Warning: This flag is deprecated and will be removed in the next major or
    minor release, i.e. v0.11.0 or v1.0.0. Please use cudagraph_mode=
    FULL_AND_PIECEWISE instead.
    """

    use_inductor_graph_partition: bool = False
    """Use inductor graph partition to split the graph at cudagraph_unsafe ops.
    This partition happens at inductor codegen time after all passes and fusions
    are finished. It generates a single `call` function which wraps
    cudagraph-safe ops into partition functions and leave cudagraph-unsafe ops
    outside the partition functions. For a graph with N cudagraph-unsafe ops
    (e.g., Attention), there would be N+1 partitions. To mark an op as
    cudagraph unsafe, we can add `tags=(torch._C.Tag.cudagraph_unsafe)` when
    register the custom op. 

    This config supports both full cudagraph and piecewise cudagraph without
    compiling twice. For piecewise cudagraph, it applies vLLM CUDAGraph wrapper
    to each partition. For N+1 partitions, there would be N+1
    CUDAGraph wrapper instances.

    For full CUDAGraph, we always apply a single CUDAGraph wrapper outside the
    inductor `call` function in the model runner. The top-level full cudagraph
    capture ignores all partitioning.
    """

    pass_config: PassConfig = field(default_factory=PassConfig)
    """Custom inductor passes, see PassConfig for more details"""

    max_capture_size: int = field(default=None, init=False)  # type: ignore
    """not configurable, computed after init"""
    local_cache_dir: str = field(default=None, init=False)  # type: ignore
    """local cache dir for each rank"""
    bs_to_padded_graph_size: list[int] = field(
        default=None,  # type: ignore
        init=False)
    """optimization:
    Intuitively, bs_to_padded_graph_size should be dict[int, int].
    since we know all keys are in a range [0, max_capture_size],
    we can optimize it to list[int] for better lookup performance."""

    # keep track of enabled and disabled custom ops
    enabled_custom_ops: Counter[str] = field(default_factory=Counter,
                                             init=False)
    """custom ops that are enabled"""
    disabled_custom_ops: Counter[str] = field(default_factory=Counter,
                                              init=False)
    """custom ops that are disabled"""
    traced_files: set[str] = field(default_factory=set, init=False)
    """files that are traced for compilation"""
    compilation_time: float = field(default=0.0, init=False)
    """time taken for compilation"""

    static_forward_context: dict[str, Any] = field(default_factory=dict,
                                                   init=False)
    """Per-model forward context
    Map from layer name to layer objects that need to be accessed outside
    model code, e.g., Attention, FusedMOE when dp_size>1."""

    # Attention ops; used for piecewise cudagraphs
    _attention_ops: ClassVar[list[str]] = [
        "vllm.unified_attention",
        "vllm.unified_attention_with_output",
        "vllm.mamba_mixer2",
        "vllm.mamba_mixer",
        "vllm.short_conv",
        "vllm.linear_attention",
        "vllm.plamo2_mamba_mixer",
        "vllm.gdn_attention",
        "vllm.sparse_attn_indexer",
    ]

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        factors: list[Any] = []
        factors.append(self.level)
        factors.append(self.backend)
        factors.append(self.custom_ops)
        factors.append(self.splitting_ops)
        factors.append(self.use_inductor)
        factors.append(self.inductor_compile_config)
        factors.append(self.inductor_passes)
        factors.append(self.pass_config.uuid())
        return hashlib.sha256(str(factors).encode()).hexdigest()

    def __repr__(self) -> str:
        exclude = {
            "static_forward_context": True,
            "enabled_custom_ops": True,
            "disabled_custom_ops": True,
            "compilation_time": True,
            "bs_to_padded_graph_size": True,
            "traced_files": True,
            "inductor_compile_config": {
                "post_grad_custom_post_pass": True,
            },
        }

        # exclude default attr in pass_config
        pass_config_exclude = {}
        for attr, default_val in vars(PassConfig()).items():
            if getattr(self.pass_config, attr) == default_val:
                pass_config_exclude[attr] = True
        if pass_config_exclude:
            exclude["pass_config"] = pass_config_exclude

        config = TypeAdapter(CompilationConfig).dump_python(self,
                                                            exclude=exclude,
                                                            exclude_unset=True)

        return str(config)

    __str__ = __repr__

    @field_validator("cudagraph_mode", mode="before")
    @classmethod
    def validate_cudagraph_mode_before(cls, value: Any) -> Any:
        """
        enable parse the `cudagraph_mode` enum type from string
        """
        if isinstance(value, str):
            return CUDAGraphMode[value.upper()]
        return value

    def __post_init__(self) -> None:
        count_none = self.custom_ops.count("none")
        count_all = self.custom_ops.count("all")
        assert count_none + count_all <= 1, "Can only specify 'none' or 'all'"

        # TODO(zou3519/luka): There are 2 issues with auto-functionalization V2:
        # 1. A bug in PyTorch, fixed in 2.7:
        #    https://github.com/pytorch/pytorch/issues/147924
        # 2. Custom passes (fusion) rely on auto-functionalization V1 and don't
        #    work with V2. Addressing this will take extra engineering effort
        #    and it is not yet a priority. RFC here:
        #    https://github.com/vllm-project/vllm/issues/14703

        if is_torch_equal_or_newer("2.6"):
            KEY = 'enable_auto_functionalized_v2'
            if KEY not in self.inductor_compile_config:
                self.inductor_compile_config[KEY] = False

        for k, v in self.inductor_passes.items():
            if not isinstance(v, str):
                assert callable(v), (
                    f"pass {k} should be callable or a qualified name")
                self.inductor_compile_config[k] = v if isinstance(
                    v, InductorPass) else CallableInductorPass(v)
                continue

            # resolve function from qualified name
            names = v.split(".")
            module = ".".join(names[:-1])
            func_name = names[-1]
            func = __import__(module).__dict__[func_name]
            self.inductor_compile_config[k] = func if isinstance(
                func, InductorPass) else CallableInductorPass(func)

        if isinstance(self.pass_config, dict):
            self.pass_config = PassConfig(**self.pass_config)

        # migrate the deprecated flags
        if not self.use_cudagraph:
            logger.warning("use_cudagraph is deprecated, use "
                           "cudagraph_mode=NONE instead.")
            if self.cudagraph_mode is not None and \
                self.cudagraph_mode != CUDAGraphMode.NONE:
                raise ValueError(
                    "use_cudagraph and cudagraph_mode are mutually"
                    " exclusive, prefer cudagraph_mode since "
                    "use_cudagraph is deprecated.")
            self.cudagraph_mode = CUDAGraphMode.NONE
        if self.full_cuda_graph:
            logger.warning("full_cuda_graph is deprecated, use "
                           "cudagraph_mode=FULL instead.")
            if self.cudagraph_mode is not None and \
                not self.cudagraph_mode.has_full_cudagraphs():
                raise ValueError("full_cuda_graph and cudagraph_mode are "
                                 "mutually exclusive, prefer cudagraph_mode "
                                 "since full_cuda_graph is deprecated.")
            self.cudagraph_mode = CUDAGraphMode.FULL

        if (self.use_inductor_graph_partition
                and not is_torch_equal_or_newer("2.9.0.dev")):
            raise ValueError("use_inductor_graph_partition is only "
                             "supported with torch>=2.9.0.dev. Set "
                             "use_inductor_graph_partition=False instead.")

        for op in self.custom_ops:
            if op[0] not in {'+', '-'} and op not in {'all', 'none'}:
                raise ValueError(f"Invalid syntax '{op}' for custom op, "
                                 "must be 'all', 'none', '+op' or '-op' "
                                 "(where 'op' is the registered op name)")

    def init_backend(self, vllm_config: "VllmConfig") -> Union[str, Callable]:
        if self.level == CompilationLevel.NO_COMPILATION:
            raise ValueError("No compilation level is set.")

        from torch._dynamo.backends.registry import list_backends
        torch_backends = list_backends(exclude_tags=tuple())
        if self.level in [
                CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE
        ]:
            if self.backend == "":
                return "eager"
            if self.backend in torch_backends:
                return self.backend
            return resolve_obj_by_qualname(self.backend)

        # TODO: pass user-specified backend to piecewise compilation
        # merge with the config use_inductor
        assert self.level == CompilationLevel.PIECEWISE

        from vllm.compilation.backends import VllmBackend
        return VllmBackend(vllm_config)

    def init_with_cudagraph_sizes(self,
                                  cudagraph_capture_sizes: list[int]) -> None:
        """To complete the initialization of config,
        we need to know the cudagraph sizes."""

        if self.cudagraph_capture_sizes is None:
            self.cudagraph_capture_sizes = cudagraph_capture_sizes
        else:
            # de-duplicate the sizes provided by the config
            dedup_sizes = list(set(self.cudagraph_capture_sizes))
            if len(dedup_sizes) < len(self.cudagraph_capture_sizes):
                logger.info(("cudagraph sizes specified by model runner"
                             " %s is overridden by config %s"),
                            cudagraph_capture_sizes, dedup_sizes)
            self.cudagraph_capture_sizes = dedup_sizes

        computed_compile_sizes = []
        if self.compile_sizes is not None:
            # de-duplicate the sizes provided by the config
            self.compile_sizes = list(set(self.compile_sizes))
            for x in self.compile_sizes:
                if isinstance(x, str):
                    assert x == "cudagraph_capture_sizes", \
                        "Unrecognized size type in compile_sizes, " \
                        f"expect 'cudagraph_capture_sizes', got {x}"
                    computed_compile_sizes.extend(self.cudagraph_capture_sizes)
                else:
                    assert isinstance(x, int)
                    computed_compile_sizes.append(x)
        self.compile_sizes = computed_compile_sizes  # type: ignore

        # sort to make sure cudagraph capture sizes are in descending order
        self.cudagraph_capture_sizes.sort(reverse=True)
        self.max_capture_size = self.cudagraph_capture_sizes[
            0] if self.cudagraph_capture_sizes else 0

        # pre-compute the mapping from batch size to padded graph size
        self.bs_to_padded_graph_size = [
            0 for i in range(self.max_capture_size + 1)
        ]
        for end, start in zip(self.cudagraph_capture_sizes,
                              self.cudagraph_capture_sizes[1:] + [0]):
            for bs in range(start, end):
                if bs == start:
                    self.bs_to_padded_graph_size[bs] = start
                else:
                    self.bs_to_padded_graph_size[bs] = end
        self.bs_to_padded_graph_size[
            self.max_capture_size] = self.max_capture_size

    def set_splitting_ops_for_v1(self):
        # NOTE: this function needs to be called only when level is
        # CompilationLevel.PIECEWISE
        assert self.level == CompilationLevel.PIECEWISE, (
            "set_splitting_ops_for_v1 should only be called when "
            "level is CompilationLevel.PIECEWISE")

        if self.use_inductor_graph_partition:
            self.set_splitting_ops_for_inductor_graph_partition()
            return

        if self.pass_config.enable_attn_fusion:
            # here use_inductor_graph_partition is False
            self.set_splitting_ops_for_attn_fusion()
            return

        if self.splitting_ops is None:
            # NOTE: When using full cudagraph, instead of setting an empty
            # list and capture the full cudagraph inside the flattened fx
            # graph, we keep the piecewise fx graph structure but capture
            # the full cudagraph outside the fx graph. This reduces some
            # cpu overhead when the runtime batch_size is not cudagraph
            # captured. see https://github.com/vllm-project/vllm/pull/20059
            # for details. Make a copy to avoid mutating the class-level
            # list via reference.
            self.splitting_ops = list(self._attention_ops)
        elif len(self.splitting_ops) == 0:
            logger.warning_once(
                "Using piecewise compilation with empty splitting_ops")
            if self.cudagraph_mode == CUDAGraphMode.PIECEWISE:
                logger.warning_once(
                    "Piecewise compilation with empty splitting_ops do not" \
                    "contains piecewise cudagraph. Setting cudagraph_"
                    "mode to NONE. Hint: If you are using attention backends "
                    "that support cudagraph, consider manually setting "
                    "cudagraph_mode to FULL or FULL_DECODE_ONLY to enable "
                    "full cudagraphs.")
                self.cudagraph_mode = CUDAGraphMode.NONE
            elif self.cudagraph_mode == CUDAGraphMode.FULL_AND_PIECEWISE:
                logger.warning_once(
                    "Piecewise compilation with empty splitting_ops do not "
                    "contains piecewise cudagraph. Setting cudagraph_mode "
                    "to FULL.")
                self.cudagraph_mode = CUDAGraphMode.FULL
            self.splitting_ops = []

    def set_splitting_ops_for_inductor_graph_partition(self):
        assert self.use_inductor_graph_partition
        use_inductor_graph_partition_msg = (
            "When use_inductor_graph_partition=True, splitting_ops "
            "are ignored and set to an empty list. Instead, "
            "\"tags=(torch._C.Tag.cudagraph_unsafe, ),\" is "
            "used to annotate custom ops for graph partition.")
        if self.splitting_ops is not None and \
            len(self.splitting_ops) > 0:
            logger.warning_once(use_inductor_graph_partition_msg)
        self.splitting_ops = []

    def set_splitting_ops_for_attn_fusion(self):
        assert self.pass_config.enable_attn_fusion
        if self.splitting_ops is None:
            self.splitting_ops = []
            if self.cudagraph_mode.has_piecewise_cudagraphs():
                logger.warning_once(
                    "enable_attn_fusion is incompatible with piecewise "
                    "cudagraph when use_inductor_graph_partition is off."
                    "In this case, splitting_ops will be set to empty "
                    "list, and cudagraph_mode will be set to FULL. "
                    "Please ensure you are using attention backends that "
                    "support cudagraph or set cudagraph_mode to NONE "
                    "explicitly if encountering any problems.")
                self.cudagraph_mode = CUDAGraphMode.FULL

        assert not self.splitting_ops_contain_attention(), (
            "attention ops should not be in splitting_ops "
            "when enable_attn_fusion is True")

    def splitting_ops_contain_attention(self) -> bool:
        return self.splitting_ops is not None and all(
            op in self.splitting_ops for op in self._attention_ops)

    def is_attention_compiled_piecewise(self) -> bool:
        use_fx_graph_piecewise_compilation = (
            self.level == CompilationLevel.PIECEWISE
            and self.splitting_ops_contain_attention())

        inductor_used = (self.level == CompilationLevel.PIECEWISE
                         and self.use_inductor) or (
                             self.level >= CompilationLevel.DYNAMO_AS_IS
                             and self.backend == "inductor")
        use_inductor_piecewise_compilation = (
            inductor_used and self.use_inductor_graph_partition
            and not self.splitting_ops_contain_attention())

        return use_fx_graph_piecewise_compilation or \
            use_inductor_piecewise_compilation

    def custom_op_log_check(self):
        """
        This method logs the enabled/disabled custom ops and checks that the
        passed custom_ops field only contains relevant ops.
        It is called at the end of set_current_vllm_config,
        after the custom ops have been instantiated.
        """

        if len(self.enabled_custom_ops) + len(self.disabled_custom_ops) == 0:
            logger.debug("No custom ops found in model.")
            return

        logger.debug("enabled custom ops: %s", self.enabled_custom_ops)
        logger.debug("disabled custom ops: %s", self.disabled_custom_ops)

        all_ops_in_model = (self.enabled_custom_ops | self.disabled_custom_ops)
        for op in self.custom_ops:
            if op in {"all", "none"}:
                continue

            assert op[0] in {'+', '-'}, "Invalid custom op syntax " \
                                        "(should be checked during init)"

            # check if op name exists in model
            op_name = op[1:]
            if op_name not in all_ops_in_model:
                from vllm.model_executor.custom_op import CustomOp

                # Does op exist at all or is it just not present in this model?
                # Note: Only imported op classes appear in the registry.
                missing_str = "doesn't exist (or wasn't imported/registered)" \
                    if op_name not in CustomOp.op_registry \
                    else "not present in model"

                enable_str = "enabling" if op[0] == '+' else "disabling"
                logger.warning_once("Op '%s' %s, %s with '%s' has no effect",
                                    op_name, missing_str, enable_str, op)

__str__ class-attribute instance-attribute

__str__ = __repr__

_attention_ops class-attribute

_attention_ops: list[str] = [
    "vllm.unified_attention",
    "vllm.unified_attention_with_output",
    "vllm.mamba_mixer2",
    "vllm.mamba_mixer",
    "vllm.short_conv",
    "vllm.linear_attention",
    "vllm.plamo2_mamba_mixer",
    "vllm.gdn_attention",
    "vllm.sparse_attn_indexer",
]

backend class-attribute instance-attribute

backend: str = ''

The backend for compilation. It needs to be a string:

  • "" (empty string): use the default backend.
  • "eager"/"openxla"/...: use the specified backend registered in PyTorch.
  • "full.module.name": a qualified name which can be used to import the

backend function. We use string to avoid serialization issues when using compilation in a distributed setting. When the compilation level is 1 or 2, the backend is used for the compilation directly (it sees the whole graph). When the compilation level is 3, the backend is used for the piecewise compilation (it sees a part of the graph).
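
For illustration, a minimal sketch of setting the backend when constructing the config; the qualified name shown is a hypothetical placeholder, not a real module:

from vllm.config import CompilationConfig, CompilationLevel

# Use a backend registered in PyTorch for DYNAMO_ONCE (level 2) compilation.
cfg = CompilationConfig(level=CompilationLevel.DYNAMO_ONCE, backend="eager")

# Or point at a custom backend by qualified name; "my_pkg.my_backend" is a
# hypothetical placeholder that is resolved via import when the backend is
# initialized.
cfg_custom = CompilationConfig(level=CompilationLevel.DYNAMO_ONCE,
                               backend="my_pkg.my_backend")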

bs_to_padded_graph_size class-attribute instance-attribute

bs_to_padded_graph_size: list[int] = field(
    default=None, init=False
)

Optimization: intuitively, bs_to_padded_graph_size should be dict[int, int]. Since we know all keys are in the range [0, max_capture_size], we can optimize it to list[int] for better lookup performance.
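
A small standalone sketch (independent of vLLM, with arbitrary example sizes) of the list-based padding lookup:

# bs_to_padded[bs] gives the smallest captured size >= bs.
capture_sizes = sorted([1, 2, 4, 8], reverse=True)  # example capture sizes
max_capture_size = capture_sizes[0]

bs_to_padded = [0] * (max_capture_size + 1)
for end, start in zip(capture_sizes, capture_sizes[1:] + [0]):
    for bs in range(start, end):
        bs_to_padded[bs] = start if bs == start else end
bs_to_padded[max_capture_size] = max_capture_size

assert bs_to_padded[3] == 4  # batch size 3 is padded up to captured size 4
assert bs_to_padded[8] == 8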

cache_dir class-attribute instance-attribute

cache_dir: str = ''

The directory to store the compiled graph, to accelerate Inductor compilation. By default, it will use model-related information to generate a cache directory.

compilation_time class-attribute instance-attribute

compilation_time: float = field(default=0.0, init=False)

time taken for compilation

compile_sizes class-attribute instance-attribute

compile_sizes: Optional[list[Union[int, str]]] = None

Sizes to compile for inductor. In addition to integers, it also supports "cudagraph_capture_sizes" to specify the sizes for cudagraph capture.

cudagraph_capture_sizes class-attribute instance-attribute

cudagraph_capture_sizes: Optional[list[int]] = None

Sizes to capture cudagraph:

  • None (default): capture sizes are inferred from vllm config.
  • list[int]: capture sizes are specified as given.
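
As a sketch, capture sizes can be set explicitly and reused for compile_sizes via the "cudagraph_capture_sizes" string described above; the sizes shown are arbitrary examples:

from vllm.config import CompilationConfig

cfg = CompilationConfig(
    cudagraph_capture_sizes=[1, 2, 4, 8],
    # Compile each capture size plus one extra size with inductor.
    compile_sizes=["cudagraph_capture_sizes", 16],
)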

cudagraph_copy_inputs class-attribute instance-attribute

cudagraph_copy_inputs: bool = False

Whether to copy input tensors for cudagraph. If the caller can guarantee that the same input buffers are always used, it can set this to False. Otherwise, it should set this to True, and the compiler will copy the input to an internally managed buffer. Default is False. Note that this flag is only effective when cudagraph_mode is PIECEWISE.

cudagraph_mode class-attribute instance-attribute

cudagraph_mode: Optional[CUDAGraphMode] = None

The mode of the cudagraph:

  • NONE, no cudagraph capture.
  • PIECEWISE.
  • FULL.
  • FULL_DECODE_ONLY.
  • FULL_AND_PIECEWISE. (v1 default)

PIECEWISE mode builds piecewise cudagraphs only, keeping the cudagraph-incompatible ops (i.e. some attention ops) outside the cudagraph for general flexibility.

FULL mode: Capture full cudagraph for all batches. Can be good for small models or workloads with small prompts; not supported by many backends. Generally for performance FULL_AND_PIECEWISE is better.

FULL_DECODE_ONLY mode: Capture full cudagraph for decode batches only. Mixed prefill-decode batches are run without cudagraphs. Can be good for decode instances in a P/D setup where prefill is not as important so we can save some memory.

FULL_AND_PIECEWISE mode: Capture full cudagraph for decode batches and piecewise cudagraph for prefill and mixed prefill-decode batches. This is the most performant mode for most models and is the default.

Currently, the cudagraph mode is only used for the v1 engine. Note that the cudagraph logic is generally orthogonal to the compilation logic. While piecewise cudagraphs require piecewise compilation (level=PIECEWISE and non-empty splitting_ops), full cudagraphs are supported with and without compilation.

Warning: This flag is new and subject to change; in addition, more modes may be added.
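
A minimal sketch of selecting the mode, either as the enum or as a string (the string form relies on the validate_cudagraph_mode_before validator documented further below, assuming validation runs at construction time):

from vllm.config import CompilationConfig, CUDAGraphMode

cfg_enum = CompilationConfig(cudagraph_mode=CUDAGraphMode.FULL_AND_PIECEWISE)
cfg_str = CompilationConfig(cudagraph_mode="full_and_piecewise")  # upper-cased
# and parsed into the enum by the "before" field validator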

cudagraph_num_of_warmups class-attribute instance-attribute

cudagraph_num_of_warmups: int = 0

Number of warmup runs for cudagraph. It means the first several runs will be treated as warmup runs. Only after that, the execution will be recorded, and the recorded cudagraph will be used for subsequent runs.

custom_ops class-attribute instance-attribute

custom_ops: list[str] = field(default_factory=list)

Fine-grained control over which custom ops to enable/disable. Use 'all' to enable all, 'none' to disable all. Also specify a list of custom op names to enable (prefixed with a '+'), or disable (prefixed with a '-'). Examples:

  • 'all,-op1' to enable all except op1
  • 'none,+op1,+op2' to enable only op1 and op2

By default, all custom ops are enabled when running without Inductor and disabled when running with Inductor: level>=PIECEWISE and use_inductor=True. Inductor generates (fused) Triton kernels for disabled custom ops.
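
A sketch of both syntaxes; the op names ("rms_norm", "silu_and_mul") are illustrative and only take effect if the corresponding custom ops actually exist in the model:

from vllm.config import CompilationConfig

# Enable everything except one op.
cfg_all_but_one = CompilationConfig(custom_ops=["all", "-rms_norm"])

# Disable everything except an explicit whitelist.
cfg_whitelist = CompilationConfig(custom_ops=["none", "+rms_norm", "+silu_and_mul"])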

debug_dump_path class-attribute instance-attribute

debug_dump_path: Optional[Path] = None

The path to dump the debug information.

disabled_custom_ops class-attribute instance-attribute

disabled_custom_ops: Counter[str] = field(
    default_factory=Counter, init=False
)

custom ops that are disabled

enabled_custom_ops class-attribute instance-attribute

enabled_custom_ops: Counter[str] = field(
    default_factory=Counter, init=False
)

custom ops that are enabled

full_cuda_graph class-attribute instance-attribute

full_cuda_graph: Optional[bool] = False

Whether to use a full cuda graph for the entire forward pass rather than splitting certain operations such as attention into subgraphs. Thus this flag cannot be used together with splitting_ops. This may provide performance benefits for smaller models. Warning: This flag is deprecated and will be removed in the next major or minor release, i.e. v0.11.0 or v1.0.0. Please use cudagraph_mode=FULL_AND_PIECEWISE instead.

inductor_compile_config class-attribute instance-attribute

inductor_compile_config: dict = field(default_factory=dict)

Additional configurations for inductor. If left empty (the default), inductor's default configurations are used.

inductor_passes class-attribute instance-attribute

inductor_passes: dict[str, str] = field(
    default_factory=dict
)

Additional passes for inductor. It is a dictionary from pass name to pass function qualified name. We use function name because the config uses JSON format. If we pass the config from Python, functions can also be passed directly via Python object constructor, e.g. CompilationConfig(inductor_passes={"a": func}).
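
Following the constructor example above, a sketch of registering a pass from Python; the pass body and the qualified name in the comment are hypothetical placeholders, and the key shown is one plausible inductor hook name:

from vllm.config import CompilationConfig

def my_post_grad_pass(graph):
    # Hypothetical pass body; a real pass would transform the FX graph.
    return graph

# From Python, the callable can be passed directly.
cfg = CompilationConfig(
    inductor_passes={"post_grad_custom_post_pass": my_post_grad_pass})

# From JSON, the same pass would instead be referenced by qualified name, e.g.
# {"inductor_passes": {"post_grad_custom_post_pass": "my_pkg.my_post_grad_pass"}}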

level class-attribute instance-attribute

level: Optional[int] = None

The level of compilation:

  • None: If None, we will select the default compilation level. For V1 engine this is 3, for V0 engine this is 0.
  • 0: no compilation.
  • 1: dynamo as is.
  • 2: dynamo once.
  • 3: piecewise compilation.
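
For example (a sketch; how the config is passed to an engine is environment-specific):

from vllm.config import CompilationConfig, CompilationLevel

# Explicitly request piecewise compilation (level 3); leaving level=None lets
# vLLM pick the default level for the engine in use.
cfg = CompilationConfig(level=CompilationLevel.PIECEWISE)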

local_cache_dir class-attribute instance-attribute

local_cache_dir: str = field(default=None, init=False)

local cache dir for each rank

max_capture_size class-attribute instance-attribute

max_capture_size: int = field(default=None, init=False)

not configurable, computed after init

pass_config class-attribute instance-attribute

pass_config: PassConfig = field(default_factory=PassConfig)

Custom inductor passes, see PassConfig for more details

splitting_ops class-attribute instance-attribute

splitting_ops: Optional[list[str]] = None

A list of ops to split the full graph into subgraphs, used in piecewise compilation.

static_forward_context class-attribute instance-attribute

static_forward_context: dict[str, Any] = field(
    default_factory=dict, init=False
)

Per-model forward context: a map from layer name to the layer objects that need to be accessed outside model code, e.g., Attention, FusedMOE when dp_size>1.

traced_files class-attribute instance-attribute

traced_files: set[str] = field(
    default_factory=set, init=False
)

files that are traced for compilation

use_cudagraph class-attribute instance-attribute

use_cudagraph: bool = True

Whether to use cudagraph inside compilation:

  • False: cudagraph inside compilation is not used.
  • True: cudagraph inside compilation is used. It requires that all input buffers have fixed addresses, and all splitting ops write their outputs to input buffers.

In the vLLM V1 Engine, this flag only applies for CompilationLevel.PIECEWISE (aka -O3). Note that this is orthogonal to the cudagraph capture logic outside of compilation. Warning: This flag is deprecated and will be removed in the next major or minor release, i.e. v0.11.0 or v1.0.0. Please use cudagraph_mode=PIECEWISE instead.

use_inductor class-attribute instance-attribute

use_inductor: bool = True

Whether to use inductor compilation:

  • False: inductor compilation is not used. graph runs in eager (custom_ops enabled by default).
  • True: inductor compilation is used (custom_ops disabled by default). One graph for symbolic shape and one graph per size in compile_sizes are compiled using configurations in inductor_compile_config.

This setting is ignored if level<PIECEWISE.

use_inductor_graph_partition class-attribute instance-attribute

use_inductor_graph_partition: bool = False

Use inductor graph partition to split the graph at cudagraph_unsafe ops. This partition happens at inductor codegen time after all passes and fusions are finished. It generates a single call function which wraps cudagraph-safe ops into partition functions and leaves cudagraph-unsafe ops outside the partition functions. For a graph with N cudagraph-unsafe ops (e.g., Attention), there would be N+1 partitions. To mark an op as cudagraph unsafe, we can add tags=(torch._C.Tag.cudagraph_unsafe,) when registering the custom op.

This config supports both full cudagraph and piecewise cudagraph without compiling twice. For piecewise cudagraph, it applies vLLM CUDAGraph wrapper to each partition. For N+1 partitions, there would be N+1 CUDAGraph wrapper instances.

For full CUDAGraph, we always apply a single CUDAGraph wrapper outside the inductor call function in the model runner. The top-level full cudagraph capture ignores all partitioning.

__post_init__

__post_init__() -> None
Source code in vllm/config/compilation.py
def __post_init__(self) -> None:
    count_none = self.custom_ops.count("none")
    count_all = self.custom_ops.count("all")
    assert count_none + count_all <= 1, "Can only specify 'none' or 'all'"

    # TODO(zou3519/luka): There are 2 issues with auto-functionalization V2:
    # 1. A bug in PyTorch, fixed in 2.7:
    #    https://github.com/pytorch/pytorch/issues/147924
    # 2. Custom passes (fusion) rely on auto-functionalization V1 and don't
    #    work with V2. Addressing this will take extra engineering effort
    #    and it is not yet a priority. RFC here:
    #    https://github.com/vllm-project/vllm/issues/14703

    if is_torch_equal_or_newer("2.6"):
        KEY = 'enable_auto_functionalized_v2'
        if KEY not in self.inductor_compile_config:
            self.inductor_compile_config[KEY] = False

    for k, v in self.inductor_passes.items():
        if not isinstance(v, str):
            assert callable(v), (
                f"pass {k} should be callable or a qualified name")
            self.inductor_compile_config[k] = v if isinstance(
                v, InductorPass) else CallableInductorPass(v)
            continue

        # resolve function from qualified name
        names = v.split(".")
        module = ".".join(names[:-1])
        func_name = names[-1]
        func = __import__(module).__dict__[func_name]
        self.inductor_compile_config[k] = func if isinstance(
            func, InductorPass) else CallableInductorPass(func)

    if isinstance(self.pass_config, dict):
        self.pass_config = PassConfig(**self.pass_config)

    # migrate the deprecated flags
    if not self.use_cudagraph:
        logger.warning("use_cudagraph is deprecated, use "
                       "cudagraph_mode=NONE instead.")
        if self.cudagraph_mode is not None and \
            self.cudagraph_mode != CUDAGraphMode.NONE:
            raise ValueError(
                "use_cudagraph and cudagraph_mode are mutually"
                " exclusive, prefer cudagraph_mode since "
                "use_cudagraph is deprecated.")
        self.cudagraph_mode = CUDAGraphMode.NONE
    if self.full_cuda_graph:
        logger.warning("full_cuda_graph is deprecated, use "
                       "cudagraph_mode=FULL instead.")
        if self.cudagraph_mode is not None and \
            not self.cudagraph_mode.has_full_cudagraphs():
            raise ValueError("full_cuda_graph and cudagraph_mode are "
                             "mutually exclusive, prefer cudagraph_mode "
                             "since full_cuda_graph is deprecated.")
        self.cudagraph_mode = CUDAGraphMode.FULL

    if (self.use_inductor_graph_partition
            and not is_torch_equal_or_newer("2.9.0.dev")):
        raise ValueError("use_inductor_graph_partition is only "
                         "supported with torch>=2.9.0.dev. Set "
                         "use_inductor_graph_partition=False instead.")

    for op in self.custom_ops:
        if op[0] not in {'+', '-'} and op not in {'all', 'none'}:
            raise ValueError(f"Invalid syntax '{op}' for custom op, "
                             "must be 'all', 'none', '+op' or '-op' "
                             "(where 'op' is the registered op name)")

__repr__

__repr__() -> str
Source code in vllm/config/compilation.py
def __repr__(self) -> str:
    exclude = {
        "static_forward_context": True,
        "enabled_custom_ops": True,
        "disabled_custom_ops": True,
        "compilation_time": True,
        "bs_to_padded_graph_size": True,
        "traced_files": True,
        "inductor_compile_config": {
            "post_grad_custom_post_pass": True,
        },
    }

    # exclude default attr in pass_config
    pass_config_exclude = {}
    for attr, default_val in vars(PassConfig()).items():
        if getattr(self.pass_config, attr) == default_val:
            pass_config_exclude[attr] = True
    if pass_config_exclude:
        exclude["pass_config"] = pass_config_exclude

    config = TypeAdapter(CompilationConfig).dump_python(self,
                                                        exclude=exclude,
                                                        exclude_unset=True)

    return str(config)

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/compilation.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    factors: list[Any] = []
    factors.append(self.level)
    factors.append(self.backend)
    factors.append(self.custom_ops)
    factors.append(self.splitting_ops)
    factors.append(self.use_inductor)
    factors.append(self.inductor_compile_config)
    factors.append(self.inductor_passes)
    factors.append(self.pass_config.uuid())
    return hashlib.sha256(str(factors).encode()).hexdigest()

custom_op_log_check

custom_op_log_check()

This method logs the enabled/disabled custom ops and checks that the passed custom_ops field only contains relevant ops. It is called at the end of set_current_vllm_config, after the custom ops have been instantiated.

Source code in vllm/config/compilation.py
def custom_op_log_check(self):
    """
    This method logs the enabled/disabled custom ops and checks that the
    passed custom_ops field only contains relevant ops.
    It is called at the end of set_current_vllm_config,
    after the custom ops have been instantiated.
    """

    if len(self.enabled_custom_ops) + len(self.disabled_custom_ops) == 0:
        logger.debug("No custom ops found in model.")
        return

    logger.debug("enabled custom ops: %s", self.enabled_custom_ops)
    logger.debug("disabled custom ops: %s", self.disabled_custom_ops)

    all_ops_in_model = (self.enabled_custom_ops | self.disabled_custom_ops)
    for op in self.custom_ops:
        if op in {"all", "none"}:
            continue

        assert op[0] in {'+', '-'}, "Invalid custom op syntax " \
                                    "(should be checked during init)"

        # check if op name exists in model
        op_name = op[1:]
        if op_name not in all_ops_in_model:
            from vllm.model_executor.custom_op import CustomOp

            # Does op exist at all or is it just not present in this model?
            # Note: Only imported op classes appear in the registry.
            missing_str = "doesn't exist (or wasn't imported/registered)" \
                if op_name not in CustomOp.op_registry \
                else "not present in model"

            enable_str = "enabling" if op[0] == '+' else "disabling"
            logger.warning_once("Op '%s' %s, %s with '%s' has no effect",
                                op_name, missing_str, enable_str, op)

init_backend

init_backend(
    vllm_config: VllmConfig,
) -> Union[str, Callable]
Source code in vllm/config/compilation.py
def init_backend(self, vllm_config: "VllmConfig") -> Union[str, Callable]:
    if self.level == CompilationLevel.NO_COMPILATION:
        raise ValueError("No compilation level is set.")

    from torch._dynamo.backends.registry import list_backends
    torch_backends = list_backends(exclude_tags=tuple())
    if self.level in [
            CompilationLevel.DYNAMO_AS_IS, CompilationLevel.DYNAMO_ONCE
    ]:
        if self.backend == "":
            return "eager"
        if self.backend in torch_backends:
            return self.backend
        return resolve_obj_by_qualname(self.backend)

    # TODO: pass user-specified backend to piecewise compilation
    # merge with the config use_inductor
    assert self.level == CompilationLevel.PIECEWISE

    from vllm.compilation.backends import VllmBackend
    return VllmBackend(vllm_config)

init_with_cudagraph_sizes

init_with_cudagraph_sizes(
    cudagraph_capture_sizes: list[int],
) -> None

To complete the initialization of config, we need to know the cudagraph sizes.

Source code in vllm/config/compilation.py
def init_with_cudagraph_sizes(self,
                              cudagraph_capture_sizes: list[int]) -> None:
    """To complete the initialization of config,
    we need to know the cudagraph sizes."""

    if self.cudagraph_capture_sizes is None:
        self.cudagraph_capture_sizes = cudagraph_capture_sizes
    else:
        # de-duplicate the sizes provided by the config
        dedup_sizes = list(set(self.cudagraph_capture_sizes))
        if len(dedup_sizes) < len(self.cudagraph_capture_sizes):
            logger.info(("cudagraph sizes specified by model runner"
                         " %s is overridden by config %s"),
                        cudagraph_capture_sizes, dedup_sizes)
        self.cudagraph_capture_sizes = dedup_sizes

    computed_compile_sizes = []
    if self.compile_sizes is not None:
        # de-duplicate the sizes provided by the config
        self.compile_sizes = list(set(self.compile_sizes))
        for x in self.compile_sizes:
            if isinstance(x, str):
                assert x == "cudagraph_capture_sizes", \
                    "Unrecognized size type in compile_sizes, " \
                    f"expect 'cudagraph_capture_sizes', got {x}"
                computed_compile_sizes.extend(self.cudagraph_capture_sizes)
            else:
                assert isinstance(x, int)
                computed_compile_sizes.append(x)
    self.compile_sizes = computed_compile_sizes  # type: ignore

    # sort to make sure cudagraph capture sizes are in descending order
    self.cudagraph_capture_sizes.sort(reverse=True)
    self.max_capture_size = self.cudagraph_capture_sizes[
        0] if self.cudagraph_capture_sizes else 0

    # pre-compute the mapping from batch size to padded graph size
    self.bs_to_padded_graph_size = [
        0 for i in range(self.max_capture_size + 1)
    ]
    for end, start in zip(self.cudagraph_capture_sizes,
                          self.cudagraph_capture_sizes[1:] + [0]):
        for bs in range(start, end):
            if bs == start:
                self.bs_to_padded_graph_size[bs] = start
            else:
                self.bs_to_padded_graph_size[bs] = end
    self.bs_to_padded_graph_size[
        self.max_capture_size] = self.max_capture_size

is_attention_compiled_piecewise

is_attention_compiled_piecewise() -> bool
Source code in vllm/config/compilation.py
def is_attention_compiled_piecewise(self) -> bool:
    use_fx_graph_piecewise_compilation = (
        self.level == CompilationLevel.PIECEWISE
        and self.splitting_ops_contain_attention())

    inductor_used = (self.level == CompilationLevel.PIECEWISE
                     and self.use_inductor) or (
                         self.level >= CompilationLevel.DYNAMO_AS_IS
                         and self.backend == "inductor")
    use_inductor_piecewise_compilation = (
        inductor_used and self.use_inductor_graph_partition
        and not self.splitting_ops_contain_attention())

    return use_fx_graph_piecewise_compilation or \
        use_inductor_piecewise_compilation

set_splitting_ops_for_attn_fusion

set_splitting_ops_for_attn_fusion()
Source code in vllm/config/compilation.py
def set_splitting_ops_for_attn_fusion(self):
    assert self.pass_config.enable_attn_fusion
    if self.splitting_ops is None:
        self.splitting_ops = []
        if self.cudagraph_mode.has_piecewise_cudagraphs():
            logger.warning_once(
                "enable_attn_fusion is incompatible with piecewise "
                "cudagraph when use_inductor_graph_partition is off."
                "In this case, splitting_ops will be set to empty "
                "list, and cudagraph_mode will be set to FULL. "
                "Please ensure you are using attention backends that "
                "support cudagraph or set cudagraph_mode to NONE "
                "explicitly if encountering any problems.")
            self.cudagraph_mode = CUDAGraphMode.FULL

    assert not self.splitting_ops_contain_attention(), (
        "attention ops should not be in splitting_ops "
        "when enable_attn_fusion is True")

set_splitting_ops_for_inductor_graph_partition

set_splitting_ops_for_inductor_graph_partition()
Source code in vllm/config/compilation.py
def set_splitting_ops_for_inductor_graph_partition(self):
    assert self.use_inductor_graph_partition
    use_inductor_graph_partition_msg = (
        "When use_inductor_graph_partition=True, splitting_ops "
        "are ignored and set to an empty list. Instead, "
        "\"tags=(torch._C.Tag.cudagraph_unsafe, ),\" is "
        "used to annotate custom ops for graph partition.")
    if self.splitting_ops is not None and \
        len(self.splitting_ops) > 0:
        logger.warning_once(use_inductor_graph_partition_msg)
    self.splitting_ops = []

set_splitting_ops_for_v1

set_splitting_ops_for_v1()
Source code in vllm/config/compilation.py
def set_splitting_ops_for_v1(self):
    # NOTE: this function needs to be called only when level is
    # CompilationLevel.PIECEWISE
    assert self.level == CompilationLevel.PIECEWISE, (
        "set_splitting_ops_for_v1 should only be called when "
        "level is CompilationLevel.PIECEWISE")

    if self.use_inductor_graph_partition:
        self.set_splitting_ops_for_inductor_graph_partition()
        return

    if self.pass_config.enable_attn_fusion:
        # here use_inductor_graph_partition is False
        self.set_splitting_ops_for_attn_fusion()
        return

    if self.splitting_ops is None:
        # NOTE: When using full cudagraph, instead of setting an empty
        # list and capture the full cudagraph inside the flattened fx
        # graph, we keep the piecewise fx graph structure but capture
        # the full cudagraph outside the fx graph. This reduces some
        # cpu overhead when the runtime batch_size is not cudagraph
        # captured. see https://github.com/vllm-project/vllm/pull/20059
        # for details. Make a copy to avoid mutating the class-level
        # list via reference.
        self.splitting_ops = list(self._attention_ops)
    elif len(self.splitting_ops) == 0:
        logger.warning_once(
            "Using piecewise compilation with empty splitting_ops")
        if self.cudagraph_mode == CUDAGraphMode.PIECEWISE:
            logger.warning_once(
                "Piecewise compilation with empty splitting_ops do not" \
                "contains piecewise cudagraph. Setting cudagraph_"
                "mode to NONE. Hint: If you are using attention backends "
                "that support cudagraph, consider manually setting "
                "cudagraph_mode to FULL or FULL_DECODE_ONLY to enable "
                "full cudagraphs.")
            self.cudagraph_mode = CUDAGraphMode.NONE
        elif self.cudagraph_mode == CUDAGraphMode.FULL_AND_PIECEWISE:
            logger.warning_once(
                "Piecewise compilation with empty splitting_ops do not "
                "contains piecewise cudagraph. Setting cudagraph_mode "
                "to FULL.")
            self.cudagraph_mode = CUDAGraphMode.FULL
        self.splitting_ops = []

splitting_ops_contain_attention

splitting_ops_contain_attention() -> bool
Source code in vllm/config/compilation.py
def splitting_ops_contain_attention(self) -> bool:
    return self.splitting_ops is not None and all(
        op in self.splitting_ops for op in self._attention_ops)

validate_cudagraph_mode_before classmethod

validate_cudagraph_mode_before(value: Any) -> Any

Enable parsing the cudagraph_mode enum type from a string.

Source code in vllm/config/compilation.py
@field_validator("cudagraph_mode", mode="before")
@classmethod
def validate_cudagraph_mode_before(cls, value: Any) -> Any:
    """
    Enable parsing the `cudagraph_mode` enum type from a string.
    """
    if isinstance(value, str):
        return CUDAGraphMode[value.upper()]
    return value

CompilationLevel

Source code in vllm/config/compilation.py
class CompilationLevel:
    # constants for the levels of the compilation process
    NO_COMPILATION = 0
    DYNAMO_AS_IS = 1
    DYNAMO_ONCE = 2
    PIECEWISE = 3

DYNAMO_AS_IS class-attribute instance-attribute

DYNAMO_AS_IS = 1

DYNAMO_ONCE class-attribute instance-attribute

DYNAMO_ONCE = 2

NO_COMPILATION class-attribute instance-attribute

NO_COMPILATION = 0

PIECEWISE class-attribute instance-attribute

PIECEWISE = 3

DeviceConfig

Configuration for the device to use for vLLM execution.

Source code in vllm/config/device.py
@config
@dataclass(config=ConfigDict(arbitrary_types_allowed=True))
class DeviceConfig:
    """Configuration for the device to use for vLLM execution."""

    device: SkipValidation[Optional[Union[Device, torch.device]]] = "auto"
    """Device type for vLLM execution.
    This parameter is deprecated and will be
    removed in a future release.
    It will now be set automatically based
    on the current platform."""
    device_type: str = field(init=False)
    """Device type from the current platform. This is set in
    `__post_init__`."""

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # the device/platform information will be summarized
        # by torch/vllm automatically.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self):
        if self.device == "auto":
            # Automated device type detection
            from vllm.platforms import current_platform
            self.device_type = current_platform.device_type
            if not self.device_type:
                raise RuntimeError(
                    "Failed to infer device type, please set "
                    "the environment variable `VLLM_LOGGING_LEVEL=DEBUG` "
                    "to turn on verbose logging to help debug the issue.")
        else:
            # Device type is assigned explicitly
            if isinstance(self.device, str):
                self.device_type = self.device
            elif isinstance(self.device, torch.device):
                self.device_type = self.device.type

        # Some device types require processing inputs on CPU
        if self.device_type in ["tpu"]:
            self.device = None
        else:
            # Set device with device type
            self.device = torch.device(self.device_type)

device class-attribute instance-attribute

device: SkipValidation[Optional[Union[Device, device]]] = (
    "auto"
)

Device type for vLLM execution. This parameter is deprecated and will be removed in a future release. It will now be set automatically based on the current platform.

device_type class-attribute instance-attribute

device_type: str = field(init=False)

Device type from the current platform. This is set in __post_init__.

__post_init__

__post_init__()
Source code in vllm/config/device.py
def __post_init__(self):
    if self.device == "auto":
        # Automated device type detection
        from vllm.platforms import current_platform
        self.device_type = current_platform.device_type
        if not self.device_type:
            raise RuntimeError(
                "Failed to infer device type, please set "
                "the environment variable `VLLM_LOGGING_LEVEL=DEBUG` "
                "to turn on verbose logging to help debug the issue.")
    else:
        # Device type is assigned explicitly
        if isinstance(self.device, str):
            self.device_type = self.device
        elif isinstance(self.device, torch.device):
            self.device_type = self.device.type

    # Some device types require processing inputs on CPU
    if self.device_type in ["tpu"]:
        self.device = None
    else:
        # Set device with device type
        self.device = torch.device(self.device_type)

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/device.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # the device/platform information will be summarized
    # by torch/vllm automatically.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

EPLBConfig

Configuration for Expert Parallel Load Balancing (EP).

Source code in vllm/config/parallel.py
@config
@dataclass
class EPLBConfig:
    """Configuration for Expert Parallel Load Balancing (EP)."""

    window_size: int = 1000
    """Window size for expert load recording."""
    step_interval: int = 3000
    """
    Interval for rearranging experts in expert parallelism.

    Note that if this is greater than the EPLB window size, only the metrics
    of the last `window_size` steps will be used for rearranging experts.
    """

    num_redundant_experts: int = 0
    """Number of redundant experts to use for expert parallelism."""

    log_balancedness: bool = False
    """
    Log the balancedness each step of expert parallelism.
    This is turned off by default since it will cause communication overhead.
    """

log_balancedness class-attribute instance-attribute

log_balancedness: bool = False

Log the balancedness each step of expert parallelism. This is turned off by default since it will cause communication overhead.

num_redundant_experts class-attribute instance-attribute

num_redundant_experts: int = 0

Number of redundant experts to use for expert parallelism.

step_interval class-attribute instance-attribute

step_interval: int = 3000

Interval for rearranging experts in expert parallelism.

Note that if this is greater than the EPLB window size, only the metrics of the last window_size steps will be used for rearranging experts.

window_size class-attribute instance-attribute

window_size: int = 1000

Window size for expert load recording.
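
A sketch of an EPLB setup; the numbers are illustrative, not recommendations:

from vllm.config import EPLBConfig

eplb = EPLBConfig(
    window_size=1000,          # record expert load over the last 1000 steps
    step_interval=3000,        # rearrange experts every 3000 steps
    num_redundant_experts=32,  # keep 32 redundant experts
    log_balancedness=False,    # leave off to avoid extra communication
)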

KVEventsConfig

Configuration for KV event publishing.

Source code in vllm/config/kv_events.py
@config
@dataclass
class KVEventsConfig:
    """Configuration for KV event publishing."""

    enable_kv_cache_events: bool = False
    """If True, enable KV cache events for tracking block storage and removal.
    Events can be published externally by zmq using the event publisher config.
    """

    publisher: str = "null"
    """The publisher to use for publishing kv events. Can be "null", "zmq".
    """

    endpoint: str = "tcp://*:5557"
    """The zmq endpoint to use for publishing kv events.
    """

    replay_endpoint: Optional[str] = None
    """The zmq endpoint to use for replaying kv events.
    """

    buffer_steps: int = 10_000
    """The number of steps to cache for replay endpoint. Will only save
    events from the last N steps for the replay endpoint.
    """

    hwm: int = 100_000
    """The zmq high water mark for the event publisher. After queueing N events,
    events will start dropping if the consumer is not keeping up.
    """

    max_queue_size: int = 100_000
    """The maximum number of events to queue while waiting for publishing.
    """

    topic: str = ""
    """The topic to use for the event publisher. Consumers can subscribe to
    this topic to receive events.
    """

buffer_steps class-attribute instance-attribute

buffer_steps: int = 10000

The number of steps to cache for replay endpoint. Will only save events from the last N steps for the replay endpoint.

enable_kv_cache_events class-attribute instance-attribute

enable_kv_cache_events: bool = False

If True, enable KV cache events for tracking block storage and removal. Events can be published externally by zmq using the event publisher config.

endpoint class-attribute instance-attribute

endpoint: str = 'tcp://*:5557'

The zmq endpoint to use for publishing kv events.

hwm class-attribute instance-attribute

hwm: int = 100000

The zmq high water mark for the event publisher. After queueing N events, events will start dropping if the consumer is not keeping up.

max_queue_size class-attribute instance-attribute

max_queue_size: int = 100000

The maximum number of events to queue while waiting for publishing.

publisher class-attribute instance-attribute

publisher: str = 'null'

The publisher to use for publishing kv events. Can be "null", "zmq".

replay_endpoint class-attribute instance-attribute

replay_endpoint: Optional[str] = None

The zmq endpoint to use for replaying kv events.

topic class-attribute instance-attribute

topic: str = ''

The topic to use for the event publisher. Consumers can subscribe to this topic to receive events.
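
A sketch of publishing KV cache events over zmq; the endpoint, replay endpoint, and topic values are illustrative:

from vllm.config import KVEventsConfig

events = KVEventsConfig(
    enable_kv_cache_events=True,
    publisher="zmq",
    endpoint="tcp://*:5557",         # events are published here
    replay_endpoint="tcp://*:5558",  # optional replay socket
    buffer_steps=10_000,
    topic="kv-events",               # consumers subscribe to this topic
)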

KVTransferConfig

Configuration for distributed KV cache transfer.

Source code in vllm/config/kv_transfer.py
@config
@dataclass
class KVTransferConfig:
    """Configuration for distributed KV cache transfer."""

    kv_connector: Optional[str] = None
    """The KV connector for vLLM to transmit KV caches between vLLM instances.
    """

    engine_id: Optional[str] = None
    """The engine id for KV transfers."""

    kv_buffer_device: Optional[str] = "cuda"
    """The device used by kv connector to buffer the KV cache. Choices are 
    'cuda' and 'cpu'."""

    kv_buffer_size: float = 1e9
    """The buffer size for TorchDistributedConnector. Measured in number of
    bytes. Recommended value: 1e9 (about 1GB)."""

    kv_role: Optional[KVRole] = None
    """Whether this vLLM instance produces, consumes KV cache, or both. Choices
    are 'kv_producer', 'kv_consumer', and 'kv_both'."""

    kv_rank: Optional[int] = None
    """The rank of this vLLM instance in the KV cache transfer. Typical value:
    0 for prefill instance, 1 for decode instance.
    Currently only 1P1D is supported."""

    kv_parallel_size: int = 1
    """The number of parallel instances for KV cache transfer. For
    P2pNcclConnector, this should be 2."""

    kv_ip: str = "127.0.0.1"
    """The KV connector ip, used to build distributed connection."""

    kv_port: int = 14579
    """The KV connector port, used to build distributed connection."""

    kv_connector_extra_config: dict[str, Any] = field(default_factory=dict)
    """any extra config that the connector may need."""

    kv_connector_module_path: Optional[str] = None
    """The Python module path to dynamically load the KV connector from.
    Only supported in V1."""

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self) -> None:
        if self.engine_id is None:
            self.engine_id = str(uuid.uuid4())

        if self.kv_role is not None and self.kv_role not in get_args(KVRole):
            raise ValueError(f"Unsupported kv_role: {self.kv_role}. "
                             f"Supported roles are {get_args(KVRole)}")

        if self.kv_connector is not None and self.kv_role is None:
            raise ValueError("Please specify kv_disagg_role when kv_connector "
                             f"is set, supported roles are {get_args(KVRole)}")

    @property
    def is_kv_transfer_instance(self) -> bool:
        return self.kv_connector is not None and \
            self.kv_role in get_args(KVRole)

    @property
    def is_kv_producer(self) -> bool:
        return self.kv_connector is not None and \
            self.kv_role in get_args(KVProducer)

    @property
    def is_kv_consumer(self) -> bool:
        return self.kv_connector is not None and \
            self.kv_role in get_args(KVConsumer)

    def get_from_extra_config(self, key, default) -> Any:
        return self.kv_connector_extra_config.get(key, default)

engine_id class-attribute instance-attribute

engine_id: Optional[str] = None

The engine id for KV transfers.

is_kv_consumer property

is_kv_consumer: bool

is_kv_producer property

is_kv_producer: bool

is_kv_transfer_instance property

is_kv_transfer_instance: bool

kv_buffer_device class-attribute instance-attribute

kv_buffer_device: Optional[str] = 'cuda'

The device used by kv connector to buffer the KV cache. Choices are 'cuda' and 'cpu'.

kv_buffer_size class-attribute instance-attribute

kv_buffer_size: float = 1000000000.0

The buffer size for TorchDistributedConnector. Measured in number of bytes. Recommended value: 1e9 (about 1GB).

kv_connector class-attribute instance-attribute

kv_connector: Optional[str] = None

The KV connector for vLLM to transmit KV caches between vLLM instances.

kv_connector_extra_config class-attribute instance-attribute

kv_connector_extra_config: dict[str, Any] = field(
    default_factory=dict
)

any extra config that the connector may need.

kv_connector_module_path class-attribute instance-attribute

kv_connector_module_path: Optional[str] = None

The Python module path to dynamically load the KV connector from. Only supported in V1.

kv_ip class-attribute instance-attribute

kv_ip: str = '127.0.0.1'

The KV connector ip, used to build distributed connection.

kv_parallel_size class-attribute instance-attribute

kv_parallel_size: int = 1

The number of parallel instances for KV cache transfer. For P2pNcclConnector, this should be 2.

kv_port class-attribute instance-attribute

kv_port: int = 14579

The KV connector port, used to build distributed connection.

kv_rank class-attribute instance-attribute

kv_rank: Optional[int] = None

The rank of this vLLM instance in the KV cache transfer. Typical value: 0 for prefill instance, 1 for decode instance. Currently only 1P1D is supported.

kv_role class-attribute instance-attribute

kv_role: Optional[KVRole] = None

Whether this vLLM instance produces, consumes KV cache, or both. Choices are 'kv_producer', 'kv_consumer', and 'kv_both'.
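
A sketch of the prefill ("producer") side of a 1P1D setup; the connector name is taken from the kv_parallel_size note above and should be treated as an example rather than a recommendation:

from vllm.config import KVTransferConfig

kv = KVTransferConfig(
    kv_connector="P2pNcclConnector",
    kv_role="kv_producer",
    kv_rank=0,            # 0 for the prefill instance, 1 for decode
    kv_parallel_size=2,   # documented requirement for P2pNcclConnector
    kv_ip="127.0.0.1",
    kv_port=14579,
)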

__post_init__

__post_init__() -> None
Source code in vllm/config/kv_transfer.py
def __post_init__(self) -> None:
    if self.engine_id is None:
        self.engine_id = str(uuid.uuid4())

    if self.kv_role is not None and self.kv_role not in get_args(KVRole):
        raise ValueError(f"Unsupported kv_role: {self.kv_role}. "
                         f"Supported roles are {get_args(KVRole)}")

    if self.kv_connector is not None and self.kv_role is None:
        raise ValueError("Please specify kv_disagg_role when kv_connector "
                         f"is set, supported roles are {get_args(KVRole)}")

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/kv_transfer.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # this config will not affect the computation graph.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

get_from_extra_config

get_from_extra_config(key, default) -> Any
Source code in vllm/config/kv_transfer.py
def get_from_extra_config(self, key, default) -> Any:
    return self.kv_connector_extra_config.get(key, default)

LoRAConfig

Configuration for LoRA.

Source code in vllm/config/lora.py
@config
@dataclass(config=ConfigDict(arbitrary_types_allowed=True))
class LoRAConfig:
    """Configuration for LoRA."""

    max_lora_rank: int = 16
    """Max LoRA rank."""
    max_loras: int = 1
    """Max number of LoRAs in a single batch."""
    fully_sharded_loras: bool = False
    """By default, only half of the LoRA computation is sharded with tensor
    parallelism. Enabling this will use the fully sharded layers. At high
    sequence length, max rank or tensor parallel size, this is likely faster.
    """
    max_cpu_loras: Optional[int] = None
    """Maximum number of LoRAs to store in CPU memory. Must be >= than
    `max_loras`."""
    lora_dtype: Union[torch.dtype, LoRADType] = "auto"
    """Data type for LoRA. If auto, will default to base model dtype."""
    lora_extra_vocab_size: int = 256
    """(Deprecated) Maximum size of extra vocabulary that can be present in a 
    LoRA adapter. Will be removed in v0.12.0."""
    lora_vocab_padding_size: ClassVar[int] = current_platform\
        .get_lora_vocab_padding_size()
    default_mm_loras: Optional[dict[str, str]] = None
    """Dictionary mapping specific modalities to LoRA model paths; this field
    is only applicable to multimodal models and should be leveraged when a
    model always expects a LoRA to be active when a given modality is present.
    Note that currently, if a request provides multiple additional
    modalities, each of which have their own LoRA, we do NOT apply
    default_mm_loras because we currently only support one lora adapter
    per prompt. When run in offline mode, the lora IDs for n modalities
    will be automatically assigned to 1-n with the names of the modalities
    in alphabetic order."""
    bias_enabled: bool = False
    """[DEPRECATED] Enable bias for LoRA adapters. This option will be
    removed in v0.12.0."""

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        factors: list[Any] = []
        factors.append(self.max_lora_rank)
        factors.append(self.max_loras)
        factors.append(self.fully_sharded_loras)
        factors.append(self.lora_dtype)
        factors.append(self.lora_extra_vocab_size)
        factors.append(self.lora_vocab_padding_size)
        factors.append(self.bias_enabled)
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self):
        # Deprecation warning for lora_extra_vocab_size
        logger.warning(
            "`lora_extra_vocab_size` is deprecated and will be removed "
            "in v0.12.0. Additional vocabulary support for "
            "LoRA adapters is being phased out.")

        # Deprecation warning for enable_lora_bias
        if self.bias_enabled:
            logger.warning("`enable_lora_bias` is deprecated "
                           "and will be removed in v0.12.0.")

        # Setting the maximum rank to 512 should be able to satisfy the vast
        # majority of applications.
        possible_max_ranks = (8, 16, 32, 64, 128, 256, 320, 512)
        possible_lora_extra_vocab_size = (256, 512)
        if self.max_lora_rank not in possible_max_ranks:
            raise ValueError(
                f"max_lora_rank ({self.max_lora_rank}) must be one of "
                f"{possible_max_ranks}.")
        if self.lora_extra_vocab_size not in possible_lora_extra_vocab_size:
            raise ValueError(
                f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
                f"must be one of {possible_lora_extra_vocab_size}.")
        if self.max_loras < 1:
            raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
        if self.max_cpu_loras is None:
            self.max_cpu_loras = self.max_loras
        elif self.max_cpu_loras < self.max_loras:
            raise ValueError(
                f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
                f"max_loras ({self.max_loras})")

    def verify_with_cache_config(self, cache_config: CacheConfig):
        if cache_config.cpu_offload_gb > 0 and not envs.VLLM_USE_V1:
            raise ValueError(
                "V0 LoRA does not support CPU offload, please use V1.")

    def verify_with_model_config(self, model_config: ModelConfig):
        if self.lora_dtype in (None, "auto"):
            self.lora_dtype = model_config.dtype
        elif isinstance(self.lora_dtype, str):
            self.lora_dtype = getattr(torch, self.lora_dtype)

bias_enabled class-attribute instance-attribute

bias_enabled: bool = False

[DEPRECATED] Enable bias for LoRA adapters. This option will be removed in v0.12.0.

default_mm_loras class-attribute instance-attribute

default_mm_loras: Optional[dict[str, str]] = None

Dictionary mapping specific modalities to LoRA model paths; this field is only applicable to multimodal models and should be leveraged when a model always expects a LoRA to be active when a given modality is present. Note that currently, if a request provides multiple additional modalities, each of which have their own LoRA, we do NOT apply default_mm_loras because we currently only support one lora adapter per prompt. When run in offline mode, the lora IDs for n modalities will be automatically assigned to 1-n with the names of the modalities in alphabetic order.
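
A sketch of mapping a modality to a dedicated adapter; the modality key and adapter path are illustrative placeholders:

from vllm.config import LoRAConfig

lora = LoRAConfig(
    max_loras=2,
    max_lora_rank=16,
    default_mm_loras={"image": "/path/to/image-specific-lora"},
)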

fully_sharded_loras class-attribute instance-attribute

fully_sharded_loras: bool = False

By default, only half of the LoRA computation is sharded with tensor parallelism. Enabling this will use the fully sharded layers. At high sequence length, max rank or tensor parallel size, this is likely faster.

lora_dtype class-attribute instance-attribute

lora_dtype: Union[dtype, LoRADType] = 'auto'

Data type for LoRA. If auto, will default to base model dtype.

lora_extra_vocab_size class-attribute instance-attribute

lora_extra_vocab_size: int = 256

(Deprecated) Maximum size of extra vocabulary that can be present in a LoRA adapter. Will be removed in v0.12.0.

lora_vocab_padding_size class-attribute

lora_vocab_padding_size: int = get_lora_vocab_padding_size()

max_cpu_loras class-attribute instance-attribute

max_cpu_loras: Optional[int] = None

Maximum number of LoRAs to store in CPU memory. Must be >= max_loras.

max_lora_rank class-attribute instance-attribute

max_lora_rank: int = 16

Max LoRA rank.

max_loras class-attribute instance-attribute

max_loras: int = 1

Max number of LoRAs in a single batch.

__post_init__

__post_init__()
Source code in vllm/config/lora.py
def __post_init__(self):
    # Deprecation warning for lora_extra_vocab_size
    logger.warning(
        "`lora_extra_vocab_size` is deprecated and will be removed "
        "in v0.12.0. Additional vocabulary support for "
        "LoRA adapters is being phased out.")

    # Deprecation warning for enable_lora_bias
    if self.bias_enabled:
        logger.warning("`enable_lora_bias` is deprecated "
                       "and will be removed in v0.12.0.")

    # Setting the maximum rank to 512 should be able to satisfy the vast
    # majority of applications.
    possible_max_ranks = (8, 16, 32, 64, 128, 256, 320, 512)
    possible_lora_extra_vocab_size = (256, 512)
    if self.max_lora_rank not in possible_max_ranks:
        raise ValueError(
            f"max_lora_rank ({self.max_lora_rank}) must be one of "
            f"{possible_max_ranks}.")
    if self.lora_extra_vocab_size not in possible_lora_extra_vocab_size:
        raise ValueError(
            f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
            f"must be one of {possible_lora_extra_vocab_size}.")
    if self.max_loras < 1:
        raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
    if self.max_cpu_loras is None:
        self.max_cpu_loras = self.max_loras
    elif self.max_cpu_loras < self.max_loras:
        raise ValueError(
            f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
            f"max_loras ({self.max_loras})")

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/lora.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    factors: list[Any] = []
    factors.append(self.max_lora_rank)
    factors.append(self.max_loras)
    factors.append(self.fully_sharded_loras)
    factors.append(self.lora_dtype)
    factors.append(self.lora_extra_vocab_size)
    factors.append(self.lora_vocab_padding_size)
    factors.append(self.bias_enabled)
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

verify_with_cache_config

verify_with_cache_config(cache_config: CacheConfig)
Source code in vllm/config/lora.py
def verify_with_cache_config(self, cache_config: CacheConfig):
    if cache_config.cpu_offload_gb > 0 and not envs.VLLM_USE_V1:
        raise ValueError(
            "V0 LoRA does not support CPU offload, please use V1.")

verify_with_model_config

verify_with_model_config(model_config: ModelConfig)
Source code in vllm/config/lora.py
def verify_with_model_config(self, model_config: ModelConfig):
    if self.lora_dtype in (None, "auto"):
        self.lora_dtype = model_config.dtype
    elif isinstance(self.lora_dtype, str):
        self.lora_dtype = getattr(torch, self.lora_dtype)

LoadConfig

Configuration for loading the model weights.

Source code in vllm/config/load.py
@config
@dataclass
class LoadConfig:
    """Configuration for loading the model weights."""

    load_format: Union[str, LoadFormats] = "auto"
    """The format of the model weights to load:\n
    - "auto" will try to load the weights in the safetensors format and fall
    back to the pytorch bin format if safetensors format is not available.\n
    - "pt" will load the weights in the pytorch bin format.\n
    - "safetensors" will load the weights in the safetensors format.\n
    - "npcache" will load the weights in pytorch format and store a numpy cache
    to speed up the loading.\n
    - "dummy" will initialize the weights with random values, which is mainly
    for profiling.\n
    - "tensorizer" will use CoreWeave's tensorizer library for fast weight
    loading. See the Tensorize vLLM Model script in the Examples section for
    more information.\n
    - "runai_streamer" will load the Safetensors weights using Run:ai Model
    Streamer.\n
    - "bitsandbytes" will load the weights using bitsandbytes quantization.\n
    - "sharded_state" will load weights from pre-sharded checkpoint files,
    supporting efficient loading of tensor-parallel models.\n
    - "gguf" will load weights from GGUF format files (details specified in
    https://github.com/ggml-org/ggml/blob/master/docs/gguf.md).\n
    - "mistral" will load weights from consolidated safetensors files used by
    Mistral models.
    - Other custom values can be supported via plugins."""
    download_dir: Optional[str] = None
    """Directory to download and load the weights, default to the default
    cache directory of Hugging Face."""
    safetensors_load_strategy: str = "lazy"
    """Specifies the loading strategy for safetensors weights.
    - "lazy" (default): Weights are memory-mapped from the file. This enables
      on-demand loading and is highly efficient for models on local storage.
    - "eager": The entire file is read into CPU memory upfront before loading.
      This is recommended for models on network filesystems (e.g., Lustre, NFS)
      as it avoids inefficient random reads, significantly speeding up model
      initialization. However, it uses more CPU RAM.
    """
    model_loader_extra_config: Union[dict, TensorizerConfig] = field(
        default_factory=dict)
    """Extra config for model loader. This will be passed to the model loader
    corresponding to the chosen load_format."""
    device: Optional[str] = None
    """Device to which model weights will be loaded, default to
    device_config.device"""
    ignore_patterns: Optional[Union[list[str], str]] = None
    """The list of patterns to ignore when loading the model. Default to
    "original/**/*" to avoid repeated loading of llama's checkpoints."""
    use_tqdm_on_load: bool = True
    """Whether to enable tqdm for showing progress bar when loading model
    weights."""
    pt_load_map_location: Union[str, dict[str, str]] = "cpu"
    """
    pt_load_map_location: the map location for loading pytorch checkpoints, to
    support loading checkpoints that can only be loaded on certain devices like
    "cuda"; this is equivalent to {"": "cuda"}. Another supported format is a
    mapping between devices, e.g. from GPU 1 to GPU 0:
    {"cuda:1": "cuda:0"}. Note that when passed from the command line, the
    strings in the dictionary need to be double quoted for json parsing. For
    more details, see the original doc for `map_location` in https://pytorch.org/docs/stable/generated/torch.load.html
    """

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self):
        self.load_format = self.load_format.lower()
        if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
            logger.info(
                "Ignoring the following patterns when downloading weights: %s",
                self.ignore_patterns)
        else:
            self.ignore_patterns = ["original/**/*"]

device class-attribute instance-attribute

device: Optional[str] = None

Device to which model weights will be loaded, default to device_config.device

download_dir class-attribute instance-attribute

download_dir: Optional[str] = None

Directory to download and load the weights, default to the default cache directory of Hugging Face.

ignore_patterns class-attribute instance-attribute

ignore_patterns: Optional[Union[list[str], str]] = None

The list of patterns to ignore when loading the model. Defaults to "original/**/*" to avoid repeated loading of llama's checkpoints.

load_format class-attribute instance-attribute

load_format: Union[str, LoadFormats] = 'auto'

The format of the model weights to load:

  • "auto" will try to load the weights in the safetensors format and fall back to the pytorch bin format if safetensors format is not available.

  • "pt" will load the weights in the pytorch bin format.

  • "safetensors" will load the weights in the safetensors format.

  • "npcache" will load the weights in pytorch format and store a numpy cache to speed up the loading.

  • "dummy" will initialize the weights with random values, which is mainly for profiling.

  • "tensorizer" will use CoreWeave's tensorizer library for fast weight loading. See the Tensorize vLLM Model script in the Examples section for more information.

  • "runai_streamer" will load the Safetensors weights using Run:ai Model Streamer.

  • "bitsandbytes" will load the weights using bitsandbytes quantization.

  • "sharded_state" will load weights from pre-sharded checkpoint files, supporting efficient loading of tensor-parallel models.

  • "gguf" will load weights from GGUF format files (details specified in https://github.com/ggml-org/ggml/blob/master/docs/gguf.md).

  • "mistral" will load weights from consolidated safetensors files used by Mistral models.

  • Other custom values can be supported via plugins.

model_loader_extra_config class-attribute instance-attribute

model_loader_extra_config: Union[dict, TensorizerConfig] = (
    field(default_factory=dict)
)

Extra config for model loader. This will be passed to the model loader corresponding to the chosen load_format.

pt_load_map_location class-attribute instance-attribute

pt_load_map_location: Union[str, dict[str, str]] = 'cpu'

pt_load_map_location: the map location for loading pytorch checkpoints, to support loading checkpoints that can only be loaded on certain devices like "cuda"; this is equivalent to {"": "cuda"}. Another supported format is a mapping between devices, e.g. from GPU 1 to GPU 0: {"cuda:1": "cuda:0"}. Note that when passed from the command line, the strings in the dictionary need to be double quoted for JSON parsing. For more details, see the original doc for map_location in https://pytorch.org/docs/stable/generated/torch.load.html
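
For illustration, both accepted shapes map directly onto the map_location argument of torch.load. A small sketch (only torch is assumed; the checkpoint file is created on the spot so the example is self-contained):

import torch

# Save a toy checkpoint first.
torch.save({"w": torch.zeros(2, 2)}, "toy_ckpt.pt")

# Equivalent to pt_load_map_location="cpu" (i.e. {"": "cpu"}): everything is
# loaded onto the CPU regardless of where it was saved.
state = torch.load("toy_ckpt.pt", map_location="cpu")

# Device-remapping form, e.g. tensors saved on cuda:1 land on cuda:0. On the
# command line this dict must be JSON with double-quoted strings, e.g.
# '{"cuda:1": "cuda:0"}'.
state = torch.load("toy_ckpt.pt", map_location={"cuda:1": "cuda:0"})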

safetensors_load_strategy class-attribute instance-attribute

safetensors_load_strategy: str = 'lazy'

Specifies the loading strategy for safetensors weights.

  • "lazy" (default): Weights are memory-mapped from the file. This enables on-demand loading and is highly efficient for models on local storage.

  • "eager": The entire file is read into CPU memory upfront before loading. This is recommended for models on network filesystems (e.g., Lustre, NFS) as it avoids inefficient random reads, significantly speeding up model initialization. However, it uses more CPU RAM.
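
To make the lazy/eager distinction concrete, here is a small sketch using the safetensors library directly (it assumes safetensors and torch are installed; vLLM's internal loader may differ in detail):

import torch
from safetensors.torch import load, load_file, save_file

# Create a tiny safetensors file so the example is self-contained.
save_file({"w": torch.zeros(4, 4)}, "toy.safetensors")

# "lazy"-style: load_file memory-maps the file, so tensor data is paged in on
# demand -- efficient on local disks.
lazy_tensors = load_file("toy.safetensors")

# "eager"-style: read the whole file into CPU RAM first, then deserialize.
# This trades memory for sequential reads, which suits network filesystems.
with open("toy.safetensors", "rb") as f:
    eager_tensors = load(f.read())

assert torch.equal(lazy_tensors["w"], eager_tensors["w"])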

use_tqdm_on_load class-attribute instance-attribute

use_tqdm_on_load: bool = True

Whether to enable tqdm for showing progress bar when loading model weights.

__post_init__

__post_init__()
Source code in vllm/config/load.py
def __post_init__(self):
    self.load_format = self.load_format.lower()
    if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
        logger.info(
            "Ignoring the following patterns when downloading weights: %s",
            self.ignore_patterns)
    else:
        self.ignore_patterns = ["original/**/*"]

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/load.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # this config will not affect the computation graph.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str
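
Putting the fields above together, a minimal construction sketch (assuming vllm is installed; the values are purely illustrative):

from vllm.config import LoadConfig

load_config = LoadConfig(
    load_format="SafeTensors",         # __post_init__ lowercases this
    download_dir="/tmp/vllm-weights",  # hypothetical cache directory
    use_tqdm_on_load=False,
)

assert load_config.load_format == "safetensors"
# ignore_patterns was not set, so the default is applied.
assert load_config.ignore_patterns == ["original/**/*"]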

ModelConfig

Configuration for the model.

Source code in vllm/config/model.py
@config
@dataclass(config=ConfigDict(arbitrary_types_allowed=True))
class ModelConfig:
    """Configuration for the model."""

    model: str = "Qwen/Qwen3-0.6B"
    """Name or path of the Hugging Face model to use. It is also used as the
    content for `model_name` tag in metrics output when `served_model_name` is
    not specified."""
    runner: RunnerOption = "auto"
    """The type of model runner to use. Each vLLM instance only supports one
    model runner, even if the same model can be used for multiple types."""
    convert: ConvertOption = "auto"
    """Convert the model using adapters defined in
    [vllm.model_executor.models.adapters][]. The most common use case is to
    adapt a text generation model to be used for pooling tasks."""
    task: Optional[TaskOption] = None
    """[DEPRECATED] The task to use the model for. If the model supports more
    than one model runner, this is used to select which model runner to run.

    Note that the model may support other tasks using the same model runner.
    """
    tokenizer: SkipValidation[str] = None  # type: ignore
    """Name or path of the Hugging Face tokenizer to use. If unspecified, model
    name or path will be used."""
    tokenizer_mode: TokenizerMode = "auto"
    """Tokenizer mode:\n
    - "auto" will use the fast tokenizer if available.\n
    - "slow" will always use the slow tokenizer.\n
    - "mistral" will always use the tokenizer from `mistral_common`.\n
    - "custom" will use --tokenizer to select the preregistered tokenizer."""
    trust_remote_code: bool = False
    """Trust remote code (e.g., from HuggingFace) when downloading the model
    and tokenizer."""
    dtype: Union[ModelDType, torch.dtype] = "auto"
    """Data type for model weights and activations:\n
    - "auto" will use FP16 precision for FP32 and FP16 models, and BF16
    precision for BF16 models.\n
    - "half" for FP16. Recommended for AWQ quantization.\n
    - "float16" is the same as "half".\n
    - "bfloat16" for a balance between precision and range.\n
    - "float" is shorthand for FP32 precision.\n
    - "float32" for FP32 precision."""
    seed: Optional[int] = None
    """Random seed for reproducibility. Initialized to None in V0, but
    initialized to 0 in V1."""
    hf_config_path: Optional[str] = None
    """Name or path of the Hugging Face config to use. If unspecified, model
    name or path will be used."""
    allowed_local_media_path: str = ""
    """Allowing API requests to read local images or videos from directories
    specified by the server file system. This is a security risk. Should only
    be enabled in trusted environments."""
    allowed_media_domains: Optional[list[str]] = None
    """If set, only media URLs that belong to this domain can be used for 
    multi-modal inputs. """
    revision: Optional[str] = None
    """The specific model version to use. It can be a branch name, a tag name,
    or a commit id. If unspecified, will use the default version."""
    code_revision: Optional[str] = None
    """The specific revision to use for the model code on the Hugging Face Hub.
    It can be a branch name, a tag name, or a commit id. If unspecified, will
    use the default version."""
    rope_scaling: dict[str, Any] = field(default_factory=dict)
    """RoPE scaling configuration. For example,
    `{"rope_type":"dynamic","factor":2.0}`."""
    rope_theta: Optional[float] = None
    """RoPE theta. Use with `rope_scaling`. In some cases, changing the RoPE
    theta improves the performance of the scaled model."""
    tokenizer_revision: Optional[str] = None
    """The specific revision to use for the tokenizer on the Hugging Face Hub.
    It can be a branch name, a tag name, or a commit id. If unspecified, will
    use the default version."""
    max_model_len: SkipValidation[int] = None  # type: ignore
    """Model context length (prompt and output). If unspecified, will be
    automatically derived from the model config.

    When passing via `--max-model-len`, supports k/m/g/K/M/G in human-readable
    format. Examples:\n
    - 1k -> 1000\n
    - 1K -> 1024\n
    - 25.6k -> 25,600"""
    spec_target_max_model_len: Optional[int] = None
    """Specify the maximum length for spec decoding draft models."""
    quantization: SkipValidation[Optional[QuantizationMethods]] = None
    """Method used to quantize the weights. If `None`, we first check the
    `quantization_config` attribute in the model config file. If that is
    `None`, we assume the model weights are not quantized and use `dtype` to
    determine the data type of the weights."""
    enforce_eager: bool = False
    """Whether to always use eager-mode PyTorch. If True, we will disable CUDA
    graph and always execute the model in eager mode. If False, we will use
    CUDA graph and eager execution in hybrid for maximal performance and
    flexibility."""
    max_logprobs: int = 20
    """Maximum number of log probabilities to return when `logprobs` is
    specified in `SamplingParams`. The default value comes from the default for the
    OpenAI Chat Completions API. -1 means no cap, i.e. all (output_length *
    vocab_size) logprobs are allowed to be returned and it may cause OOM."""
    logprobs_mode: LogprobsMode = "raw_logprobs"
    """Indicates the content returned in the logprobs and prompt_logprobs.
    Supported mode:
    1) raw_logprobs, 2) processed_logprobs, 3) raw_logits, 4) processed_logits.
    Raw means the values before applying any logit processors, like bad words.
    Processed means the values after applying all processors, including
    temperature and top_k/top_p.
    """
    disable_sliding_window: bool = False
    """Whether to disable sliding window. If True, we will disable the sliding
    window functionality of the model, capping to sliding window size. If the
    model does not support sliding window, this argument is ignored."""
    disable_cascade_attn: bool = False
    """Disable cascade attention for V1. While cascade attention does not
    change the mathematical correctness, disabling it could be useful for
    preventing potential numerical issues. Note that even if this is set to
    False, cascade attention will be only used when the heuristic tells that
    it's beneficial."""
    skip_tokenizer_init: bool = False
    """Skip initialization of tokenizer and detokenizer. Expects valid
    `prompt_token_ids` and `None` for prompt from the input. The generated
    output will contain token ids."""
    enable_prompt_embeds: bool = False
    """If `True`, enables passing text embeddings as inputs via the
    `prompt_embeds` key. Note that enabling this will double the time required
    for graph compilation."""
    served_model_name: Optional[Union[str, list[str]]] = None
    """The model name(s) used in the API. If multiple names are provided, the
    server will respond to any of the provided names. The model name in the
    model field of a response will be the first name in this list. If not
    specified, the model name will be the same as the `--model` argument. Note
    that this name(s) will also be used in the `model_name` tag content of
    Prometheus metrics; if multiple names are provided, the metrics tag will
    take the first one."""
    config_format: Union[str, ConfigFormat] = "auto"
    """The format of the model config to load:\n
    - "auto" will try to load the config in hf format if available else it
    will try to load in mistral format.\n
    - "hf" will load the config in hf format.\n
    - "mistral" will load the config in mistral format."""
    hf_token: Optional[Union[bool, str]] = None
    """The token to use as HTTP bearer authorization for remote files . If
    `True`, will use the token generated when running `huggingface-cli login`
    (stored in `~/.huggingface`)."""
    hf_overrides: HfOverrides = field(default_factory=dict)
    """If a dictionary, contains arguments to be forwarded to the Hugging Face
    config. If a callable, it is called to update the HuggingFace config."""
    logits_processor_pattern: Optional[str] = None
    """Optional regex pattern specifying valid logits processor qualified names
    that can be passed with the `logits_processors` extra completion argument.
    Defaults to `None`, which allows no processors."""
    generation_config: str = "auto"
    """The folder path to the generation config. Defaults to `"auto"`, the
    generation config will be loaded from model path. If set to `"vllm"`, no
    generation config is loaded, vLLM defaults will be used. If set to a folder
    path, the generation config will be loaded from the specified folder path.
    If `max_new_tokens` is specified in generation config, then it sets a
    server-wide limit on the number of output tokens for all requests."""
    override_generation_config: dict[str, Any] = field(default_factory=dict)
    """Overrides or sets generation config. e.g. `{"temperature": 0.5}`. If
    used with `--generation-config auto`, the override parameters will be
    merged with the default config from the model. If used with
    `--generation-config vllm`, only the override parameters are used."""
    enable_sleep_mode: bool = False
    """Enable sleep mode for the engine (only cuda platform is supported)."""
    model_impl: Union[str, ModelImpl] = "auto"
    """Which implementation of the model to use:\n
    - "auto" will try to use the vLLM implementation, if it exists, and fall
    back to the Transformers implementation if no vLLM implementation is
    available.\n
    - "vllm" will use the vLLM model implementation.\n
    - "transformers" will use the Transformers model implementation.\n
    - "terratorch" will use the TerraTorch model implementation.
    """
    override_attention_dtype: Optional[str] = None
    """Override dtype for attention"""
    logits_processors: Optional[list[Union[str, type[LogitsProcessor]]]] = None
    """One or more logits processors' fully-qualified class names or class
    definitions"""
    io_processor_plugin: Optional[str] = None
    """IOProcessor plugin name to load at model startup"""

    # Pooler config
    pooler_config: Optional[PoolerConfig] = None
    """Pooler config which controls the behaviour of output pooling in pooling
    models."""
    override_pooler_config: Optional[Union[dict, PoolerConfig]] = None
    """[DEPRECATED] Use `pooler_config` instead. This field will be removed in
    v0.12.0 or v1.0.0, whichever is sooner."""

    # Multimodal config and init vars
    multimodal_config: Optional[MultiModalConfig] = None
    """Configuration for multimodal model. If `None`, this will be inferred
    from the architecture of `self.model`."""
    limit_mm_per_prompt: InitVar[Optional[dict[str, int]]] = None
    media_io_kwargs: InitVar[Optional[dict[str, dict[str, Any]]]] = None
    mm_processor_kwargs: InitVar[Optional[dict[str, Any]]] = None
    mm_processor_cache_gb: InitVar[Optional[float]] = None
    mm_processor_cache_type: InitVar[Optional[MMCacheType]] = None
    mm_shm_cache_max_object_size_mb: InitVar[Optional[int]] = None
    mm_encoder_tp_mode: InitVar[Optional[MMEncoderTPMode]] = None
    interleave_mm_strings: InitVar[Optional[bool]] = None
    skip_mm_profiling: InitVar[Optional[bool]] = None
    video_pruning_rate: InitVar[Optional[float]] = None

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        factors: list[Any] = []
        factors.append(self.model)
        factors.append(self.dtype)
        factors.append(self.quantization)
        factors.append(self.revision)
        factors.append(self.code_revision)
        factors.append(self.max_model_len)
        factors.append(self.max_logprobs)
        factors.append(self.disable_sliding_window)
        factors.append(self.trust_remote_code)
        factors.append(self.generation_config)
        factors.append(self.model_impl)
        factors.append(self.override_generation_config)
        factors.append(self.rope_scaling)
        factors.append(self.rope_theta)
        factors.append(self.video_pruning_rate)

        # hf_config can control how the model looks!
        try:
            hf_config_json = self.hf_config.to_json_string(use_diff=False)
        except TypeError:
            from transformers import PretrainedConfig

            from vllm.utils.jsontree import json_map_leaves

            # Handle nested HF configs with unserializable values gracefully
            hf_config_json = json.dumps(
                json_map_leaves(
                    lambda v: v.to_dict()
                    if isinstance(v, PretrainedConfig) else str(v),
                    self.hf_config.to_dict(),
                ),
                indent=2,
                sort_keys=True,
            ) + "\n"

        factors.append(hf_config_json)

        str_factors = str(factors)
        assert_hashable(str_factors)
        return hashlib.sha256(str(factors).encode()).hexdigest()

    def __post_init__(
        self,
        # Multimodal config init vars
        limit_mm_per_prompt: Optional[dict[str, int]],
        media_io_kwargs: Optional[dict[str, dict[str, Any]]],
        mm_processor_kwargs: Optional[dict[str, Any]],
        mm_processor_cache_gb: Optional[float],
        mm_processor_cache_type: Optional[MMCacheType],
        mm_shm_cache_max_object_size_mb: Optional[int],
        mm_encoder_tp_mode: Optional[MMEncoderTPMode],
        interleave_mm_strings: Optional[bool],
        skip_mm_profiling: Optional[bool],
        video_pruning_rate: Optional[float],
    ) -> None:
        # Set the default seed to 0 in V1.
        # NOTE(woosuk): In V0, we set the default seed to None because the
        # driver worker shares the same process as the user process, and thus
        # setting a seed affects the user process as well.
        # In V1, we use separate processes for workers (unless
        # VLLM_ENABLE_V1_MULTIPROCESSING=0), so setting a seed here
        # doesn't affect the user process. However, without a consistent seed,
        # different tensor parallel workers would sample different tokens,
        # leading to inconsistent results.
        if envs.VLLM_USE_V1 and self.seed is None:
            self.seed = 0
            if not envs.VLLM_ENABLE_V1_MULTIPROCESSING:
                logger.warning(
                    "The global random seed is set to %d. Since "
                    "VLLM_ENABLE_V1_MULTIPROCESSING is set to False, this may "
                    "affect the random state of the Python process that "
                    "launched vLLM.", self.seed)

        # Keep set served_model_name before maybe_model_redirect(self.model)
        self.served_model_name = get_served_model_name(self.model,
                                                       self.served_model_name)
        self.model = maybe_model_redirect(self.model)
        # The tokenizer is consistent with the model by default.
        if self.tokenizer is None:
            self.tokenizer = self.model
        if self.tokenizer_revision is None:
            self.tokenizer_revision = self.revision
        self.tokenizer = maybe_model_redirect(self.tokenizer)

        if isinstance(self.hf_config_path, str):
            self.hf_config_path = maybe_model_redirect(self.hf_config_path)

        if callable(self.hf_overrides):
            hf_overrides_kw = {}
            hf_overrides_fn = self.hf_overrides
        else:
            hf_overrides_kw = self.hf_overrides
            hf_overrides_fn = None

        if self.rope_scaling:
            hf_override: dict[str, Any] = {"rope_scaling": self.rope_scaling}
            hf_overrides_kw.update(hf_override)
            hf_overrides_str = json.dumps(hf_overrides_kw)
            msg = (
                "`--rope-scaling` will be removed in a future release. "
                f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
            warnings.warn(DeprecationWarning(msg), stacklevel=2)
        if self.rope_theta is not None:
            hf_override = {"rope_theta": self.rope_theta}
            hf_overrides_kw.update(hf_override)
            hf_overrides_str = json.dumps(hf_overrides_kw)
            msg = (
                "`--rope-theta` will be removed in a future release. "
                f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
            warnings.warn(DeprecationWarning(msg), stacklevel=2)

        self.maybe_pull_model_tokenizer_for_runai(self.model, self.tokenizer)

        if (backend := envs.VLLM_ATTENTION_BACKEND
            ) and backend == "FLASHINFER" and find_spec("flashinfer") is None:
            raise ValueError(
                "VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
                "module was not found. See "
                "https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile "  # noqa: E501
                "for instructions on how to install it.")

        from vllm.platforms import current_platform

        if (self.override_attention_dtype is not None
                and not current_platform.is_rocm()):
            warnings.warn(
                "override-attention-dtype is set but not using ROCm platform",
                stacklevel=2)

        if (self.enable_sleep_mode
                and not current_platform.is_sleep_mode_available()):
            raise ValueError(
                "Sleep mode is not supported on current platform.")

        hf_config = get_config(self.hf_config_path or self.model,
                               self.trust_remote_code,
                               self.revision,
                               self.code_revision,
                               self.config_format,
                               hf_overrides_kw=hf_overrides_kw,
                               hf_overrides_fn=hf_overrides_fn)

        self.hf_config = hf_config
        self.hf_text_config = get_hf_text_config(self.hf_config)
        self.attention_chunk_size = getattr(self.hf_text_config,
                                            "attention_chunk_size", None)
        self.encoder_config = self._get_encoder_config()
        self.hf_image_processor_config = get_hf_image_processor_config(
            self.model, hf_token=self.hf_token, revision=self.revision)

        architectures = self.architectures
        registry = self.registry
        is_generative_model = registry.is_text_generation_model(
            architectures, self)
        is_pooling_model = registry.is_pooling_model(architectures, self)

        def _task_to_convert(task: TaskOption) -> ConvertType:
            if task == "embedding" or task == "embed":
                return "embed"
            if task == "classify":
                return "classify"
            if task == "reward":
                return "reward"
            if task == "score":
                new_task = self._get_default_pooling_task(architectures)
                return "classify" if new_task == "classify" else "embed"

            return "none"

        if self.task is not None:
            runner: RunnerOption = "auto"
            convert: ConvertOption = "auto"
            msg_prefix = ("The 'task' option has been deprecated and will be "
                          "removed in v0.13.0 or v1.0, whichever comes first.")
            msg_hint = "Please remove this option."

            is_generative_task = self.task in _RUNNER_TASKS["generate"]
            is_pooling_task = self.task in _RUNNER_TASKS["pooling"]

            if is_generative_model and is_pooling_model:
                if is_generative_task:
                    runner = "generate"
                    convert = "auto"
                    msg_hint = ("Please replace this option with `--runner "
                                "generate` to continue using this model "
                                "as a generative model.")
                elif is_pooling_task:
                    runner = "pooling"
                    convert = "auto"
                    msg_hint = ("Please replace this option with `--runner "
                                "pooling` to continue using this model "
                                "as a pooling model.")
                else:  # task == "auto"
                    pass
            elif is_generative_model or is_pooling_model:
                if is_generative_task:
                    runner = "generate"
                    convert = "auto"
                    msg_hint = "Please remove this option"
                elif is_pooling_task:
                    runner = "pooling"
                    convert = _task_to_convert(self.task)
                    msg_hint = ("Please replace this option with `--convert "
                                f"{convert}` to continue using this model "
                                "as a pooling model.")
                else:  # task == "auto"
                    pass
            else:
                debug_info = {
                    "architectures": architectures,
                    "is_generative_model": is_generative_model,
                    "is_pooling_model": is_pooling_model,
                }
                raise AssertionError("The model should be a generative or "
                                     "pooling model when task is set to "
                                     f"{self.task!r}. Found: {debug_info}")

            self.runner = runner
            self.convert = convert

            msg = f"{msg_prefix} {msg_hint}"
            warnings.warn(msg, DeprecationWarning, stacklevel=2)

        self.runner_type = self._get_runner_type(architectures, self.runner)
        self.convert_type = self._get_convert_type(architectures,
                                                   self.runner_type,
                                                   self.convert)

        if self.runner_type == "generate" and not is_generative_model:
            generate_converts = _RUNNER_CONVERTS["generate"]
            if self.convert_type not in generate_converts:
                # Currently we don't have any converters for generative models
                raise ValueError(
                    "This model does not support `--runner generate`.")
        if self.runner_type == "pooling" and not is_pooling_model:
            pooling_converts = _RUNNER_CONVERTS["pooling"]
            if self.convert_type not in pooling_converts:
                convert_option = "<" + "|".join(pooling_converts) + ">"
                raise ValueError(
                    "This model does not support `--runner pooling`. "
                    f"You can pass `--convert {convert_option} to adapt "
                    "it into a pooling model.")

        # Note: Initialize these attributes early because transformers fallback
        # may fail to load dynamic modules in child processes
        model_info, arch = registry.inspect_model_cls(architectures, self)
        self._model_info = model_info
        self._architecture = arch
        logger.info("Resolved architecture: %s", arch)

        # Init pooler config if needed
        if self.runner_type == "pooling":
            if self.override_pooler_config is not None:
                logger.warning_once(
                    "`override_pooler_config` is deprecated and will be "
                    "removed in v0.12.0 or v1.0.0, whichever is sooner. "
                    "Please use `pooler_config` instead.")

                if isinstance(self.override_pooler_config, dict):
                    self.pooler_config = PoolerConfig(
                        **self.override_pooler_config)
                else:
                    self.pooler_config = self.override_pooler_config

            if self.pooler_config is None:
                self.pooler_config = PoolerConfig()

            base_config = get_pooling_config(self.model, self.revision)
            if base_config is not None:
                # Only set values that are not overridden by the user
                for k, v in base_config.items():
                    if getattr(self.pooler_config, k) is None:
                        setattr(self.pooler_config, k, v)

            default_pooling_type = self._model_info.default_pooling_type
            if self.pooler_config.pooling_type is None:
                self.pooler_config.pooling_type = default_pooling_type

        self.dtype: torch.dtype = _get_and_verify_dtype(
            self.model,
            self.hf_config,
            self.dtype,
            is_pooling_model=self.runner_type == "pooling",
            revision=self.revision,
        )

        # Interleaved attention is not supported by some backends in V0
        if (not self.disable_sliding_window
                and is_interleaved(self.hf_text_config)
                and not envs.VLLM_USE_V1
                and (backend := envs.VLLM_ATTENTION_BACKEND)
                in ("XFORMERS", "FLASHINFER")):
            logger.warning_once(
                "%s has interleaved attention, which is currently not "
                "supported by the %s backend. Disabling sliding window and "
                "capping the max length to the sliding window size (%d).",
                self.hf_text_config.model_type,
                backend,
                self.hf_text_config.sliding_window,
            )
            self.disable_sliding_window = True

        self.original_max_model_len = self.max_model_len
        self.max_model_len = self.get_and_verify_max_len(self.max_model_len)
        # Init multimodal config if needed
        if self._model_info.supports_multimodal:
            if (mm_encoder_tp_mode == "data" and
                    not self._model_info.supports_multimodal_encoder_tp_data):
                logger.warning_once(
                    "This model does not support `--mm-encoder-tp-mode data`. "
                    "Falling back to `--mm-encoder-tp-mode weights`.")
                mm_encoder_tp_mode = "weights"

            mm_config_kwargs = dict(
                limit_per_prompt=limit_mm_per_prompt,
                media_io_kwargs=media_io_kwargs,
                mm_processor_kwargs=mm_processor_kwargs,
                mm_processor_cache_gb=mm_processor_cache_gb,
                mm_processor_cache_type=mm_processor_cache_type,
                mm_shm_cache_max_object_size_mb=mm_shm_cache_max_object_size_mb,
                mm_encoder_tp_mode=mm_encoder_tp_mode,
                interleave_mm_strings=interleave_mm_strings,
                skip_mm_profiling=skip_mm_profiling,
                video_pruning_rate=video_pruning_rate,
            )

            mm_config_kwargs = {
                k: v
                for k, v in mm_config_kwargs.items() if v is not None
            }

            self.multimodal_config = MultiModalConfig(**mm_config_kwargs)

        if self.disable_sliding_window:
            # Set after get_and_verify_max_len to ensure that max_model_len
            # can be correctly capped to sliding window size
            self.hf_text_config.sliding_window = None

        if not self.skip_tokenizer_init:
            self._verify_tokenizer_mode()

        # Avoid running try_verify_and_update_config multiple times
        self.config_updated = False

        self._verify_quantization()
        self._verify_cuda_graph()
        self._verify_bnb_config()

    @field_validator("quantization", mode="before")
    @classmethod
    def validate_quantization_before(cls, value: Any) -> Any:
        if isinstance(value, str):
            return value.lower()
        return value

    @model_validator(mode="after")
    def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
        if not isinstance(self.tokenizer, str):
            raise ValueError("tokenizer must be a string after __post_init__.")
        if not isinstance(self.max_model_len, int):
            raise ValueError(
                "max_model_len must be an integer after __post_init__.")
        return self

    def _get_transformers_backend_cls(self) -> str:
        """Determine which Transformers backend class will be used if
        `model_impl` is set to `transformers` or `auto`."""
        if getattr(self, "runner_type", self.runner) == "pooling":
            return "TransformersModel"
        if self.hf_config != self.hf_text_config:
            # 'hf_text_config' differs from 'hf_config', so this is probably a
            # composite config, i.e. multimodal
            return "TransformersForMultimodalLM"
        return "TransformersForCausalLM"

    def using_transformers_backend(self) -> bool:
        """Check if the model is using the Transformers backend class."""
        return self.architecture == self._get_transformers_backend_cls()

    @property
    def registry(self):
        return me_models.ModelRegistry

    @property
    def architectures(self) -> list[str]:
        return getattr(self.hf_config, "architectures", [])

    @property
    def architecture(self) -> str:
        """The architecture vllm actually used."""
        return self._architecture

    def maybe_pull_model_tokenizer_for_runai(self, model: str,
                                             tokenizer: str) -> None:
        """Pull model/tokenizer from Object Storage to temporary
        directory when needed.

        Args:
            model: Model name or path
            tokenizer: Tokenizer name or path
        """

        if not (is_runai_obj_uri(model) or is_runai_obj_uri(tokenizer)):
            return

        if is_runai_obj_uri(model):
            object_storage_model = ObjectStorageModel(url=model)
            object_storage_model.pull_files(
                model, allow_pattern=["*.model", "*.py", "*.json"])
            self.model_weights = model
            self.model = object_storage_model.dir

            # If tokenizer is same as model, download to same directory
            if model == tokenizer:
                object_storage_model.pull_files(model,
                                                ignore_pattern=[
                                                    "*.pt", "*.safetensors",
                                                    "*.bin", "*.tensors",
                                                    "*.pth"
                                                ])
                self.tokenizer = object_storage_model.dir
                return

        # Only download tokenizer if needed and not already handled
        if is_runai_obj_uri(tokenizer):
            object_storage_tokenizer = ObjectStorageModel(url=tokenizer)
            object_storage_tokenizer.pull_files(model,
                                                ignore_pattern=[
                                                    "*.pt", "*.safetensors",
                                                    "*.bin", "*.tensors",
                                                    "*.pth"
                                                ])
            self.tokenizer = object_storage_tokenizer.dir

    def _get_encoder_config(self):
        return get_sentence_transformer_tokenizer_config(
            self.model, self.revision)

    def _verify_tokenizer_mode(self) -> None:
        tokenizer_mode = cast(TokenizerMode, self.tokenizer_mode.lower())
        if tokenizer_mode not in get_args(TokenizerMode):
            raise ValueError(
                f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
                f"one of {get_args(TokenizerMode)}.")
        self.tokenizer_mode = tokenizer_mode

    def _get_default_runner_type(
        self,
        architectures: list[str],
    ) -> RunnerType:
        registry = self.registry

        # Some Sentence Transformers models use *ForCausalLM archs
        if get_pooling_config(self.model, self.revision):
            return "pooling"

        for arch in architectures:
            if arch in registry.get_supported_archs():
                if registry.is_pooling_model(architectures, self):
                    return "pooling"
                if registry.is_text_generation_model(architectures, self):
                    return "generate"

            match = try_match_architecture_defaults(arch)
            if match:
                _, (runner_type, _) = match
                return runner_type

        return "generate"

    def _get_runner_type(
        self,
        architectures: list[str],
        runner: RunnerOption,
    ) -> RunnerType:
        if runner != "auto":
            return runner

        runner_type = self._get_default_runner_type(architectures)

        # Don't log the most common case
        if runner_type != "generate":
            logger.info(
                "Resolved `--runner auto` to `--runner %s`. "
                "Pass the value explicitly to silence this message.",
                runner_type)

        return runner_type

    def _get_default_convert_type(
        self,
        architectures: list[str],
        runner_type: RunnerType,
    ) -> ConvertType:
        registry = self.registry

        for arch in architectures:
            if arch in registry.get_supported_archs():
                if (runner_type == "generate"
                        and registry.is_text_generation_model(
                            architectures, self)):
                    return "none"
                if (runner_type == "pooling"
                        and registry.is_pooling_model(architectures, self)):
                    return "none"

            match = try_match_architecture_defaults(arch,
                                                    runner_type=runner_type)
            if match:
                _, (_, convert_type) = match
                return convert_type

        # This is to handle Sentence Transformers models that use *ForCausalLM
        # and also multi-modal pooling models which are not defined as
        # Sentence Transformers models
        if runner_type == "pooling":
            return "embed"

        return "none"

    def _get_convert_type(
        self,
        architectures: list[str],
        runner_type: RunnerType,
        convert: ConvertOption,
    ) -> ConvertType:
        if convert != "auto":
            return convert

        convert_type = self._get_default_convert_type(architectures,
                                                      runner_type)

        # Don't log the most common case
        if convert_type != "none":
            logger.info(
                "Resolved `--convert auto` to `--convert %s`. "
                "Pass the value explicitly to silence this message.",
                convert_type)

        return convert_type

    def _get_default_pooling_task(
        self,
        architectures: list[str],
    ) -> Literal["embed", "classify", "reward"]:
        if self.registry.is_cross_encoder_model(architectures, self):
            return "classify"

        for arch in architectures:
            match = try_match_architecture_defaults(arch,
                                                    runner_type="pooling")
            if match:
                _, (_, convert_type) = match
                assert convert_type != "none"
                return convert_type

        return "embed"

    def _parse_quant_hf_config(self, hf_config: PretrainedConfig):
        quant_cfg = getattr(hf_config, "quantization_config", None)
        if quant_cfg is None:
            # compressed-tensors uses a "compression_config" key
            quant_cfg = getattr(hf_config, "compression_config", None)

        else:
            # Set quant_method for ModelOpt models.
            producer_name = quant_cfg.get("producer", {}).get("name")
            if producer_name == "modelopt":
                quant_algo = quant_cfg.get("quantization",
                                           {}).get("quant_algo")
                if quant_algo == "FP8":
                    quant_cfg["quant_method"] = "modelopt"
                elif quant_algo == "NVFP4":
                    quant_cfg["quant_method"] = "modelopt_fp4"
                elif quant_algo is not None:
                    raise ValueError(
                        f"Unknown ModelOpt quant algo: {quant_algo}")

        return quant_cfg

    def _verify_quantization(self) -> None:
        supported_quantization = me_quant.QUANTIZATION_METHODS
        if self.quantization is not None:
            self.quantization = cast(me_quant.QuantizationMethods,
                                     self.quantization)

        # Parse quantization method from the HF model config, if available.
        quant_cfg = self._parse_quant_hf_config(self.hf_config)
        if quant_cfg is None and (text_config := getattr(
                self.hf_config, "text_config", None)):
            # Check the text config as well for multi-modal models.
            quant_cfg = self._parse_quant_hf_config(text_config)

        if quant_cfg is not None:
            # Use the community standard 'quant_method'
            quant_method = quant_cfg.get("quant_method", "").lower()

            # Normalize library names
            quant_method = quant_method.replace("compressed_tensors",
                                                "compressed-tensors")

            quant_cfg["quant_method"] = quant_method

            # Quantization methods which are overrides (i.e. they have a
            # `override_quantization_method` method) must be checked in order
            # of preference (this is particularly important for GPTQ).
            overrides = [
                "bitblas",
                "gptq_marlin_24",
                "gptq_marlin",
                "gptq_bitblas",
                "awq_marlin",
                "ipex",
                "moe_wna16",
                "modelopt",
                "modelopt_fp4",
                "petit_nvfp4",
                # Ensure heavy backends are probed last to avoid unnecessary
                # imports during override detection (e.g., MXFP4 imports Triton)
                "mxfp4",
            ]
            quantization_methods = [
                q for q in supported_quantization if q not in overrides
            ]
            # Any custom overrides will be in quantization_methods so we place
            # them at the start of the list so custom overrides have preference
            # over the built-in ones.
            quantization_methods = quantization_methods + overrides

            # Detect which checkpoint is it
            for name in quantization_methods:
                method = me_quant.get_quantization_config(name)
                quantization_override = method.override_quantization_method(
                    quant_cfg, self.quantization)
                if quantization_override is not None:
                    # Raise error if the override is not custom (custom would
                    # be in QUANTIZATION_METHODS but not QuantizationMethods)
                    # and hasn't been added to the overrides list.
                    if (name in get_args(me_quant.QuantizationMethods)
                            and name not in overrides):
                        raise ValueError(
                            f"Quantization method {name} is an override but "
                            "is has not been added to the `overrides` list "
                            "above. This is necessary to ensure that the "
                            "overrides are checked in order of preference.")
                    quant_method = quantization_override
                    self.quantization = quantization_override
                    break

            quant_method = quant_method if quant_method != "" else None
            # Verify quantization configurations.
            if self.quantization is None:
                self.quantization = quant_method
            elif self.quantization != quant_method:
                raise ValueError(
                    "Quantization method specified in the model config "
                    f"({quant_method}) does not match the quantization "
                    f"method specified in the `quantization` argument "
                    f"({self.quantization}).")

        if self.quantization is not None:
            if self.quantization not in supported_quantization:
                raise ValueError(
                    f"Unknown quantization method: {self.quantization}. Must "
                    f"be one of {supported_quantization}.")
            from vllm.platforms import current_platform
            current_platform.verify_quantization(self.quantization)

    def _verify_cuda_graph(self) -> None:
        # CUDAGraph capture not supported for encoder-decoder models on ROCm
        unsupported_rocm = self.is_encoder_decoder
        if (unsupported_rocm and not self.enforce_eager
                and current_platform.is_rocm()):
            logger.warning(
                "CUDA graph is not supported for %s on ROCm yet, fallback "
                "to eager mode.", self.hf_config.model_type)
            self.enforce_eager = True

    def _verify_bnb_config(self) -> None:
        """
        The current version of bitsandbytes (0.46.1) with 8-bit models does not
        yet support CUDA graph.
        # TODO: Remove this when bitsandbytes supports it.
        """
        is_bitsandbytes = self.quantization == "bitsandbytes"
        has_quantization_config = (getattr(self.hf_config,
                                           "quantization_config", None)
                                   is not None)
        is_8bit = (self.hf_config.quantization_config.get(
            "load_in_8bit", False) if has_quantization_config else False)
        if all([
                is_bitsandbytes,
                has_quantization_config,
                is_8bit,
                not self.enforce_eager,
        ]):
            logger.warning(
                "CUDA graph is not supported on BitsAndBytes 8bit yet, "
                "fallback to the eager mode.")

            self.enforce_eager = True

    def _verify_with_expert_parallelism(self) -> None:
        num_expert_names = [
            "moe_num_experts",  # Dbrx
            "num_experts",  # Jamba
            "n_routed_experts",  # DeepSeek
            "num_local_experts",  # Mixtral
        ]
        num_experts = 0
        for name in num_expert_names:
            num_experts = getattr(self.hf_text_config, name, 0)
            if num_experts > 0:
                break
        if num_experts < 1:
            raise ValueError(
                "Number of experts in the model must be greater than 0 "
                "when expert parallelism is enabled.")

    def verify_dual_chunk_attention_config(
        self,
        load_config: LoadConfig,
    ) -> None:
        if hasattr(self.hf_config, "dual_chunk_attention_config"):
            # Try loading the sparse attention config
            from vllm.model_executor.model_loader.weight_utils import (
                get_sparse_attention_config)
            sparse_attn_config = get_sparse_attention_config(self, load_config)
            if sparse_attn_config:
                self.hf_config.dual_chunk_attention_config[
                    "sparse_attention_config"] = sparse_attn_config
                if "sparse_attention_enabled" not in \
                        self.hf_config.dual_chunk_attention_config:
                    self.hf_config.dual_chunk_attention_config[
                        "sparse_attention_enabled"] = True

    def verify_with_parallel_config(
        self,
        parallel_config: ParallelConfig,
    ) -> None:

        if parallel_config.distributed_executor_backend == "external_launcher":
            assert self.seed is not None, (
                "Seed must be set when using external launcher backend to "
                "make sure sampling results are the same across workers.")

        total_num_attention_heads = getattr(self.hf_text_config,
                                            "num_attention_heads", 0)
        tensor_parallel_size = parallel_config.tensor_parallel_size
        if total_num_attention_heads % tensor_parallel_size != 0:
            raise ValueError(
                f"Total number of attention heads ({total_num_attention_heads})"
                " must be divisible by tensor parallel size "
                f"({tensor_parallel_size}).")

        if parallel_config.enable_expert_parallel:
            self._verify_with_expert_parallelism()

        pipeline_parallel_size = parallel_config.pipeline_parallel_size
        if (pipeline_parallel_size > 1
                and not self.registry.is_pp_supported_model(
                    self.architectures, self)):
            raise NotImplementedError(
                "Pipeline parallelism is not supported for this model. "
                "Supported models implement the `SupportsPP` interface.")

    def get_sliding_window(self) -> Optional[int]:
        """Get the sliding window size from the HF text config if present."""
        return getattr(self.hf_text_config, "sliding_window", None)

    def get_vocab_size(self) -> int:
        return getattr(self.hf_text_config, "vocab_size", 0)

    def get_hidden_size(self) -> int:
        return getattr(self.hf_text_config, "hidden_size", 0)

    @property
    def is_deepseek_mla(self) -> bool:
        if not hasattr(self.hf_text_config, "model_type"):
            return False
        elif self.hf_text_config.model_type in \
            ('deepseek_v2', 'deepseek_v3', 'deepseek_v32', 'deepseek_mtp',
              'kimi_k2', 'longcat_flash'):
            return self.hf_text_config.kv_lora_rank is not None
        elif self.hf_text_config.model_type == 'eagle':
            # if the model is an EAGLE module, check for the
            # underlying architecture
            return self.hf_text_config.model.model_type in \
                    ('deepseek_v2', 'deepseek_v3', 'deepseek_v32') \
                and self.hf_text_config.kv_lora_rank is not None
        return False

    def get_head_size(self) -> int:
        # TODO remove hard code
        if self.is_deepseek_mla:
            qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim",
                                       0)
            if self.use_mla:
                return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
            else:
                qk_nope_head_dim = getattr(self.hf_text_config,
                                           "qk_nope_head_dim", 0)
                if qk_rope_head_dim and qk_nope_head_dim:
                    return qk_rope_head_dim + qk_nope_head_dim

        if hasattr(self.hf_text_config,
                   "model_type") and (self.hf_text_config.model_type
                                      == "zamba2"):
            return self.hf_text_config.attention_head_dim

        if self.is_attention_free:
            return 0

        # NOTE: Some configs may set head_dim=None in the config
        if getattr(self.hf_text_config, "head_dim", None) is not None:
            return self.hf_text_config.head_dim

        # NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
        if getattr(self.hf_text_config, "hidden_size_per_head",
                   None) is not None:
            return self.hf_text_config.hidden_size_per_head

        # FIXME(woosuk): This may not be true for all models.
        return (self.hf_text_config.hidden_size //
                self.hf_text_config.num_attention_heads)

    def get_total_num_kv_heads(self) -> int:
        """Returns the total number of KV heads."""
        # For GPTBigCode & Falcon:
        # NOTE: for falcon, when new_decoder_architecture is True, the
        # multi_query flag is ignored and we use n_head_kv for the number of
        # KV heads.
        falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
        new_decoder_arch_falcon = (
            self.hf_config.model_type in falcon_model_types
            and getattr(self.hf_config, "new_decoder_architecture", False))
        if not new_decoder_arch_falcon and getattr(self.hf_text_config,
                                                   "multi_query", False):
            # Multi-query attention, only one KV head.
            # Currently, tensor parallelism is not supported in this case.
            return 1

        # For DBRX and MPT
        if self.hf_config.model_type == "mpt":
            if "kv_n_heads" in self.hf_config.attn_config:
                return self.hf_config.attn_config["kv_n_heads"]
            return self.hf_config.num_attention_heads
        if self.hf_config.model_type == "dbrx":
            return getattr(self.hf_config.attn_config, "kv_n_heads",
                           self.hf_config.num_attention_heads)

        if self.hf_config.model_type == "nemotron-nas":
            for block in self.hf_config.block_configs:
                if not block.attention.no_op:
                    return self.hf_config.num_attention_heads \
                        // block.attention.n_heads_in_group

            raise RuntimeError("Couldn't determine number of kv heads")

        if self.is_attention_free:
            return 0

        attributes = [
            # For Falcon:
            "n_head_kv",
            "num_kv_heads",
            # For LLaMA-2:
            "num_key_value_heads",
            # For ChatGLM:
            "multi_query_group_num",
        ]
        for attr in attributes:
            num_kv_heads = getattr(self.hf_text_config, attr, None)
            if num_kv_heads is not None:
                return num_kv_heads

        # For non-grouped-query attention models, the number of KV heads is
        # equal to the number of attention heads.
        return self.hf_text_config.num_attention_heads

    def get_num_kv_heads(self, parallel_config: ParallelConfig) -> int:
        """Returns the number of KV heads per GPU."""
        if self.use_mla:
            # When using MLA during decode it becomes MQA
            return 1

        total_num_kv_heads = self.get_total_num_kv_heads()
        # If tensor parallelism is used, we divide the number of KV heads by
        # the tensor parallel size. We will replicate the KV heads in the
        # case where the number of KV heads is smaller than the tensor
        # parallel size so each GPU has at least one KV head.
        return max(1,
                   total_num_kv_heads // parallel_config.tensor_parallel_size)

    def get_num_attention_heads(self, parallel_config: ParallelConfig) -> int:
        num_heads = getattr(self.hf_text_config, "num_attention_heads", 0)
        return num_heads // parallel_config.tensor_parallel_size

    def get_layers_start_end_indices(
            self, parallel_config: ParallelConfig) -> tuple[int, int]:
        from vllm.distributed.utils import get_pp_indices
        if (self.hf_text_config.model_type == "deepseek_mtp"
                or self.hf_config.model_type == "mimo_mtp"
                or self.hf_config.model_type == "glm4_moe_mtp"
                or self.hf_config.model_type == "ernie_mtp"
                or self.hf_config.model_type == "qwen3_next_mtp"):
            total_num_hidden_layers = getattr(self.hf_text_config,
                                              "num_nextn_predict_layers", 0)
        elif (self.hf_config.model_type == "longcat_flash_mtp"):
            total_num_hidden_layers = getattr(self.hf_text_config,
                                              "num_nextn_predict_layers", 1)
        else:
            total_num_hidden_layers = getattr(self.hf_text_config,
                                              "num_hidden_layers", 0)
        # the layout order is: DP x PP x TP
        pp_rank = (parallel_config.rank // parallel_config.tensor_parallel_size
                   ) % parallel_config.pipeline_parallel_size
        pp_size = parallel_config.pipeline_parallel_size
        start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
        return start, end

    def get_num_layers(self, parallel_config: ParallelConfig) -> int:
        start, end = self.get_layers_start_end_indices(parallel_config)
        return end - start

    def get_num_layers_by_block_type(
        self,
        parallel_config: ParallelConfig,
        block_type: LayerBlockType = LayerBlockType.attention,
    ) -> int:
        # This function relies on 'layers_block_type' in hf_config;
        # for models without this attribute, we need workarounds like the ones below.
        attn_block_type = block_type == LayerBlockType.attention
        is_transformer = not self.is_hybrid and \
                            not self.has_noops and \
                            not self.is_attention_free
        start, end = self.get_layers_start_end_indices(parallel_config)

        if is_transformer:
            # Handle the basic case first
            return end - start if attn_block_type else 0
        elif self.is_attention_free:
            # Attention-free model.
            # Note that this code assumes there is only one type of
            # attention-free block.
            return 0 if attn_block_type else end - start
        elif self.has_noops:
            block_configs = self.hf_config.block_configs
            return sum(not bc.attention.no_op
                       for bc in block_configs[start:end])
        else:
            # Hybrid model Jamba
            layers_block_type_value = getattr(self.hf_text_config,
                                              "layers_block_type", None)
            if layers_block_type_value is not None:
                if hasattr(self.hf_text_config,
                           "model_type") and (self.hf_text_config.model_type
                                              == "zamba2"):
                    if attn_block_type:
                        return sum(t == "hybrid"
                                   for t in layers_block_type_value[start:end])
                    else:
                        return self.get_num_layers(parallel_config)
                return sum(t == block_type.value
                           for t in layers_block_type_value[start:end])

            # Hybrid model Minimax
            attn_type_list = getattr(self.hf_config, "attn_type_list", None)
            if attn_type_list:
                return sum(t == 1 for t in attn_type_list[start:end])

            # Hybrid model Qwen3Next
            layer_types_value = getattr(self.hf_config, "layer_types", None)
            if layer_types_value is not None:
                if getattr(block_type, "value", block_type) == "attention":
                    return sum(t == "full_attention"
                               for t in layer_types_value[start:end])
                elif getattr(block_type, "value",
                             block_type) == "linear_attention":
                    return sum(t == "linear_attention"
                               for t in layer_types_value[start:end])
                else:
                    return sum(t == getattr(block_type, "value", block_type)
                               for t in layer_types_value[start:end])

            if (layers_block_type_value is None and attn_type_list is None
                    and layer_types_value is None):
                raise ValueError(
                    "The model is a hybrid without a layers_block_type, an "
                    "attn_type_list, or a layer_types in the hf_config; "
                    "cannot determine the number of "
                    f"{block_type.value} layers")

    def get_mamba_chunk_size(self) -> Optional[int]:
        """
        Returns the mamba chunk size if it exists
        """
        # used by e.g. Bamba, FalconH1, Granite, PLaMo2
        chunk_size = getattr(self.hf_text_config, "mamba_chunk_size", None)
        if chunk_size is None:
            # used by e.g. Mamba2, NemotronH, Zamba
            chunk_size = getattr(self.hf_text_config, "chunk_size", None)
        return chunk_size

    def get_multimodal_config(self) -> MultiModalConfig:
        """
        Get the multimodal configuration of the model.

        Raises:
            ValueError: If the model is not multimodal.
        """
        if self.multimodal_config is None:
            raise ValueError("The model is not multimodal.")

        return self.multimodal_config

    def try_get_generation_config(self) -> dict[str, Any]:
        """
        This method attempts to retrieve the non-default values of the
        generation config for this model.

        The generation config can contain information about special tokens, as
        well as sampling parameters, which is why this method exists separately
        from `get_diff_sampling_param`.

        Returns:
            A dictionary containing the non-default generation config.
        """
        if self.generation_config in {"auto", "vllm"}:
            config = try_get_generation_config(
                self.hf_config_path or self.model,
                trust_remote_code=self.trust_remote_code,
                revision=self.revision,
                config_format=self.config_format,
            )
        else:
            config = try_get_generation_config(
                self.generation_config,
                trust_remote_code=self.trust_remote_code,
                config_format=self.config_format,
            )

        if config is None:
            return {}

        return config.to_diff_dict()

    def get_diff_sampling_param(self) -> dict[str, Any]:
        """
        This method returns a dictionary containing the non-default sampling
        parameters with `override_generation_config` applied.

        The default sampling parameters are:

        - vLLM's neutral defaults if `self.generation_config="vllm"`
        - the model's defaults if `self.generation_config="auto"`
        - as defined in `generation_config.json` if
            `self.generation_config="path/to/generation_config/dir"`

        Returns:
            A dictionary containing the non-default sampling parameters.
        """
        if self.generation_config == "vllm":
            config = {}
        else:
            config = self.try_get_generation_config()

        # Overriding with given generation config
        config.update(self.override_generation_config)

        available_params = [
            "repetition_penalty",
            "temperature",
            "top_k",
            "top_p",
            "min_p",
            "max_new_tokens",
        ]
        if any(p in config for p in available_params):
            diff_sampling_param = {
                p: config.get(p)
                for p in available_params if config.get(p) is not None
            }
            # Huggingface definition of max_new_tokens is equivalent
            # to vLLM's max_tokens
            if "max_new_tokens" in diff_sampling_param:
                diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
                    "max_new_tokens")
        else:
            diff_sampling_param = {}

        if diff_sampling_param:
            logger.warning_once(
                "Default sampling parameters have been overridden by the "
                "model's Hugging Face generation config recommended from the "
                "model creator. If this is not intended, please relaunch "
                "vLLM instance with `--generation-config vllm`.")
        return diff_sampling_param

    @property
    def is_encoder_decoder(self) -> bool:
        """Extract the HF encoder/decoder model flag."""
        return is_encoder_decoder(self.hf_config)

    @property
    def uses_mrope(self) -> bool:
        return uses_mrope(self.hf_config)

    @property
    def is_multimodal_model(self) -> bool:
        return self.multimodal_config is not None

    @property
    def is_multimodal_raw_input_only_model(self) -> bool:
        return self._model_info.supports_multimodal_raw_input_only

    @property
    def is_cross_encoder(self) -> bool:
        return (self._model_info.supports_cross_encoding
                or self.convert_type == "classify")

    @property
    def is_pp_supported(self) -> bool:
        return self._model_info.supports_pp

    @property
    def is_attention_free(self) -> bool:
        return self._model_info.is_attention_free

    @property
    def is_hybrid(self) -> bool:
        return self._model_info.is_hybrid

    @property
    def has_noops(self) -> bool:
        return self._model_info.has_noops

    @property
    def has_inner_state(self):
        return self._model_info.has_inner_state

    @property
    def is_v1_compatible(self) -> bool:
        return not self._model_info.supports_v0_only

    @property
    def use_mla(self) -> bool:
        return self.is_deepseek_mla and not envs.VLLM_MLA_DISABLE

    @property
    def is_matryoshka(self) -> bool:
        return (bool(getattr(self.hf_config, "matryoshka_dimensions", None))
                or getattr(self.hf_config, "is_matryoshka", False))

    @property
    def matryoshka_dimensions(self):
        return getattr(self.hf_config, "matryoshka_dimensions", None)

    @property
    def use_pad_token(self) -> bool:
        # cross_encoder models default to using pad_token.
        # `llm as reranker` models default to not using pad_token.
        return getattr(self.hf_config, "use_pad_token", True)

    @property
    def head_dtype(self) -> torch.dtype:
        """
        "head" refers to the last Linear layer(s) of an LLM,
        such as the lm_head in a generation model,
        or the score or classifier in a classification model.

        `head_dtype` currently only supports pooling models.\n
        - Pooling models default to using an fp32 head;
        you can use --hf-overrides '{"head_dtype": "model"}' to disable it.
        """

        head_dtype = _get_head_dtype(config=self.hf_config,
                                     dtype=self.dtype,
                                     runner_type=self.runner_type)

        if self.runner_type != "pooling" and head_dtype != self.dtype:
            logger.warning_once(
                "`head_dtype` currently only supports pooling models."
                "fallback to model dtype [%s].", self.dtype)
            return self.dtype

        if head_dtype not in current_platform.supported_dtypes:
            logger.warning_once(
                "The current platform does not support [%s] head dtype, "
                "fallback to model dtype [%s].", head_dtype, self.dtype)
            return self.dtype

        logger.debug_once("head dtype: %s", head_dtype)
        return head_dtype

    def get_and_verify_max_len(self, max_model_len: int):
        # Consider max_model_len in tokenizer_config only when
        # pooling models use absolute position_embedding.
        tokenizer_config = None
        if (self.runner_type == "pooling" and getattr(
                self.hf_config, "position_embedding_type", "") == "absolute"):
            tokenizer_config = try_get_tokenizer_config(
                self.tokenizer,
                trust_remote_code=self.trust_remote_code,
                revision=self.tokenizer_revision)
        max_model_len = _get_and_verify_max_len(
            hf_config=self.hf_text_config,
            tokenizer_config=tokenizer_config,
            max_model_len=max_model_len,
            disable_sliding_window=self.disable_sliding_window,
            sliding_window=self.get_sliding_window(),
            spec_target_max_model_len=self.spec_target_max_model_len,
            encoder_config=self.encoder_config)
        logger.info("Using max model len %s", max_model_len)
        return max_model_len
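
A minimal standalone sketch (not part of the source above) of the KV-head arithmetic used by get_num_kv_heads: the total KV head count is divided by the tensor parallel size, and heads are replicated so that every GPU keeps at least one.

# Illustrative only; mirrors the max(1, total // tp) logic in get_num_kv_heads.
def kv_heads_per_gpu(total_num_kv_heads: int, tensor_parallel_size: int) -> int:
    return max(1, total_num_kv_heads // tensor_parallel_size)

assert kv_heads_per_gpu(8, 2) == 4   # grouped-query attention, TP=2
assert kv_heads_per_gpu(8, 16) == 1  # fewer KV heads than GPUs -> replicate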

allowed_local_media_path class-attribute instance-attribute

allowed_local_media_path: str = ''

Allow API requests to read local images or videos from directories specified by the server's file system. This is a security risk and should only be enabled in trusted environments.

allowed_media_domains class-attribute instance-attribute

allowed_media_domains: Optional[list[str]] = None

If set, only media URLs that belong to these domains can be used for multi-modal inputs.

architecture property

architecture: str

The architecture vllm actually used.

architectures property

architectures: list[str]

code_revision class-attribute instance-attribute

code_revision: Optional[str] = None

The specific revision to use for the model code on the Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.

config_format class-attribute instance-attribute

config_format: Union[str, ConfigFormat] = 'auto'

The format of the model config to load:

  • "auto" will try to load the config in hf format if available else it will try to load in mistral format.

  • "hf" will load the config in hf format.

  • "mistral" will load the config in mistral format.

convert class-attribute instance-attribute

convert: ConvertOption = 'auto'

Convert the model using adapters defined in vllm.model_executor.models.adapters. The most common use case is to adapt a text generation model to be used for pooling tasks.
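
A minimal sketch, assuming the offline LLM entrypoint forwards the runner and convert options and that the model name shown is available, of adapting a generative checkpoint for embedding:

from vllm import LLM

# Hedged example: the model name and option pass-through are assumptions.
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    runner="pooling",   # run the model with the pooling runner
    convert="embed",    # adapt the text-generation model into an embedding model
)
outputs = llm.embed(["Adapters can turn a generative model into a pooling model."])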

disable_cascade_attn class-attribute instance-attribute

disable_cascade_attn: bool = False

Disable cascade attention for V1. While cascade attention does not change the mathematical correctness, disabling it could be useful for preventing potential numerical issues. Note that even if this is set to False, cascade attention will only be used when the heuristic determines that it is beneficial.

disable_sliding_window class-attribute instance-attribute

disable_sliding_window: bool = False

Whether to disable sliding window. If True, we will disable the sliding window functionality of the model, capping the max model length to the sliding window size. If the model does not support sliding window, this argument is ignored.

dtype class-attribute instance-attribute

dtype: Union[ModelDType, dtype] = 'auto'

Data type for model weights and activations:

  • "auto" will use FP16 precision for FP32 and FP16 models, and BF16 precision for BF16 models.

  • "half" for FP16. Recommended for AWQ quantization.

  • "float16" is the same as "half".

  • "bfloat16" for a balance between precision and range.

  • "float" is shorthand for FP32 precision.

  • "float32" for FP32 precision.

enable_prompt_embeds class-attribute instance-attribute

enable_prompt_embeds: bool = False

If True, enables passing text embeddings as inputs via the prompt_embeds key. Note that enabling this will double the time required for graph compilation.
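
A minimal sketch of passing embeddings instead of text, assuming the LLM entrypoint accepts enable_prompt_embeds and a prompt dict with a prompt_embeds key; the tensor shape and model name are illustrative:

import torch
from vllm import LLM, SamplingParams

llm = LLM(model="Qwen/Qwen3-0.6B", enable_prompt_embeds=True)

# Shape [num_tokens, hidden_size]; the hidden size must match the model's.
prompt_embeds = torch.randn(8, 1024, dtype=torch.bfloat16)
outputs = llm.generate({"prompt_embeds": prompt_embeds},
                       SamplingParams(max_tokens=16))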

enable_sleep_mode class-attribute instance-attribute

enable_sleep_mode: bool = False

Enable sleep mode for the engine (only cuda platform is supported).

enforce_eager class-attribute instance-attribute

enforce_eager: bool = False

Whether to always use eager-mode PyTorch. If True, we will disable CUDA graph and always execute the model in eager mode. If False, we will use CUDA graph and eager execution in hybrid for maximal performance and flexibility.

generation_config class-attribute instance-attribute

generation_config: str = 'auto'

The folder path to the generation config. Defaults to "auto", in which case the generation config is loaded from the model path. If set to "vllm", no generation config is loaded and vLLM defaults are used. If set to a folder path, the generation config is loaded from the specified folder path. If max_new_tokens is specified in the generation config, it sets a server-wide limit on the number of output tokens for all requests.
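
A minimal sketch, assuming generation_config is forwarded by the LLM entrypoint, of ignoring the model's bundled generation defaults:

from vllm import LLM

# "auto" (the default) reads generation_config.json from the model repo;
# "vllm" skips it so only vLLM's neutral sampling defaults apply.
llm = LLM(model="Qwen/Qwen3-0.6B", generation_config="vllm")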

has_inner_state property

has_inner_state

has_noops property

has_noops: bool

head_dtype property

head_dtype: dtype

"head" refers to the last Linear layer(s) of an LLM, such as the lm_head in a generation model, or the score or classifier in a classification model.

head_dtype currently only supports pooling models.

  • Pooling models default to using an fp32 head; you can use --hf-overrides '{"head_dtype": "model"}' to disable it.

hf_config_path class-attribute instance-attribute

hf_config_path: Optional[str] = None

Name or path of the Hugging Face config to use. If unspecified, model name or path will be used.

hf_overrides class-attribute instance-attribute

hf_overrides: HfOverrides = field(default_factory=dict)

If a dictionary, contains arguments to be forwarded to the Hugging Face config. If a callable, it is called to update the HuggingFace config.
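
A minimal sketch of both forms, assuming the LLM entrypoint forwards hf_overrides; the model name and override values are illustrative:

from transformers import PretrainedConfig
from vllm import LLM

# Dictionary form: keys are merged into the loaded Hugging Face config.
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    hf_overrides={"rope_scaling": {"rope_type": "dynamic", "factor": 2.0}},
)

# Callable form: receives the loaded config and returns the updated one.
def raise_rope_theta(config: PretrainedConfig) -> PretrainedConfig:
    config.rope_theta = 1_000_000.0
    return config

llm = LLM(model="Qwen/Qwen3-0.6B", hf_overrides=raise_rope_theta)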

hf_token class-attribute instance-attribute

hf_token: Optional[Union[bool, str]] = None

The token to use as HTTP bearer authorization for remote files. If True, will use the token generated when running huggingface-cli login (stored in ~/.huggingface).

io_processor_plugin class-attribute instance-attribute

io_processor_plugin: Optional[str] = None

IOProcessor plugin name to load at model startup

is_attention_free property

is_attention_free: bool

is_cross_encoder property

is_cross_encoder: bool

is_deepseek_mla property

is_deepseek_mla: bool

is_encoder_decoder property

is_encoder_decoder: bool

Extract the HF encoder/decoder model flag.

is_hybrid property

is_hybrid: bool

is_matryoshka property

is_matryoshka: bool

is_multimodal_model property

is_multimodal_model: bool

is_multimodal_raw_input_only_model property

is_multimodal_raw_input_only_model: bool

is_pp_supported property

is_pp_supported: bool

is_v1_compatible property

is_v1_compatible: bool

logits_processor_pattern class-attribute instance-attribute

logits_processor_pattern: Optional[str] = None

Optional regex pattern specifying valid logits processor qualified names that can be passed with the logits_processors extra completion argument. Defaults to None, which allows no processors.

logits_processors class-attribute instance-attribute

logits_processors: Optional[
    list[Union[str, type[LogitsProcessor]]]
] = None

One or more logits processors' fully-qualified class names or class definitions

logprobs_mode class-attribute instance-attribute

logprobs_mode: LogprobsMode = 'raw_logprobs'

Indicates the content returned in the logprobs and prompt_logprobs. Supported modes: 1) raw_logprobs, 2) processed_logprobs, 3) raw_logits, 4) processed_logits. Raw means the values before applying any logit processors, like bad words. Processed means the values after applying all processors, including temperature and top_k/top_p.
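
A minimal sketch, assuming logprobs_mode is forwarded by the LLM entrypoint, of requesting post-processing logprobs:

from vllm import LLM, SamplingParams

llm = LLM(model="Qwen/Qwen3-0.6B", logprobs_mode="processed_logprobs")
# logprobs=5 returns the top-5 values per step in the selected mode.
outputs = llm.generate("Hello, my name is",
                       SamplingParams(max_tokens=8, logprobs=5))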

matryoshka_dimensions property

matryoshka_dimensions

max_logprobs class-attribute instance-attribute

max_logprobs: int = 20

Maximum number of log probabilities to return when logprobs is specified in SamplingParams. The default value matches the default for the OpenAI Chat Completions API. -1 means no cap, i.e. all (output_length * vocab_size) logprobs may be returned, which can cause OOM.

max_model_len class-attribute instance-attribute

max_model_len: SkipValidation[int] = None

Model context length (prompt and output). If unspecified, will be automatically derived from the model config.

When passing via --max-model-len, supports k/m/g/K/M/G in human-readable format. Examples:

  • 1k -> 1000

  • 1K -> 1024

  • 25.6k -> 25,600

model class-attribute instance-attribute

model: str = 'Qwen/Qwen3-0.6B'

Name or path of the Hugging Face model to use. It is also used as the content for model_name tag in metrics output when served_model_name is not specified.

model_impl class-attribute instance-attribute

model_impl: Union[str, ModelImpl] = 'auto'

Which implementation of the model to use:

  • "auto" will try to use the vLLM implementation, if it exists, and fall back to the Transformers implementation if no vLLM implementation is available.

  • "vllm" will use the vLLM model implementation.

  • "transformers" will use the Transformers model implementation.

  • "terratorch" will use the TerraTorch model implementation.

multimodal_config class-attribute instance-attribute

multimodal_config: Optional[MultiModalConfig] = None

Configuration for multimodal model. If None, this will be inferred from the architecture of self.model.

override_attention_dtype class-attribute instance-attribute

override_attention_dtype: Optional[str] = None

Override dtype for attention

override_generation_config class-attribute instance-attribute

override_generation_config: dict[str, Any] = field(
    default_factory=dict
)

Overrides or sets the generation config, e.g. {"temperature": 0.5}. If used with --generation-config auto, the override parameters will be merged with the default config from the model. If used with --generation-config vllm, only the override parameters are used.
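
A minimal sketch, assuming both options are forwarded by the LLM entrypoint, that merges overrides into the model's own defaults:

from vllm import LLM

# With generation_config="auto" the overrides are merged into the model's
# generation_config.json; with "vllm" only the overrides would apply.
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    generation_config="auto",
    override_generation_config={"temperature": 0.5, "top_p": 0.9},
)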

override_pooler_config class-attribute instance-attribute

override_pooler_config: Optional[
    Union[dict, PoolerConfig]
] = None

[DEPRECATED] Use pooler_config instead. This field will be removed in v0.12.0 or v1.0.0, whichever is sooner.

pooler_config class-attribute instance-attribute

pooler_config: Optional[PoolerConfig] = None

Pooler config which controls the behaviour of output pooling in pooling models.

quantization class-attribute instance-attribute

quantization: SkipValidation[
    Optional[QuantizationMethods]
] = None

Method used to quantize the weights. If None, we first check the quantization_config attribute in the model config file. If that is None, we assume the model weights are not quantized and use dtype to determine the data type of the weights.
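
A minimal sketch; the checkpoint name is illustrative, and for pre-quantized models the method is normally detected from quantization_config, so passing quantization explicitly is only needed to disambiguate or override:

from vllm import LLM

# Pre-quantized AWQ checkpoint (name assumed for illustration only).
llm = LLM(model="TheBloke/Llama-2-7B-AWQ", quantization="awq")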

registry property

registry

revision class-attribute instance-attribute

revision: Optional[str] = None

The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.

rope_scaling class-attribute instance-attribute

rope_scaling: dict[str, Any] = field(default_factory=dict)

RoPE scaling configuration. For example, {"rope_type":"dynamic","factor":2.0}.

rope_theta class-attribute instance-attribute

rope_theta: Optional[float] = None

RoPE theta. Use with rope_scaling. In some cases, changing the RoPE theta improves the performance of the scaled model.

runner class-attribute instance-attribute

runner: RunnerOption = 'auto'

The type of model runner to use. Each vLLM instance only supports one model runner, even if the same model can be used for multiple types.

seed class-attribute instance-attribute

seed: Optional[int] = None

Random seed for reproducibility. Initialized to None in V0, but initialized to 0 in V1.

served_model_name class-attribute instance-attribute

served_model_name: Optional[Union[str, list[str]]] = None

The model name(s) used in the API. If multiple names are provided, the server will respond to any of them. The model name in the model field of a response will be the first name in this list. If not specified, the model name will be the same as the --model argument. Note that these name(s) will also be used in the model_name tag of Prometheus metrics; if multiple names are provided, the metrics tag will use the first one.
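
A minimal sketch, assuming served_model_name is forwarded by the LLM entrypoint; the option mainly matters when serving via the API server, and the aliases here are illustrative. The first alias is what appears in responses and in the model_name metrics tag.

from vllm import LLM

llm = LLM(
    model="Qwen/Qwen3-0.6B",
    served_model_name=["my-model", "my-model-alias"],
)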

skip_tokenizer_init class-attribute instance-attribute

skip_tokenizer_init: bool = False

Skip initialization of the tokenizer and detokenizer. Expects valid prompt_token_ids (and None for the prompt) in the input. The generated output will contain token ids.
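
A minimal sketch, assuming the LLM entrypoint forwards skip_tokenizer_init and accepts TokensPrompt inputs; the token ids and model name are placeholders:

from vllm import LLM, SamplingParams
from vllm.inputs import TokensPrompt

llm = LLM(model="Qwen/Qwen3-0.6B", skip_tokenizer_init=True)

# No tokenizer is loaded, so inputs are token ids and outputs stay token ids.
outputs = llm.generate(TokensPrompt(prompt_token_ids=[1, 2, 3]),
                       SamplingParams(max_tokens=8, detokenize=False))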

spec_target_max_model_len class-attribute instance-attribute

spec_target_max_model_len: Optional[int] = None

Specify the maximum length for spec decoding draft models.

task class-attribute instance-attribute

task: Optional[TaskOption] = None

[DEPRECATED] The task to use the model for. If the model supports more than one model runner, this is used to select which model runner to run.

Note that the model may support other tasks using the same model runner.

tokenizer class-attribute instance-attribute

tokenizer: SkipValidation[str] = None

Name or path of the Hugging Face tokenizer to use. If unspecified, model name or path will be used.

tokenizer_mode class-attribute instance-attribute

tokenizer_mode: TokenizerMode = 'auto'

Tokenizer mode:

  • "auto" will use the fast tokenizer if available.

  • "slow" will always use the slow tokenizer.

  • "mistral" will always use the tokenizer from mistral_common.

  • "custom" will use --tokenizer to select the preregistered tokenizer.

tokenizer_revision class-attribute instance-attribute

tokenizer_revision: Optional[str] = None

The specific revision to use for the tokenizer on the Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.

trust_remote_code class-attribute instance-attribute

trust_remote_code: bool = False

Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.

use_mla property

use_mla: bool

use_pad_token property

use_pad_token: bool

uses_mrope property

uses_mrope: bool

__post_init__

__post_init__(
    limit_mm_per_prompt: Optional[dict[str, int]],
    media_io_kwargs: Optional[dict[str, dict[str, Any]]],
    mm_processor_kwargs: Optional[dict[str, Any]],
    mm_processor_cache_gb: Optional[float],
    mm_processor_cache_type: Optional[MMCacheType],
    mm_shm_cache_max_object_size_mb: Optional[int],
    mm_encoder_tp_mode: Optional[MMEncoderTPMode],
    interleave_mm_strings: Optional[bool],
    skip_mm_profiling: Optional[bool],
    video_pruning_rate: Optional[float],
) -> None
Source code in vllm/config/model.py
def __post_init__(
    self,
    # Multimodal config init vars
    limit_mm_per_prompt: Optional[dict[str, int]],
    media_io_kwargs: Optional[dict[str, dict[str, Any]]],
    mm_processor_kwargs: Optional[dict[str, Any]],
    mm_processor_cache_gb: Optional[float],
    mm_processor_cache_type: Optional[MMCacheType],
    mm_shm_cache_max_object_size_mb: Optional[int],
    mm_encoder_tp_mode: Optional[MMEncoderTPMode],
    interleave_mm_strings: Optional[bool],
    skip_mm_profiling: Optional[bool],
    video_pruning_rate: Optional[float],
) -> None:
    # Set the default seed to 0 in V1.
    # NOTE(woosuk): In V0, we set the default seed to None because the
    # driver worker shares the same process as the user process, and thus
    # setting a seed affects the user process as well.
    # In V1, we use separate processes for workers (unless
    # VLLM_ENABLE_V1_MULTIPROCESSING=0), so setting a seed here
    # doesn't affect the user process. However, without a consistent seed,
    # different tensor parallel workers would sample different tokens,
    # leading to inconsistent results.
    if envs.VLLM_USE_V1 and self.seed is None:
        self.seed = 0
        if not envs.VLLM_ENABLE_V1_MULTIPROCESSING:
            logger.warning(
                "The global random seed is set to %d. Since "
                "VLLM_ENABLE_V1_MULTIPROCESSING is set to False, this may "
                "affect the random state of the Python process that "
                "launched vLLM.", self.seed)

    # Note: served_model_name must be set before maybe_model_redirect(self.model)
    self.served_model_name = get_served_model_name(self.model,
                                                   self.served_model_name)
    self.model = maybe_model_redirect(self.model)
    # The tokenizer is consistent with the model by default.
    if self.tokenizer is None:
        self.tokenizer = self.model
    if self.tokenizer_revision is None:
        self.tokenizer_revision = self.revision
    self.tokenizer = maybe_model_redirect(self.tokenizer)

    if isinstance(self.hf_config_path, str):
        self.hf_config_path = maybe_model_redirect(self.hf_config_path)

    if callable(self.hf_overrides):
        hf_overrides_kw = {}
        hf_overrides_fn = self.hf_overrides
    else:
        hf_overrides_kw = self.hf_overrides
        hf_overrides_fn = None

    if self.rope_scaling:
        hf_override: dict[str, Any] = {"rope_scaling": self.rope_scaling}
        hf_overrides_kw.update(hf_override)
        hf_overrides_str = json.dumps(hf_overrides_kw)
        msg = (
            "`--rope-scaling` will be removed in a future release. "
            f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
        warnings.warn(DeprecationWarning(msg), stacklevel=2)
    if self.rope_theta is not None:
        hf_override = {"rope_theta": self.rope_theta}
        hf_overrides_kw.update(hf_override)
        hf_overrides_str = json.dumps(hf_overrides_kw)
        msg = (
            "`--rope-theta` will be removed in a future release. "
            f"'Please instead use `--hf-overrides '{hf_overrides_str}'`")
        warnings.warn(DeprecationWarning(msg), stacklevel=2)

    self.maybe_pull_model_tokenizer_for_runai(self.model, self.tokenizer)

    if (backend := envs.VLLM_ATTENTION_BACKEND
        ) and backend == "FLASHINFER" and find_spec("flashinfer") is None:
        raise ValueError(
            "VLLM_ATTENTION_BACKEND is set to FLASHINFER, but flashinfer "
            "module was not found. See "
            "https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile "  # noqa: E501
            "for instructions on how to install it.")

    from vllm.platforms import current_platform

    if (self.override_attention_dtype is not None
            and not current_platform.is_rocm()):
        warnings.warn(
            "override-attention-dtype is set but not using ROCm platform",
            stacklevel=2)

    if (self.enable_sleep_mode
            and not current_platform.is_sleep_mode_available()):
        raise ValueError(
            "Sleep mode is not supported on current platform.")

    hf_config = get_config(self.hf_config_path or self.model,
                           self.trust_remote_code,
                           self.revision,
                           self.code_revision,
                           self.config_format,
                           hf_overrides_kw=hf_overrides_kw,
                           hf_overrides_fn=hf_overrides_fn)

    self.hf_config = hf_config
    self.hf_text_config = get_hf_text_config(self.hf_config)
    self.attention_chunk_size = getattr(self.hf_text_config,
                                        "attention_chunk_size", None)
    self.encoder_config = self._get_encoder_config()
    self.hf_image_processor_config = get_hf_image_processor_config(
        self.model, hf_token=self.hf_token, revision=self.revision)

    architectures = self.architectures
    registry = self.registry
    is_generative_model = registry.is_text_generation_model(
        architectures, self)
    is_pooling_model = registry.is_pooling_model(architectures, self)

    def _task_to_convert(task: TaskOption) -> ConvertType:
        if task == "embedding" or task == "embed":
            return "embed"
        if task == "classify":
            return "classify"
        if task == "reward":
            return "reward"
        if task == "score":
            new_task = self._get_default_pooling_task(architectures)
            return "classify" if new_task == "classify" else "embed"

        return "none"

    if self.task is not None:
        runner: RunnerOption = "auto"
        convert: ConvertOption = "auto"
        msg_prefix = ("The 'task' option has been deprecated and will be "
                      "removed in v0.13.0 or v1.0, whichever comes first.")
        msg_hint = "Please remove this option."

        is_generative_task = self.task in _RUNNER_TASKS["generate"]
        is_pooling_task = self.task in _RUNNER_TASKS["pooling"]

        if is_generative_model and is_pooling_model:
            if is_generative_task:
                runner = "generate"
                convert = "auto"
                msg_hint = ("Please replace this option with `--runner "
                            "generate` to continue using this model "
                            "as a generative model.")
            elif is_pooling_task:
                runner = "pooling"
                convert = "auto"
                msg_hint = ("Please replace this option with `--runner "
                            "pooling` to continue using this model "
                            "as a pooling model.")
            else:  # task == "auto"
                pass
        elif is_generative_model or is_pooling_model:
            if is_generative_task:
                runner = "generate"
                convert = "auto"
                msg_hint = "Please remove this option"
            elif is_pooling_task:
                runner = "pooling"
                convert = _task_to_convert(self.task)
                msg_hint = ("Please replace this option with `--convert "
                            f"{convert}` to continue using this model "
                            "as a pooling model.")
            else:  # task == "auto"
                pass
        else:
            debug_info = {
                "architectures": architectures,
                "is_generative_model": is_generative_model,
                "is_pooling_model": is_pooling_model,
            }
            raise AssertionError("The model should be a generative or "
                                 "pooling model when task is set to "
                                 f"{self.task!r}. Found: {debug_info}")

        self.runner = runner
        self.convert = convert

        msg = f"{msg_prefix} {msg_hint}"
        warnings.warn(msg, DeprecationWarning, stacklevel=2)

    self.runner_type = self._get_runner_type(architectures, self.runner)
    self.convert_type = self._get_convert_type(architectures,
                                               self.runner_type,
                                               self.convert)

    if self.runner_type == "generate" and not is_generative_model:
        generate_converts = _RUNNER_CONVERTS["generate"]
        if self.convert_type not in generate_converts:
            # Currently we don't have any converters for generative models
            raise ValueError(
                "This model does not support `--runner generate`.")
    if self.runner_type == "pooling" and not is_pooling_model:
        pooling_converts = _RUNNER_CONVERTS["pooling"]
        if self.convert_type not in pooling_converts:
            convert_option = "<" + "|".join(pooling_converts) + ">"
            raise ValueError(
                "This model does not support `--runner pooling`. "
                f"You can pass `--convert {convert_option} to adapt "
                "it into a pooling model.")

    # Note: Initialize these attributes early because transformers fallback
    # may fail to load dynamic modules in child processes
    model_info, arch = registry.inspect_model_cls(architectures, self)
    self._model_info = model_info
    self._architecture = arch
    logger.info("Resolved architecture: %s", arch)

    # Init pooler config if needed
    if self.runner_type == "pooling":
        if self.override_pooler_config is not None:
            logger.warning_once(
                "`override_pooler_config` is deprecated and will be "
                "removed in v0.12.0 or v1.0.0, whichever is sooner. "
                "Please use `pooler_config` instead.")

            if isinstance(self.override_pooler_config, dict):
                self.pooler_config = PoolerConfig(
                    **self.override_pooler_config)
            else:
                self.pooler_config = self.override_pooler_config

        if self.pooler_config is None:
            self.pooler_config = PoolerConfig()

        base_config = get_pooling_config(self.model, self.revision)
        if base_config is not None:
            # Only set values that are not overridden by the user
            for k, v in base_config.items():
                if getattr(self.pooler_config, k) is None:
                    setattr(self.pooler_config, k, v)

        default_pooling_type = self._model_info.default_pooling_type
        if self.pooler_config.pooling_type is None:
            self.pooler_config.pooling_type = default_pooling_type

    self.dtype: torch.dtype = _get_and_verify_dtype(
        self.model,
        self.hf_config,
        self.dtype,
        is_pooling_model=self.runner_type == "pooling",
        revision=self.revision,
    )

    # Interleaved attention is not supported by some backends in V0
    if (not self.disable_sliding_window
            and is_interleaved(self.hf_text_config)
            and not envs.VLLM_USE_V1
            and (backend := envs.VLLM_ATTENTION_BACKEND)
            in ("XFORMERS", "FLASHINFER")):
        logger.warning_once(
            "%s has interleaved attention, which is currently not "
            "supported by the %s backend. Disabling sliding window and "
            "capping the max length to the sliding window size (%d).",
            self.hf_text_config.model_type,
            backend,
            self.hf_text_config.sliding_window,
        )
        self.disable_sliding_window = True

    self.original_max_model_len = self.max_model_len
    self.max_model_len = self.get_and_verify_max_len(self.max_model_len)
    # Init multimodal config if needed
    if self._model_info.supports_multimodal:
        if (mm_encoder_tp_mode == "data" and
                not self._model_info.supports_multimodal_encoder_tp_data):
            logger.warning_once(
                "This model does not support `--mm-encoder-tp-mode data`. "
                "Falling back to `--mm-encoder-tp-mode weights`.")
            mm_encoder_tp_mode = "weights"

        mm_config_kwargs = dict(
            limit_per_prompt=limit_mm_per_prompt,
            media_io_kwargs=media_io_kwargs,
            mm_processor_kwargs=mm_processor_kwargs,
            mm_processor_cache_gb=mm_processor_cache_gb,
            mm_processor_cache_type=mm_processor_cache_type,
            mm_shm_cache_max_object_size_mb=mm_shm_cache_max_object_size_mb,
            mm_encoder_tp_mode=mm_encoder_tp_mode,
            interleave_mm_strings=interleave_mm_strings,
            skip_mm_profiling=skip_mm_profiling,
            video_pruning_rate=video_pruning_rate,
        )

        mm_config_kwargs = {
            k: v
            for k, v in mm_config_kwargs.items() if v is not None
        }

        self.multimodal_config = MultiModalConfig(**mm_config_kwargs)

    if self.disable_sliding_window:
        # Set after get_and_verify_max_len to ensure that max_model_len
        # can be correctly capped to sliding window size
        self.hf_text_config.sliding_window = None

    if not self.skip_tokenizer_init:
        self._verify_tokenizer_mode()

    # Avoid running try_verify_and_update_config multiple times
    self.config_updated = False

    self._verify_quantization()
    self._verify_cuda_graph()
    self._verify_bnb_config()

_get_convert_type

_get_convert_type(
    architectures: list[str],
    runner_type: RunnerType,
    convert: ConvertOption,
) -> ConvertType
Source code in vllm/config/model.py
def _get_convert_type(
    self,
    architectures: list[str],
    runner_type: RunnerType,
    convert: ConvertOption,
) -> ConvertType:
    if convert != "auto":
        return convert

    convert_type = self._get_default_convert_type(architectures,
                                                  runner_type)

    # Don't log the most common case
    if convert_type != "none":
        logger.info(
            "Resolved `--convert auto` to `--convert %s`. "
            "Pass the value explicitly to silence this message.",
            convert_type)

    return convert_type

_get_default_convert_type

_get_default_convert_type(
    architectures: list[str], runner_type: RunnerType
) -> ConvertType
Source code in vllm/config/model.py
def _get_default_convert_type(
    self,
    architectures: list[str],
    runner_type: RunnerType,
) -> ConvertType:
    registry = self.registry

    for arch in architectures:
        if arch in registry.get_supported_archs():
            if (runner_type == "generate"
                    and registry.is_text_generation_model(
                        architectures, self)):
                return "none"
            if (runner_type == "pooling"
                    and registry.is_pooling_model(architectures, self)):
                return "none"

        match = try_match_architecture_defaults(arch,
                                                runner_type=runner_type)
        if match:
            _, (_, convert_type) = match
            return convert_type

    # This is to handle Sentence Transformers models that use *ForCausalLM
    # and also multi-modal pooling models which are not defined as
    # Sentence Transformers models
    if runner_type == "pooling":
        return "embed"

    return "none"

_get_default_pooling_task

_get_default_pooling_task(
    architectures: list[str],
) -> Literal["embed", "classify", "reward"]
Source code in vllm/config/model.py
def _get_default_pooling_task(
    self,
    architectures: list[str],
) -> Literal["embed", "classify", "reward"]:
    if self.registry.is_cross_encoder_model(architectures, self):
        return "classify"

    for arch in architectures:
        match = try_match_architecture_defaults(arch,
                                                runner_type="pooling")
        if match:
            _, (_, convert_type) = match
            assert convert_type != "none"
            return convert_type

    return "embed"

_get_default_runner_type

_get_default_runner_type(
    architectures: list[str],
) -> RunnerType
Source code in vllm/config/model.py
def _get_default_runner_type(
    self,
    architectures: list[str],
) -> RunnerType:
    registry = self.registry

    # Some Sentence Transformers models use *ForCausalLM archs
    if get_pooling_config(self.model, self.revision):
        return "pooling"

    for arch in architectures:
        if arch in registry.get_supported_archs():
            if registry.is_pooling_model(architectures, self):
                return "pooling"
            if registry.is_text_generation_model(architectures, self):
                return "generate"

        match = try_match_architecture_defaults(arch)
        if match:
            _, (runner_type, _) = match
            return runner_type

    return "generate"

_get_encoder_config

_get_encoder_config()
Source code in vllm/config/model.py
def _get_encoder_config(self):
    return get_sentence_transformer_tokenizer_config(
        self.model, self.revision)

_get_runner_type

_get_runner_type(
    architectures: list[str], runner: RunnerOption
) -> RunnerType
Source code in vllm/config/model.py
def _get_runner_type(
    self,
    architectures: list[str],
    runner: RunnerOption,
) -> RunnerType:
    if runner != "auto":
        return runner

    runner_type = self._get_default_runner_type(architectures)

    # Don't log the most common case
    if runner_type != "generate":
        logger.info(
            "Resolved `--runner auto` to `--runner %s`. "
            "Pass the value explicitly to silence this message.",
            runner_type)

    return runner_type

_get_transformers_backend_cls

_get_transformers_backend_cls() -> str

Determine which Transformers backend class will be used if model_impl is set to transformers or auto.

Source code in vllm/config/model.py
def _get_transformers_backend_cls(self) -> str:
    """Determine which Transformers backend class will be used if
    `model_impl` is set to `transformers` or `auto`."""
    if getattr(self, "runner_type", self.runner) == "pooling":
        return "TransformersModel"
    if self.hf_config != self.hf_text_config:
        # 'hf_text_config' differs from 'hf_config', so this is probably
        # a composite config, i.e. a multimodal model
        return "TransformersForMultimodalLM"
    return "TransformersForCausalLM"

_parse_quant_hf_config

_parse_quant_hf_config(hf_config: PretrainedConfig)
Source code in vllm/config/model.py
def _parse_quant_hf_config(self, hf_config: PretrainedConfig):
    quant_cfg = getattr(hf_config, "quantization_config", None)
    if quant_cfg is None:
        # compressed-tensors uses a "compression_config" key
        quant_cfg = getattr(hf_config, "compression_config", None)

    else:
        # Set quant_method for ModelOpt models.
        producer_name = quant_cfg.get("producer", {}).get("name")
        if producer_name == "modelopt":
            quant_algo = quant_cfg.get("quantization",
                                       {}).get("quant_algo")
            if quant_algo == "FP8":
                quant_cfg["quant_method"] = "modelopt"
            elif quant_algo == "NVFP4":
                quant_cfg["quant_method"] = "modelopt_fp4"
            elif quant_algo is not None:
                raise ValueError(
                    f"Unknown ModelOpt quant algo: {quant_algo}")

    return quant_cfg

_verify_bnb_config

_verify_bnb_config() -> None

The current version of bitsandbytes (0.46.1) with 8-bit models does not yet support CUDA graph.

TODO: Remove this once bitsandbytes adds CUDA graph support for 8-bit models.

Source code in vllm/config/model.py
def _verify_bnb_config(self) -> None:
    """
    The current version of bitsandbytes (0.46.1) with 8-bit models does not
    yet support CUDA graph.
    # TODO: Remove this once bitsandbytes adds CUDA graph support for 8-bit models.
    """
    is_bitsandbytes = self.quantization == "bitsandbytes"
    has_quantization_config = (getattr(self.hf_config,
                                       "quantization_config", None)
                               is not None)
    is_8bit = (self.hf_config.quantization_config.get(
        "load_in_8bit", False) if has_quantization_config else False)
    if all([
            is_bitsandbytes,
            has_quantization_config,
            is_8bit,
            not self.enforce_eager,
    ]):
        logger.warning(
            "CUDA graph is not supported on BitsAndBytes 8bit yet, "
            "fallback to the eager mode.")

        self.enforce_eager = True

_verify_cuda_graph

_verify_cuda_graph() -> None
Source code in vllm/config/model.py
def _verify_cuda_graph(self) -> None:
    # CUDAGraph capture not supported for encoder-decoder models on ROCm
    unsupported_rocm = self.is_encoder_decoder
    if (unsupported_rocm and not self.enforce_eager
            and current_platform.is_rocm()):
        logger.warning(
            "CUDA graph is not supported for %s on ROCm yet, fallback "
            "to eager mode.", self.hf_config.model_type)
        self.enforce_eager = True

_verify_quantization

_verify_quantization() -> None
Source code in vllm/config/model.py
def _verify_quantization(self) -> None:
    supported_quantization = me_quant.QUANTIZATION_METHODS
    if self.quantization is not None:
        self.quantization = cast(me_quant.QuantizationMethods,
                                 self.quantization)

    # Parse quantization method from the HF model config, if available.
    quant_cfg = self._parse_quant_hf_config(self.hf_config)
    if quant_cfg is None and (text_config := getattr(
            self.hf_config, "text_config", None)):
        # Check the text config as well for multi-modal models.
        quant_cfg = self._parse_quant_hf_config(text_config)

    if quant_cfg is not None:
        # Use the community standard 'quant_method'
        quant_method = quant_cfg.get("quant_method", "").lower()

        # Normalize library names
        quant_method = quant_method.replace("compressed_tensors",
                                            "compressed-tensors")

        quant_cfg["quant_method"] = quant_method

        # Quantization methods which are overrides (i.e. they have a
        # `override_quantization_method` method) must be checked in order
        # of preference (this is particularly important for GPTQ).
        overrides = [
            "bitblas",
            "gptq_marlin_24",
            "gptq_marlin",
            "gptq_bitblas",
            "awq_marlin",
            "ipex",
            "moe_wna16",
            "modelopt",
            "modelopt_fp4",
            "petit_nvfp4",
            # Ensure heavy backends are probed last to avoid unnecessary
            # imports during override detection (e.g., MXFP4 imports Triton)
            "mxfp4",
        ]
        quantization_methods = [
            q for q in supported_quantization if q not in overrides
        ]
        # Any custom overrides will be in quantization_methods so we place
        # them at the start of the list so custom overrides have preference
        # over the built-in ones.
        quantization_methods = quantization_methods + overrides

        # Detect which checkpoint format it is
        for name in quantization_methods:
            method = me_quant.get_quantization_config(name)
            quantization_override = method.override_quantization_method(
                quant_cfg, self.quantization)
            if quantization_override is not None:
                # Raise error if the override is not custom (custom would
                # be in QUANTIZATION_METHODS but not QuantizationMethods)
                # and hasn't been added to the overrides list.
                if (name in get_args(me_quant.QuantizationMethods)
                        and name not in overrides):
                    raise ValueError(
                        f"Quantization method {name} is an override but "
                        "is has not been added to the `overrides` list "
                        "above. This is necessary to ensure that the "
                        "overrides are checked in order of preference.")
                quant_method = quantization_override
                self.quantization = quantization_override
                break

        quant_method = quant_method if quant_method != "" else None
        # Verify quantization configurations.
        if self.quantization is None:
            self.quantization = quant_method
        elif self.quantization != quant_method:
            raise ValueError(
                "Quantization method specified in the model config "
                f"({quant_method}) does not match the quantization "
                f"method specified in the `quantization` argument "
                f"({self.quantization}).")

    if self.quantization is not None:
        if self.quantization not in supported_quantization:
            raise ValueError(
                f"Unknown quantization method: {self.quantization}. Must "
                f"be one of {supported_quantization}.")
        from vllm.platforms import current_platform
        current_platform.verify_quantization(self.quantization)

_verify_tokenizer_mode

_verify_tokenizer_mode() -> None
Source code in vllm/config/model.py
def _verify_tokenizer_mode(self) -> None:
    tokenizer_mode = cast(TokenizerMode, self.tokenizer_mode.lower())
    if tokenizer_mode not in get_args(TokenizerMode):
        raise ValueError(
            f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
            f"one of {get_args(TokenizerMode)}.")
    self.tokenizer_mode = tokenizer_mode
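
The check relies on typing.get_args over the TokenizerMode literal; a minimal standalone sketch with a local copy of the literal:

from typing import Literal, get_args

# Local copy of the TokenizerMode literal, for illustration only.
TokenizerMode = Literal["auto", "slow", "mistral", "custom"]

mode = "Mistral".lower()
if mode not in get_args(TokenizerMode):
    raise ValueError(f"Unknown tokenizer mode: {mode}")
print(mode)  # -> mistral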

_verify_with_expert_parallelism

_verify_with_expert_parallelism() -> None
Source code in vllm/config/model.py
def _verify_with_expert_parallelism(self) -> None:
    num_expert_names = [
        "moe_num_experts",  # Dbrx
        "num_experts",  # Jamba
        "n_routed_experts",  # DeepSeek
        "num_local_experts",  # Mixtral
    ]
    num_experts = 0
    for name in num_expert_names:
        num_experts = getattr(self.hf_text_config, name, 0)
        if num_experts > 0:
            break
    if num_experts < 1:
        raise ValueError(
            "Number of experts in the model must be greater than 0 "
            "when expert parallelism is enabled.")

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/model.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    factors: list[Any] = []
    factors.append(self.model)
    factors.append(self.dtype)
    factors.append(self.quantization)
    factors.append(self.revision)
    factors.append(self.code_revision)
    factors.append(self.max_model_len)
    factors.append(self.max_logprobs)
    factors.append(self.disable_sliding_window)
    factors.append(self.trust_remote_code)
    factors.append(self.generation_config)
    factors.append(self.model_impl)
    factors.append(self.override_generation_config)
    factors.append(self.rope_scaling)
    factors.append(self.rope_theta)
    factors.append(self.video_pruning_rate)

    # hf_config can control how the model looks!
    try:
        hf_config_json = self.hf_config.to_json_string(use_diff=False)
    except TypeError:
        from transformers import PretrainedConfig

        from vllm.utils.jsontree import json_map_leaves

        # Handle nested HF configs with unserializable values gracefully
        hf_config_json = json.dumps(
            json_map_leaves(
                lambda v: v.to_dict()
                if isinstance(v, PretrainedConfig) else str(v),
                self.hf_config.to_dict(),
            ),
            indent=2,
            sort_keys=True,
        ) + "\n"

    factors.append(hf_config_json)

    str_factors = str(factors)
    assert_hashable(str_factors)
    return hashlib.sha256(str(factors).encode()).hexdigest()
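
A minimal standalone sketch of the hashing pattern above, using toy stand-ins for the factor values:

import hashlib

# Toy stand-ins for the config fields collected above.
factors = ["facebook/opt-125m", "bfloat16", None, 4096]

# Stringify the factor list and hash it; identical factors give the same digest.
digest = hashlib.sha256(str(factors).encode()).hexdigest()
print(digest[:16])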

get_and_verify_max_len

get_and_verify_max_len(max_model_len: int)
Source code in vllm/config/model.py
def get_and_verify_max_len(self, max_model_len: int):
    # Consider max_model_len in tokenizer_config only when
    # pooling models use absolute position_embedding.
    tokenizer_config = None
    if (self.runner_type == "pooling" and getattr(
            self.hf_config, "position_embedding_type", "") == "absolute"):
        tokenizer_config = try_get_tokenizer_config(
            self.tokenizer,
            trust_remote_code=self.trust_remote_code,
            revision=self.tokenizer_revision)
    max_model_len = _get_and_verify_max_len(
        hf_config=self.hf_text_config,
        tokenizer_config=tokenizer_config,
        max_model_len=max_model_len,
        disable_sliding_window=self.disable_sliding_window,
        sliding_window=self.get_sliding_window(),
        spec_target_max_model_len=self.spec_target_max_model_len,
        encoder_config=self.encoder_config)
    logger.info("Using max model len %s", max_model_len)
    return max_model_len

get_diff_sampling_param

get_diff_sampling_param() -> dict[str, Any]

This method returns a dictionary containing the non-default sampling parameters with override_generation_config applied.

The default sampling parameters are:

  • vLLM's neutral defaults if self.generation_config="vllm"
  • the model's defaults if self.generation_config="auto"
  • as defined in generation_config.json if self.generation_config="path/to/generation_config/dir"

Returns:

Type Description
dict[str, Any]

A dictionary containing the non-default sampling parameters.

Source code in vllm/config/model.py
def get_diff_sampling_param(self) -> dict[str, Any]:
    """
    This method returns a dictionary containing the non-default sampling
    parameters with `override_generation_config` applied.

    The default sampling parameters are:

    - vLLM's neutral defaults if `self.generation_config="vllm"`
    - the model's defaults if `self.generation_config="auto"`
    - as defined in `generation_config.json` if
        `self.generation_config="path/to/generation_config/dir"`

    Returns:
        A dictionary containing the non-default sampling parameters.
    """
    if self.generation_config == "vllm":
        config = {}
    else:
        config = self.try_get_generation_config()

    # Overriding with given generation config
    config.update(self.override_generation_config)

    available_params = [
        "repetition_penalty",
        "temperature",
        "top_k",
        "top_p",
        "min_p",
        "max_new_tokens",
    ]
    if any(p in config for p in available_params):
        diff_sampling_param = {
            p: config.get(p)
            for p in available_params if config.get(p) is not None
        }
        # Huggingface definition of max_new_tokens is equivalent
        # to vLLM's max_tokens
        if "max_new_tokens" in diff_sampling_param:
            diff_sampling_param["max_tokens"] = diff_sampling_param.pop(
                "max_new_tokens")
    else:
        diff_sampling_param = {}

    if diff_sampling_param:
        logger.warning_once(
            "Default sampling parameters have been overridden by the "
            "model's Hugging Face generation config recommended from the "
            "model creator. If this is not intended, please relaunch "
            "vLLM instance with `--generation-config vllm`.")
    return diff_sampling_param
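
A toy walk-through of the filtering and renaming above, using a hypothetical generation config dict:

# Hypothetical values, e.g. from a model's generation_config.json.
config = {"temperature": 0.6, "top_p": 0.9, "max_new_tokens": 256, "eos_token_id": 2}

available_params = [
    "repetition_penalty", "temperature", "top_k", "top_p", "min_p", "max_new_tokens",
]

# Keep only the recognised sampling parameters that are actually set.
diff_sampling_param = {p: config[p] for p in available_params if config.get(p) is not None}

# Hugging Face's max_new_tokens maps to vLLM's max_tokens.
if "max_new_tokens" in diff_sampling_param:
    diff_sampling_param["max_tokens"] = diff_sampling_param.pop("max_new_tokens")

print(diff_sampling_param)  # -> {'temperature': 0.6, 'top_p': 0.9, 'max_tokens': 256}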

get_head_size

get_head_size() -> int
Source code in vllm/config/model.py
def get_head_size(self) -> int:
    # TODO remove hard code
    if self.is_deepseek_mla:
        qk_rope_head_dim = getattr(self.hf_text_config, "qk_rope_head_dim",
                                   0)
        if self.use_mla:
            return self.hf_text_config.kv_lora_rank + qk_rope_head_dim
        else:
            qk_nope_head_dim = getattr(self.hf_text_config,
                                       "qk_nope_head_dim", 0)
            if qk_rope_head_dim and qk_nope_head_dim:
                return qk_rope_head_dim + qk_nope_head_dim

    if hasattr(self.hf_text_config,
               "model_type") and (self.hf_text_config.model_type
                                  == "zamba2"):
        return self.hf_text_config.attention_head_dim

    if self.is_attention_free:
        return 0

    # NOTE: Some configs may set head_dim=None in the config
    if getattr(self.hf_text_config, "head_dim", None) is not None:
        return self.hf_text_config.head_dim

    # NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
    if getattr(self.hf_text_config, "hidden_size_per_head",
               None) is not None:
        return self.hf_text_config.hidden_size_per_head

    # FIXME(woosuk): This may not be true for all models.
    return (self.hf_text_config.hidden_size //
            self.hf_text_config.num_attention_heads)
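
Two worked examples of the branches above, using typical (hypothetical) model dimensions:

# Common fallback: head size derived from hidden size and attention heads.
hidden_size, num_attention_heads = 4096, 32
print(hidden_size // num_attention_heads)  # -> 128

# MLA case: head size is kv_lora_rank + qk_rope_head_dim.
kv_lora_rank, qk_rope_head_dim = 512, 64
print(kv_lora_rank + qk_rope_head_dim)  # -> 576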

get_hidden_size

get_hidden_size() -> int
Source code in vllm/config/model.py
def get_hidden_size(self) -> int:
    return getattr(self.hf_text_config, "hidden_size", 0)

get_layers_start_end_indices

get_layers_start_end_indices(
    parallel_config: ParallelConfig,
) -> tuple[int, int]
Source code in vllm/config/model.py
def get_layers_start_end_indices(
        self, parallel_config: ParallelConfig) -> tuple[int, int]:
    from vllm.distributed.utils import get_pp_indices
    if (self.hf_text_config.model_type == "deepseek_mtp"
            or self.hf_config.model_type == "mimo_mtp"
            or self.hf_config.model_type == "glm4_moe_mtp"
            or self.hf_config.model_type == "ernie_mtp"
            or self.hf_config.model_type == "qwen3_next_mtp"):
        total_num_hidden_layers = getattr(self.hf_text_config,
                                          "num_nextn_predict_layers", 0)
    elif (self.hf_config.model_type == "longcat_flash_mtp"):
        total_num_hidden_layers = getattr(self.hf_text_config,
                                          "num_nextn_predict_layers", 1)
    else:
        total_num_hidden_layers = getattr(self.hf_text_config,
                                          "num_hidden_layers", 0)
    # the layout order is: DP x PP x TP
    pp_rank = (parallel_config.rank // parallel_config.tensor_parallel_size
               ) % parallel_config.pipeline_parallel_size
    pp_size = parallel_config.pipeline_parallel_size
    start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
    return start, end
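
get_pp_indices assigns each pipeline stage a contiguous slice of layers; below is a minimal stand-in that assumes an even split (the real helper may partition unevenly), just to show the shape of the result:

def even_pp_indices(num_layers: int, pp_rank: int, pp_size: int) -> tuple[int, int]:
    # Simplified stand-in for vllm.distributed.utils.get_pp_indices,
    # assuming layers are split evenly across pipeline stages.
    per_stage = num_layers // pp_size
    start = pp_rank * per_stage
    end = num_layers if pp_rank == pp_size - 1 else start + per_stage
    return start, end

# 32 hidden layers over 4 pipeline stages.
print([even_pp_indices(32, r, 4) for r in range(4)])
# -> [(0, 8), (8, 16), (16, 24), (24, 32)]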

get_mamba_chunk_size

get_mamba_chunk_size() -> Optional[int]

Returns the mamba chunk size if it exists

Source code in vllm/config/model.py
def get_mamba_chunk_size(self) -> Optional[int]:
    """
    Returns the mamba chunk size if it exists
    """
    # used by e.g. Bamba, FalconH1, Granite, PLaMo2
    chunk_size = getattr(self.hf_text_config, "mamba_chunk_size", None)
    if chunk_size is None:
        # used by e.g. Mamba2, NemotronH, Zamba
        chunk_size = getattr(self.hf_text_config, "chunk_size", None)
    return chunk_size

get_multimodal_config

get_multimodal_config() -> MultiModalConfig

Get the multimodal configuration of the model.

Raises:

Type Description
ValueError

If the model is not multimodal.

Source code in vllm/config/model.py
def get_multimodal_config(self) -> MultiModalConfig:
    """
    Get the multimodal configuration of the model.

    Raises:
        ValueError: If the model is not multimodal.
    """
    if self.multimodal_config is None:
        raise ValueError("The model is not multimodal.")

    return self.multimodal_config

get_num_attention_heads

get_num_attention_heads(
    parallel_config: ParallelConfig,
) -> int
Source code in vllm/config/model.py
def get_num_attention_heads(self, parallel_config: ParallelConfig) -> int:
    num_heads = getattr(self.hf_text_config, "num_attention_heads", 0)
    return num_heads // parallel_config.tensor_parallel_size

get_num_kv_heads

get_num_kv_heads(parallel_config: ParallelConfig) -> int

Returns the number of KV heads per GPU.

Source code in vllm/config/model.py
def get_num_kv_heads(self, parallel_config: ParallelConfig) -> int:
    """Returns the number of KV heads per GPU."""
    if self.use_mla:
        # When using MLA during decode it becomes MQA
        return 1

    total_num_kv_heads = self.get_total_num_kv_heads()
    # If tensor parallelism is used, we divide the number of KV heads by
    # the tensor parallel size. We will replicate the KV heads in the
    # case where the number of KV heads is smaller than the tensor
    # parallel size so each GPU has at least one KV head.
    return max(1,
               total_num_kv_heads // parallel_config.tensor_parallel_size)
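
Two worked examples of the division above (hypothetical head counts):

def kv_heads_per_gpu(total_num_kv_heads: int, tensor_parallel_size: int) -> int:
    # Mirror of the max(1, ...) rule: replicate KV heads when the TP size
    # exceeds the number of KV heads so that every GPU holds at least one.
    return max(1, total_num_kv_heads // tensor_parallel_size)

print(kv_heads_per_gpu(8, 4))   # -> 2 KV heads per GPU
print(kv_heads_per_gpu(8, 16))  # -> 1 (KV heads are replicated)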

get_num_layers

get_num_layers(parallel_config: ParallelConfig) -> int
Source code in vllm/config/model.py
def get_num_layers(self, parallel_config: ParallelConfig) -> int:
    start, end = self.get_layers_start_end_indices(parallel_config)
    return end - start

get_num_layers_by_block_type

get_num_layers_by_block_type(
    parallel_config: ParallelConfig,
    block_type: LayerBlockType = attention,
) -> int
Source code in vllm/config/model.py
def get_num_layers_by_block_type(
    self,
    parallel_config: ParallelConfig,
    block_type: LayerBlockType = LayerBlockType.attention,
) -> int:
    # This function relies on 'layers_block_type' in hf_config; for models
    # without this attribute, we need the workarounds below.
    attn_block_type = block_type == LayerBlockType.attention
    is_transformer = not self.is_hybrid and \
                        not self.has_noops and \
                        not self.is_attention_free
    start, end = self.get_layers_start_end_indices(parallel_config)

    if is_transformer:
        # Handle the basic case first
        return end - start if attn_block_type else 0
    elif self.is_attention_free:
        # Attention free
        # Note that this code assumes there
        # is only one type of attention-free block type.
        return 0 if attn_block_type else end - start
    elif self.has_noops:
        block_configs = self.hf_config.block_configs
        return sum(not bc.attention.no_op
                   for bc in block_configs[start:end])
    else:
        # Hybrid model Jamba
        layers_block_type_value = getattr(self.hf_text_config,
                                          "layers_block_type", None)
        if layers_block_type_value is not None:
            if hasattr(self.hf_text_config,
                       "model_type") and (self.hf_text_config.model_type
                                          == "zamba2"):
                if attn_block_type:
                    return sum(t == "hybrid"
                               for t in layers_block_type_value[start:end])
                else:
                    return self.get_num_layers(parallel_config)
            return sum(t == block_type.value
                       for t in layers_block_type_value[start:end])

        # Hybrid model Minimax
        attn_type_list = getattr(self.hf_config, "attn_type_list", None)
        if attn_type_list:
            return sum(t == 1 for t in attn_type_list[start:end])

        # Hybrid model Qwen3Next
        layer_types_value = getattr(self.hf_config, "layer_types", None)
        if layer_types_value is not None:
            if getattr(block_type, "value", block_type) == "attention":
                return sum(t == "full_attention"
                           for t in layer_types_value[start:end])
            elif getattr(block_type, "value",
                         block_type) == "linear_attention":
                return sum(t == "linear_attention"
                           for t in layer_types_value[start:end])
            else:
                return sum(t == getattr(block_type, "value", block_type)
                           for t in layer_types_value[start:end])

        if (layers_block_type_value is None and attn_type_list is None
                and layer_types_value is None):
            raise ValueError(
                "The model is an hybrid without a"
                "layers_block_type or an attn_type_list, or a layer_types "
                "in the hf_config, cannot determine the num of "
                f"{block_type.value} layers")

get_sliding_window

get_sliding_window() -> Optional[int]

Get the sliding window size from the HF text config if present.

Source code in vllm/config/model.py
def get_sliding_window(self) -> Optional[int]:
    """Get the sliding window size from the HF text config if present."""
    return getattr(self.hf_text_config, "sliding_window", None)

get_total_num_kv_heads

get_total_num_kv_heads() -> int

Returns the total number of KV heads.

Source code in vllm/config/model.py
def get_total_num_kv_heads(self) -> int:
    """Returns the total number of KV heads."""
    # For GPTBigCode & Falcon:
    # NOTE: for falcon, when new_decoder_architecture is True, the
    # multi_query flag is ignored and we use n_head_kv for the number of
    # KV heads.
    falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
    new_decoder_arch_falcon = (
        self.hf_config.model_type in falcon_model_types
        and getattr(self.hf_config, "new_decoder_architecture", False))
    if not new_decoder_arch_falcon and getattr(self.hf_text_config,
                                               "multi_query", False):
        # Multi-query attention, only one KV head.
        # Currently, tensor parallelism is not supported in this case.
        return 1

    # For DBRX and MPT
    if self.hf_config.model_type == "mpt":
        if "kv_n_heads" in self.hf_config.attn_config:
            return self.hf_config.attn_config["kv_n_heads"]
        return self.hf_config.num_attention_heads
    if self.hf_config.model_type == "dbrx":
        return getattr(self.hf_config.attn_config, "kv_n_heads",
                       self.hf_config.num_attention_heads)

    if self.hf_config.model_type == "nemotron-nas":
        for block in self.hf_config.block_configs:
            if not block.attention.no_op:
                return self.hf_config.num_attention_heads \
                    // block.attention.n_heads_in_group

        raise RuntimeError("Couldn't determine number of kv heads")

    if self.is_attention_free:
        return 0

    attributes = [
        # For Falcon:
        "n_head_kv",
        "num_kv_heads",
        # For LLaMA-2:
        "num_key_value_heads",
        # For ChatGLM:
        "multi_query_group_num",
    ]
    for attr in attributes:
        num_kv_heads = getattr(self.hf_text_config, attr, None)
        if num_kv_heads is not None:
            return num_kv_heads

    # For non-grouped-query attention models, the number of KV heads is
    # equal to the number of attention heads.
    return self.hf_text_config.num_attention_heads
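
The generic fallback is a simple attribute scan over common config field names; a toy illustration with a SimpleNamespace standing in for hf_text_config:

from types import SimpleNamespace

# Toy stand-in for the hf_text_config of a GQA model (LLaMA-2 style).
hf_text_config = SimpleNamespace(num_attention_heads=32, num_key_value_heads=8)

attributes = ["n_head_kv", "num_kv_heads", "num_key_value_heads", "multi_query_group_num"]
num_kv_heads = hf_text_config.num_attention_heads  # non-GQA fallback
for attr in attributes:
    value = getattr(hf_text_config, attr, None)
    if value is not None:
        num_kv_heads = value
        break
print(num_kv_heads)  # -> 8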

get_vocab_size

get_vocab_size() -> int
Source code in vllm/config/model.py
def get_vocab_size(self) -> int:
    return getattr(self.hf_text_config, "vocab_size", 0)

maybe_pull_model_tokenizer_for_runai

maybe_pull_model_tokenizer_for_runai(
    model: str, tokenizer: str
) -> None

Pull model/tokenizer from Object Storage to temporary directory when needed.

Parameters:

Name Type Description Default
model str

Model name or path

required
tokenizer str

Tokenizer name or path

required
Source code in vllm/config/model.py
def maybe_pull_model_tokenizer_for_runai(self, model: str,
                                         tokenizer: str) -> None:
    """Pull model/tokenizer from Object Storage to temporary
    directory when needed.

    Args:
        model: Model name or path
        tokenizer: Tokenizer name or path
    """

    if not (is_runai_obj_uri(model) or is_runai_obj_uri(tokenizer)):
        return

    if is_runai_obj_uri(model):
        object_storage_model = ObjectStorageModel(url=model)
        object_storage_model.pull_files(
            model, allow_pattern=["*.model", "*.py", "*.json"])
        self.model_weights = model
        self.model = object_storage_model.dir

        # If tokenizer is same as model, download to same directory
        if model == tokenizer:
            object_storage_model.pull_files(model,
                                            ignore_pattern=[
                                                "*.pt", "*.safetensors",
                                                "*.bin", "*.tensors",
                                                "*.pth"
                                            ])
            self.tokenizer = object_storage_model.dir
            return

    # Only download tokenizer if needed and not already handled
    if is_runai_obj_uri(tokenizer):
        object_storage_tokenizer = ObjectStorageModel(url=tokenizer)
        object_storage_tokenizer.pull_files(model,
                                            ignore_pattern=[
                                                "*.pt", "*.safetensors",
                                                "*.bin", "*.tensors",
                                                "*.pth"
                                            ])
        self.tokenizer = object_storage_tokenizer.dir

try_get_generation_config

try_get_generation_config() -> dict[str, Any]

This method attempts to retrieve the non-default values of the generation config for this model.

The generation config can contain information about special tokens as well as sampling parameters, which is why this method exists separately from get_diff_sampling_param.

Returns:

Type Description
dict[str, Any]

A dictionary containing the non-default generation config.

Source code in vllm/config/model.py
def try_get_generation_config(self) -> dict[str, Any]:
    """
    This method attempts to retrieve the non-default values of the
    generation config for this model.

    The generation config can contain information about special tokens as
    well as sampling parameters, which is why this method exists separately
    from `get_diff_sampling_param`.

    Returns:
        A dictionary containing the non-default generation config.
    """
    if self.generation_config in {"auto", "vllm"}:
        config = try_get_generation_config(
            self.hf_config_path or self.model,
            trust_remote_code=self.trust_remote_code,
            revision=self.revision,
            config_format=self.config_format,
        )
    else:
        config = try_get_generation_config(
            self.generation_config,
            trust_remote_code=self.trust_remote_code,
            config_format=self.config_format,
        )

    if config is None:
        return {}

    return config.to_diff_dict()
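
The returned values come from transformers' GenerationConfig.to_diff_dict(), which only keeps fields that differ from the library defaults; a small hedged example (assuming transformers is installed):

from transformers import GenerationConfig

# Hypothetical generation config with two non-default sampling values.
gen_cfg = GenerationConfig(temperature=0.6, top_p=0.9)

# Only the non-default fields are reported.
print(gen_cfg.to_diff_dict())  # e.g. {'temperature': 0.6, 'top_p': 0.9, ...}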

using_transformers_backend

using_transformers_backend() -> bool

Check if the model is using the Transformers backend class.

Source code in vllm/config/model.py
def using_transformers_backend(self) -> bool:
    """Check if the model is using the Transformers backend class."""
    return self.architecture == self._get_transformers_backend_cls()

validate_model_config_after

validate_model_config_after() -> ModelConfig
Source code in vllm/config/model.py
@model_validator(mode="after")
def validate_model_config_after(self: "ModelConfig") -> "ModelConfig":
    if not isinstance(self.tokenizer, str):
        raise ValueError("tokenizer must be a string after __post_init__.")
    if not isinstance(self.max_model_len, int):
        raise ValueError(
            "max_model_len must be an integer after __post_init__.")
    return self

validate_quantization_before classmethod

validate_quantization_before(value: Any) -> Any
Source code in vllm/config/model.py
@field_validator("quantization", mode="before")
@classmethod
def validate_quantization_before(cls, value: Any) -> Any:
    if isinstance(value, str):
        return value.lower()
    return value

verify_dual_chunk_attention_config

verify_dual_chunk_attention_config(
    load_config: LoadConfig,
) -> None
Source code in vllm/config/model.py
def verify_dual_chunk_attention_config(
    self,
    load_config: LoadConfig,
) -> None:
    if hasattr(self.hf_config, "dual_chunk_attention_config"):
        # Try loading the sparse attention config
        from vllm.model_executor.model_loader.weight_utils import (
            get_sparse_attention_config)
        sparse_attn_config = get_sparse_attention_config(self, load_config)
        if sparse_attn_config:
            self.hf_config.dual_chunk_attention_config[
                "sparse_attention_config"] = sparse_attn_config
            if "sparse_attention_enabled" not in \
                    self.hf_config.dual_chunk_attention_config:
                self.hf_config.dual_chunk_attention_config[
                    "sparse_attention_enabled"] = True

verify_with_parallel_config

verify_with_parallel_config(
    parallel_config: ParallelConfig,
) -> None
Source code in vllm/config/model.py
def verify_with_parallel_config(
    self,
    parallel_config: ParallelConfig,
) -> None:

    if parallel_config.distributed_executor_backend == "external_launcher":
        assert self.seed is not None, (
            "Seed must be set when using external launcher backend to "
            "make sure sampling results are the same across workers.")

    total_num_attention_heads = getattr(self.hf_text_config,
                                        "num_attention_heads", 0)
    tensor_parallel_size = parallel_config.tensor_parallel_size
    if total_num_attention_heads % tensor_parallel_size != 0:
        raise ValueError(
            f"Total number of attention heads ({total_num_attention_heads})"
            " must be divisible by tensor parallel size "
            f"({tensor_parallel_size}).")

    if parallel_config.enable_expert_parallel:
        self._verify_with_expert_parallelism()

    pipeline_parallel_size = parallel_config.pipeline_parallel_size
    if (pipeline_parallel_size > 1
            and not self.registry.is_pp_supported_model(
                self.architectures, self)):
        raise NotImplementedError(
            "Pipeline parallelism is not supported for this model. "
            "Supported models implement the `SupportsPP` interface.")

MultiModalConfig

Controls the behavior of multimodal models.

Source code in vllm/config/multimodal.py
@config
@dataclass
class MultiModalConfig:
    """Controls the behavior of multimodal models."""

    limit_per_prompt: dict[str, int] = field(default_factory=dict)
    """The maximum number of input items allowed per prompt for each modality.
    Defaults to 1 (V0) or 999 (V1) for each modality.

    For example, to allow up to 16 images and 2 videos per prompt:
    `{"image": 16, "video": 2}`"""
    media_io_kwargs: dict[str, dict[str, Any]] = field(default_factory=dict)
    """Additional args passed to process media inputs, keyed by modalities.
    For example, to set num_frames for video, set
    `--media-io-kwargs '{"video": {"num_frames": 40} }'`"""
    mm_processor_kwargs: Optional[dict[str, object]] = None
    """Arguments to be forwarded to the model's processor for multi-modal data,
    e.g., image processor. Overrides for the multi-modal processor obtained
    from `transformers.AutoProcessor.from_pretrained`.

    The available overrides depend on the model that is being run.

    For example, for Phi-3-Vision:
    `{"num_crops": 4}`."""
    mm_processor_cache_gb: float = 4
    """The size (in GiB) of the multi-modal processor cache, which is used to
    avoid re-processing past multi-modal inputs.

    This cache is duplicated for each API process and engine core process,
    resulting in a total memory usage of
    `mm_processor_cache_gb * (api_server_count + data_parallel_size)`.

    Set to `0` to disable this cache completely (not recommended)."""
    mm_processor_cache_type: MMCacheType = "lru"
    """Type of cache to use for the multi-modal preprocessor/mapper. If `shm`,
    use shared memory FIFO cache. If `lru`, use mirrored LRU cache."""
    mm_shm_cache_max_object_size_mb: int = 128
    """Size limit (in MiB) for each object stored in the multi-modal processor
    shared memory cache. Only effective when `mm_processor_cache_type` is
    `"shm"`."""
    mm_encoder_tp_mode: MMEncoderTPMode = "weights"
    """Indicates how to optimize multi-modal encoder inference using tensor
    parallelism (TP).

    - `"weights"`: Within the same vLLM engine, split the weights of
        each layer across TP ranks. (default TP behavior)\n
    - `"data"`: Within the same vLLM engine, split the batched input data
        across TP ranks to process the data in parallel, while hosting
        the full weights on each TP rank.
        This batch-level DP is not to be confused with API request-level
        DP (which is controlled by `--data-parallel-size`).
        This is only supported on a per-model basis and falls back to
        `"weights"` if the encoder does not support DP."""
    interleave_mm_strings: bool = False
    """Enable fully interleaved support for multimodal prompts, while using
    --chat-template-content-format=string."""
    skip_mm_profiling: bool = False
    """When enabled, skips multimodal memory profiling and only profiles with
    language backbone model during engine initialization.

    This reduces engine startup time but shifts the responsibility to users for
    estimating the peak memory usage of the activation of multimodal encoder and
    embedding cache."""
    video_pruning_rate: Optional[float] = None
    """Sets pruning rate for video pruning via Efficient Video Sampling.
    Value sits in range [0;1) and determines fraction of media tokens
    from each video to be pruned.
    """

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def get_limit_per_prompt(self, modality: str) -> int:
        """
        Get the maximum number of input items allowed per prompt
        for the given modality.
        """
        return self.limit_per_prompt.get(
            modality,
            999 if envs.VLLM_USE_V1 else 1,
        )

    def merge_mm_processor_kwargs(
        self,
        inference_kwargs: Mapping[str, object],
    ) -> dict[str, object]:
        """
        Get the keyword arguments to pass to the multi-modal processor
        according to the extra arguments passed during inference.
        """
        kwargs = self.mm_processor_kwargs or {}
        return kwargs | dict(inference_kwargs)

    def is_multimodal_pruning_enabled(self):
        return (self.video_pruning_rate is not None
                and self.video_pruning_rate > 0)
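
A hedged usage sketch of the per-modality limit lookup; the config is constructed directly here for illustration, whereas in practice these values normally come from engine arguments:

from vllm.config import MultiModalConfig

# Allow up to 16 images and 2 videos per prompt.
mm_config = MultiModalConfig(limit_per_prompt={"image": 16, "video": 2})

print(mm_config.get_limit_per_prompt("image"))  # -> 16
print(mm_config.get_limit_per_prompt("audio"))  # falls back to the default limit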

interleave_mm_strings class-attribute instance-attribute

interleave_mm_strings: bool = False

Enable fully interleaved support for multimodal prompts, while using --chat-template-content-format=string.

limit_per_prompt class-attribute instance-attribute

limit_per_prompt: dict[str, int] = field(
    default_factory=dict
)

The maximum number of input items allowed per prompt for each modality. Defaults to 1 (V0) or 999 (V1) for each modality.

For example, to allow up to 16 images and 2 videos per prompt: {"image": 16, "video": 2}

media_io_kwargs class-attribute instance-attribute

media_io_kwargs: dict[str, dict[str, Any]] = field(
    default_factory=dict
)

Additional args passed to process media inputs, keyed by modalities. For example, to set num_frames for video, set --media-io-kwargs '{"video": {"num_frames": 40} }'

mm_encoder_tp_mode class-attribute instance-attribute

mm_encoder_tp_mode: MMEncoderTPMode = 'weights'

Indicates how to optimize multi-modal encoder inference using tensor parallelism (TP).

  • "weights": Within the same vLLM engine, split the weights of each layer across TP ranks. (default TP behavior)

  • "data": Within the same vLLM engine, split the batched input data across TP ranks to process the data in parallel, while hosting the full weights on each TP rank. This batch-level DP is not to be confused with API request-level DP (which is controlled by --data-parallel-size). This is only supported on a per-model basis and falls back to "weights" if the encoder does not support DP.

mm_processor_cache_gb class-attribute instance-attribute

mm_processor_cache_gb: float = 4

The size (in GiB) of the multi-modal processor cache, which is used to avoid re-processing past multi-modal inputs.

This cache is duplicated for each API process and engine core process, resulting in a total memory usage of mm_processor_cache_gb * (api_server_count + data_parallel_size).

Set to 0 to disable this cache completely (not recommended).

mm_processor_cache_type class-attribute instance-attribute

mm_processor_cache_type: MMCacheType = 'lru'

Type of cache to use for the multi-modal preprocessor/mapper. If shm, use shared memory FIFO cache. If lru, use mirrored LRU cache.

mm_processor_kwargs class-attribute instance-attribute

mm_processor_kwargs: Optional[dict[str, object]] = None

Arguments to be forwarded to the model's processor for multi-modal data, e.g., image processor. Overrides for the multi-modal processor obtained from transformers.AutoProcessor.from_pretrained.

The available overrides depend on the model that is being run.

For example, for Phi-3-Vision: {"num_crops": 4}.

mm_shm_cache_max_object_size_mb class-attribute instance-attribute

mm_shm_cache_max_object_size_mb: int = 128

Size limit (in MiB) for each object stored in the multi-modal processor shared memory cache. Only effective when mm_processor_cache_type is "shm".

skip_mm_profiling class-attribute instance-attribute

skip_mm_profiling: bool = False

When enabled, skips multimodal memory profiling and only profiles with language backbone model during engine initialization.

This reduces engine startup time but shifts the responsibility to users for estimating the peak memory usage of the activation of multimodal encoder and embedding cache.

video_pruning_rate class-attribute instance-attribute

video_pruning_rate: Optional[float] = None

Sets pruning rate for video pruning via Efficient Video Sampling. Value sits in range [0;1) and determines fraction of media tokens from each video to be pruned.

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/multimodal.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # this config will not affect the computation graph.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

get_limit_per_prompt

get_limit_per_prompt(modality: str) -> int

Get the maximum number of input items allowed per prompt for the given modality.

Source code in vllm/config/multimodal.py
def get_limit_per_prompt(self, modality: str) -> int:
    """
    Get the maximum number of input items allowed per prompt
    for the given modality.
    """
    return self.limit_per_prompt.get(
        modality,
        999 if envs.VLLM_USE_V1 else 1,
    )

is_multimodal_pruning_enabled

is_multimodal_pruning_enabled()
Source code in vllm/config/multimodal.py
def is_multimodal_pruning_enabled(self):
    return (self.video_pruning_rate is not None
            and self.video_pruning_rate > 0)

merge_mm_processor_kwargs

merge_mm_processor_kwargs(
    inference_kwargs: Mapping[str, object],
) -> dict[str, object]

Get the keyword arguments to pass to the multi-modal processor according to the extra arguments passed during inference.

Source code in vllm/config/multimodal.py
def merge_mm_processor_kwargs(
    self,
    inference_kwargs: Mapping[str, object],
) -> dict[str, object]:
    """
    Get the keyword arguments to pass to the multi-modal processor
    according to the extra arguments passed during inference.
    """
    kwargs = self.mm_processor_kwargs or {}
    return kwargs | dict(inference_kwargs)
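
The dict union means inference-time kwargs override config-time kwargs on key collisions; a toy illustration:

# Processor kwargs configured on the model (e.g. via mm_processor_kwargs).
config_kwargs = {"num_crops": 4, "do_rescale": True}
# Extra kwargs passed with a single request at inference time.
inference_kwargs = {"num_crops": 8}

# The right-hand operand of `|` wins when keys collide.
merged = config_kwargs | dict(inference_kwargs)
print(merged)  # -> {'num_crops': 8, 'do_rescale': True}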

ObservabilityConfig

Configuration for observability - metrics and tracing.

Source code in vllm/config/observability.py
@config
@dataclass
class ObservabilityConfig:
    """Configuration for observability - metrics and tracing."""

    show_hidden_metrics_for_version: Optional[str] = None
    """Enable deprecated Prometheus metrics that have been hidden since the
    specified version. For example, if a previously deprecated metric has been
    hidden since the v0.7.0 release, you use
    `--show-hidden-metrics-for-version=0.7` as a temporary escape hatch while
    you migrate to new metrics. The metric is likely to be removed completely
    in an upcoming release."""

    @cached_property
    def show_hidden_metrics(self) -> bool:
        """Check if the hidden metrics should be shown."""
        if self.show_hidden_metrics_for_version is None:
            return False
        return version._prev_minor_version_was(
            self.show_hidden_metrics_for_version)

    otlp_traces_endpoint: Optional[str] = None
    """Target URL to which OpenTelemetry traces will be sent."""

    collect_detailed_traces: Optional[list[DetailedTraceModules]] = None
    """It makes sense to set this only if `--otlp-traces-endpoint` is set. If
    set, it will collect detailed traces for the specified modules. This
    involves use of possibly costly and/or blocking operations and hence might
    have a performance impact.

    Note that collecting detailed timing information for each request can be
    expensive."""

    @cached_property
    def collect_model_forward_time(self) -> bool:
        """Whether to collect model forward time for the request."""
        return (self.collect_detailed_traces is not None
                and ("model" in self.collect_detailed_traces
                     or "all" in self.collect_detailed_traces))

    @cached_property
    def collect_model_execute_time(self) -> bool:
        """Whether to collect model execute time for the request."""
        return (self.collect_detailed_traces is not None
                and ("worker" in self.collect_detailed_traces
                     or "all" in self.collect_detailed_traces))

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self):
        if (self.collect_detailed_traces is not None
                and len(self.collect_detailed_traces) == 1
                and "," in self.collect_detailed_traces[0]):
            self._parse_collect_detailed_traces()

        from vllm.tracing import is_otel_available, otel_import_error_traceback
        if not is_otel_available() and self.otlp_traces_endpoint is not None:
            raise ValueError(
                "OpenTelemetry is not available. Unable to configure "
                "'otlp_traces_endpoint'. Ensure OpenTelemetry packages are "
                f"installed. Original error:\n{otel_import_error_traceback}")

    def _parse_collect_detailed_traces(self):
        assert isinstance(self.collect_detailed_traces, list)
        self.collect_detailed_traces = cast(
            list[DetailedTraceModules],
            self.collect_detailed_traces[0].split(","))

collect_detailed_traces class-attribute instance-attribute

collect_detailed_traces: Optional[
    list[DetailedTraceModules]
] = None

It makes sense to set this only if --otlp-traces-endpoint is set. If set, it will collect detailed traces for the specified modules. This involves use of possibly costly and/or blocking operations and hence might have a performance impact.

Note that collecting detailed timing information for each request can be expensive.

collect_model_execute_time cached property

collect_model_execute_time: bool

Whether to collect model execute time for the request.

collect_model_forward_time cached property

collect_model_forward_time: bool

Whether to collect model forward time for the request.

otlp_traces_endpoint class-attribute instance-attribute

otlp_traces_endpoint: Optional[str] = None

Target URL to which OpenTelemetry traces will be sent.

show_hidden_metrics cached property

show_hidden_metrics: bool

Check if the hidden metrics should be shown.

show_hidden_metrics_for_version class-attribute instance-attribute

show_hidden_metrics_for_version: Optional[str] = None

Enable deprecated Prometheus metrics that have been hidden since the specified version. For example, if a previously deprecated metric has been hidden since the v0.7.0 release, you use --show-hidden-metrics-for-version=0.7 as a temporary escape hatch while you migrate to new metrics. The metric is likely to be removed completely in an upcoming release.

__post_init__

__post_init__()
Source code in vllm/config/observability.py
def __post_init__(self):
    if (self.collect_detailed_traces is not None
            and len(self.collect_detailed_traces) == 1
            and "," in self.collect_detailed_traces[0]):
        self._parse_collect_detailed_traces()

    from vllm.tracing import is_otel_available, otel_import_error_traceback
    if not is_otel_available() and self.otlp_traces_endpoint is not None:
        raise ValueError(
            "OpenTelemetry is not available. Unable to configure "
            "'otlp_traces_endpoint'. Ensure OpenTelemetry packages are "
            f"installed. Original error:\n{otel_import_error_traceback}")

_parse_collect_detailed_traces

_parse_collect_detailed_traces()
Source code in vllm/config/observability.py
def _parse_collect_detailed_traces(self):
    assert isinstance(self.collect_detailed_traces, list)
    self.collect_detailed_traces = cast(
        list[DetailedTraceModules],
        self.collect_detailed_traces[0].split(","))
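
This keeps backward compatibility with a single comma-separated value; a toy illustration of the split:

# A single comma-separated value, as it may arrive from the command line.
collect_detailed_traces = ["model,worker"]

if len(collect_detailed_traces) == 1 and "," in collect_detailed_traces[0]:
    collect_detailed_traces = collect_detailed_traces[0].split(",")
print(collect_detailed_traces)  # -> ['model', 'worker']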

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/observability.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # this config will not affect the computation graph.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

ParallelConfig

Configuration for the distributed execution.

Source code in vllm/config/parallel.py
@config
@dataclass
class ParallelConfig:
    """Configuration for the distributed execution."""

    pipeline_parallel_size: int = 1
    """Number of pipeline parallel groups."""
    tensor_parallel_size: int = 1
    """Number of tensor parallel groups."""
    data_parallel_size: int = 1
    """Number of data parallel groups. MoE layers will be sharded according to
    the product of the tensor parallel size and data parallel size."""
    data_parallel_size_local: int = 1
    """Number of local data parallel groups."""
    data_parallel_rank: int = 0
    """Rank of the data parallel group."""
    data_parallel_rank_local: Optional[int] = None
    """Local rank of the data parallel group,
    set only in SPMD mode."""
    data_parallel_master_ip: str = "127.0.0.1"
    """IP of the data parallel master."""
    data_parallel_rpc_port: int = 29550
    """Port for data parallel messaging."""
    data_parallel_master_port: int = 29500
    """Port of the data parallel master."""
    data_parallel_backend: str = "mp"
    """Backend to use for data parallel, either "mp" or "ray"."""
    data_parallel_external_lb: bool = False
    """Whether to use "external" DP LB mode. Applies only to online serving
    and when data_parallel_size > 0. This is useful for a "one-pod-per-rank"
    wide-EP setup in Kubernetes. Set implicitly when --data-parallel-rank
    is provided explicitly to vllm serve."""
    data_parallel_hybrid_lb: bool = False
    """Whether to use "hybrid" DP LB mode. Applies only to online serving
    and when data_parallel_size > 0. Enables running an AsyncLLM
    and API server on a "per-node" basis where vLLM load balances
    between local data parallel ranks, but an external LB balances
    between vLLM nodes/replicas. Set explicitly in conjunction with
    --data-parallel-start-rank."""
    enable_expert_parallel: bool = False
    """Use expert parallelism instead of tensor parallelism for MoE layers."""
    enable_eplb: bool = False
    """Enable expert parallelism load balancing for MoE layers."""
    eplb_config: EPLBConfig = field(default_factory=EPLBConfig)
    """Expert parallelism configuration."""
    expert_placement_strategy: ExpertPlacementStrategy = "linear"
    """The expert placement strategy for MoE layers:\n
    - "linear": Experts are placed in a contiguous manner. For example, with 4
      experts and 2 ranks, rank 0 will have experts [0, 1] and rank 1 will have
      experts [2, 3].\n
    - "round_robin": Experts are placed in a round-robin manner. For example,
      with 4 experts and 2 ranks, rank 0 will have experts [0, 2] and rank 1
      will have experts [1, 3]. This strategy can help improve load balancing
      for grouped expert models with no redundant experts."""
    num_redundant_experts: Optional[int] = None
    """`num_redundant_experts` is deprecated and has been replaced with
    `eplb_config.num_redundant_experts`. This will be removed in v0.12.0.
    Please use `eplb_config.num_redundant_experts` instead."""
    eplb_window_size: Optional[int] = None
    """`eplb_window_size` is deprecated and has been replaced with
    `eplb_config.window_size`. This will be removed in v0.12.0.
    Please use `eplb_config.window_size` instead."""
    eplb_step_interval: Optional[int] = None
    """`eplb_step_interval` is deprecated and has been replaced with
    `eplb_config.step_interval`. This will be removed in v0.12.0.
    Please use `eplb_config.step_interval` instead."""
    eplb_log_balancedness: Optional[bool] = None
    """`eplb_log_balancedness` is deprecated and has been replaced with
    `eplb_config.log_balancedness`. This will be removed in v0.12.0.
    Please use `eplb_config.log_balancedness` instead."""

    max_parallel_loading_workers: Optional[int] = None
    """Maximum number of parallel loading workers when loading model
    sequentially in multiple batches. To avoid RAM OOM when using tensor
    parallel and large models."""

    disable_custom_all_reduce: bool = False
    """Disable the custom all-reduce kernel and fall back to NCCL."""

    enable_dbo: bool = False
    """Enable dual batch overlap for the model executor."""

    dbo_decode_token_threshold: int = 32
    """The threshold for dual batch overlap for batches only containing decodes.
    If the number of tokens in the request is greater than this threshold,
    microbatching will be used. Otherwise, the request will be processed in a
    single batch."""
    dbo_prefill_token_threshold: int = 512  # TODO(lucas): tune
    """The threshold for dual batch overlap for batches that contain one or more
    prefills. If the number of tokens in the request is greater than this
    threshold, microbatching will be used. Otherwise, the request will be
    processed in a single batch."""

    ray_workers_use_nsight: bool = False
    """Whether to profile Ray workers with nsight, see https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler."""

    ray_runtime_env: Optional[RuntimeEnv] = None
    """Ray runtime environment to pass to distributed workers."""

    placement_group: Optional[PlacementGroup] = None
    """ray distributed model workers placement group."""

    distributed_executor_backend: Optional[Union[str,
                                                 DistributedExecutorBackend,
                                                 type[ExecutorBase]]] = None
    """Backend to use for distributed model
    workers, either "ray" or "mp" (multiprocessing). If the product
    of pipeline_parallel_size and tensor_parallel_size is less than
    or equal to the number of GPUs available, "mp" will be used to
    keep processing on a single host. Otherwise, this will default
    to "ray" if Ray is installed and fail otherwise. Note that tpu
    only support Ray for distributed inference."""

    worker_cls: str = "auto"
    """The full name of the worker class to use. If "auto", the worker class
    will be determined based on the platform."""
    sd_worker_cls: str = "auto"
    """The full name of the worker class to use for speculative decoding.
    If "auto", the worker class will be determined based on the platform."""
    worker_extension_cls: str = ""
    """The full name of the worker extension class to use. The worker extension
    class is dynamically inherited by the worker class. This is used to inject
    new attributes and methods to the worker class for use in collective_rpc
    calls."""

    world_size: int = field(init=False)
    """world_size is TPxPP, it affects the number of workers we create."""

    rank: int = 0
    """Global rank in distributed setup."""

    _data_parallel_master_port_list: list[int] = field(default_factory=list)
    """List of open port auto-queried for data parallel messaging.
    Set to be private as it's not intended to be configured by users.
    """

    decode_context_parallel_size: int = 1
    """Number of decode context parallel groups, because the world size does
    not change by dcp, it simply reuse the GPUs of TP group, and tp_size
    needs to be divisible by dcp_size."""

    _api_process_count: int = 1
    """
    The number of API processes initialized.

    Note:
        This is an internal config that is only valid for and
        should only be set by API server scale-out.
    """

    _api_process_rank: int = 0
    """
    The rank of this API process, or `-1` for engine core processes
    under API server scale-out.

    Note:
        This is an internal config that is only valid for and
        should only be set by API server scale-out.
    """

    @property
    def world_size_across_dp(self) -> int:
        """world_size_across_dp is TPxPPxDP, it is the size of the world
        including data parallelism."""
        return self.world_size * self.data_parallel_size

    def get_next_dp_init_port(self) -> int:
        """
        We might need to initialize process groups in multiple
        processes related to data parallelism,
        e.g. both in the worker and in the engine, which
        can live in different processes. To avoid port conflicts, we
        pop a new port from the prepared port list each time we need to
        initialize a new process group related to data parallelism.
        """
        if self._data_parallel_master_port_list:
            answer = self._data_parallel_master_port_list.pop()
        else:
            answer = self.data_parallel_master_port
            self.data_parallel_master_port += 1

        return answer

    def stateless_init_dp_group(self) -> ProcessGroup:
        # NOTE: In high-concurrency scenarios multiple processes
        # can pick the same (currently free) port through a race
        # condition when calling `get_open_port()`. When the first
        # process binds the port the others will subsequently fail
        # with `torch.distributed.DistNetworkError: EADDRINUSE`.
        # To make the initialization more robust we retry a few times
        # with a fresh port whenever this specific error is observed.
        from torch.distributed import DistNetworkError

        from vllm.distributed.utils import (
            stateless_init_torch_distributed_process_group)

        max_retries = 5
        last_exc: Optional[Exception] = None
        for _ in range(max_retries):
            try:
                # use gloo since the engine process might not have cuda device
                return stateless_init_torch_distributed_process_group(
                    self.data_parallel_master_ip,
                    self.get_next_dp_init_port(),
                    self.data_parallel_rank,
                    self.data_parallel_size,
                    backend="gloo")
            except DistNetworkError as e:
                # We only want to retry when the root cause is EADDRINUSE.
                if "EADDRINUSE" in str(e):
                    logger.warning(
                        "Address already in use. Retrying with a new port.")
                    last_exc = e
                    continue  # try again with a new port
                raise e

        # If we get here all retries have failed.
        assert last_exc is not None
        raise last_exc

    # The all_reduce at the end of attention (during o_proj) means that
    # inputs are replicated across each rank of the tensor parallel group.
    # If using expert-parallelism with DeepEP All2All ops, replicated
    # tokens results in useless duplicate computation and communication.
    #
    # In this case, ensure the input to the experts is sequence parallel
    # to avoid the excess work.
    #
    # Not needed for pplx-kernels as it can handle duplicate input tokens.
    @property
    def use_sequence_parallel_moe(self) -> bool:
        return (envs.VLLM_ALL2ALL_BACKEND
                in ("allgather_reducescatter", "naive",
                    "deepep_high_throughput", "deepep_low_latency")
                and self.enable_expert_parallel
                and self.tensor_parallel_size > 1
                and self.data_parallel_size > 1)

    @staticmethod
    def has_unfinished_dp(dp_group: ProcessGroup,
                          has_unfinished: bool) -> bool:
        tensor = torch.tensor([has_unfinished],
                              dtype=torch.int32,
                              device="cpu")
        # dp rank 0: has_unfinished_seqs=True
        # dp rank 1: has_unfinished_seqs=False
        # aggregated: has_unfinished_seqs=True
        # so this is an OR operation, i.e. MAX in integers
        torch.distributed.all_reduce(tensor, op=ReduceOp.MAX, group=dp_group)
        aggregated_has_unfinished = bool(tensor.item())
        return aggregated_has_unfinished

    @staticmethod
    def sync_kv_cache_memory_size(dp_group: ProcessGroup,
                                  kv_cache_memory: int) -> int:
        if kv_cache_memory == -1:
            kv_cache_memory = torch.iinfo(torch.int64).max
        tensor = torch.tensor([kv_cache_memory],
                              dtype=torch.int64,
                              device="cpu")
        # we cannot use broadcast for stateless dp group since it depends
        # on global rank
        torch.distributed.all_reduce(tensor, op=ReduceOp.MIN, group=dp_group)
        return tensor.item()

    def compute_hash(self):
        """
        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        factors: list[Any] = []
        factors.append(self.pipeline_parallel_size)
        factors.append(self.tensor_parallel_size)
        factors.append(self.enable_expert_parallel)
        factors.append(self.data_parallel_size)
        factors.append(envs.VLLM_ALL2ALL_BACKEND)
        return hashlib.sha256(str(factors).encode()).hexdigest()

    def __post_init__(self) -> None:
        # Forward deprecated fields to their new location
        if self.num_redundant_experts is not None:
            self.eplb_config.num_redundant_experts = (
                self.num_redundant_experts)
            logger.warning_once(
                "num_redundant_experts is deprecated and has been replaced "
                "with eplb_config.num_redundant_experts. This will be removed "
                "in v0.12.0. Changing this field after initialization will "
                "have no effect.")
        if self.eplb_window_size is not None:
            self.eplb_config.window_size = self.eplb_window_size
            logger.warning_once(
                "eplb_window_size is deprecated and has been replaced "
                "with eplb_config.window_size. This will be removed "
                "in v0.12.0. Changing this field after initialization will "
                "have no effect.")
        if self.eplb_step_interval is not None:
            self.eplb_config.step_interval = self.eplb_step_interval
            logger.warning_once(
                "eplb_step_interval is deprecated and has been replaced "
                "with eplb_config.step_interval. This will be removed "
                "in v0.12.0. Changing this field after initialization will "
                "have no effect.")
        if self.eplb_log_balancedness is not None:
            self.eplb_config.log_balancedness = self.eplb_log_balancedness
            logger.warning_once(
                "eplb_log_balancedness is deprecated and has been replaced "
                "with eplb_config.log_balancedness. This will be removed "
                "in v0.12.0. Changing this field after initialization will "
                "have no effect.")

        # Continue with the rest of the initialization
        self.world_size = self.pipeline_parallel_size * \
            self.tensor_parallel_size

        if self.distributed_executor_backend == "external_launcher":
            logger.info("Using external launcher for distributed inference.")
            self.world_size *= self.data_parallel_size

        if self.data_parallel_size_local > self.data_parallel_size:
            raise ValueError(
                f"data_parallel_size_local ({self.data_parallel_size_local}) "
                f"must be <= data_parallel_size ({self.data_parallel_size})")

        if self.data_parallel_size > 1 or self.data_parallel_size_local == 0:
            # Data parallel was specified in the engine args.
            if self.distributed_executor_backend == "external_launcher":
                # For external launcher,
                # we need to set the data parallel rank automatically
                self.data_parallel_rank = int(os.environ["RANK"]) \
                    // (self.world_size // self.data_parallel_size)
                logger.info("Set data_parallel_rank to %d automatically.",
                            self.data_parallel_rank)
            if not self._data_parallel_master_port_list:
                self._data_parallel_master_port_list = get_open_ports_list(5)
            self.data_parallel_master_port = \
                self._data_parallel_master_port_list.pop()

            if not (0 <= self.data_parallel_rank < self.data_parallel_size):
                raise ValueError(
                    f"data_parallel_rank ({self.data_parallel_rank})"
                    f" must be in the range [0, {self.data_parallel_size})")
        else:
            # Otherwise fall back to env vars (e.g. for offline SPMD case).
            self.data_parallel_size = envs.VLLM_DP_SIZE
            self.data_parallel_rank = envs.VLLM_DP_RANK
            self.data_parallel_rank_local = envs.VLLM_DP_RANK_LOCAL
            self.data_parallel_master_ip = envs.VLLM_DP_MASTER_IP
            self.data_parallel_master_port = envs.VLLM_DP_MASTER_PORT

            if self.data_parallel_external_lb:
                raise ValueError("data_parallel_external_lb can only "
                                 "be set when data_parallel_size > 1")

        if self.distributed_executor_backend == "external_launcher":
            os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
            logger.info("Disabling V1 multiprocessing for external launcher.")

        if self.enable_eplb:
            if not current_platform.is_cuda():
                raise ValueError(
                    "Expert parallelism load balancing is only supported on "
                    "CUDA devices now.")
            if self.eplb_config.num_redundant_experts < 0:
                raise ValueError(
                    "num_redundant_experts must be non-negative, but got "
                    f"{self.eplb_config.num_redundant_experts}.")
            if not self.enable_expert_parallel:
                raise ValueError(
                    "enable_expert_parallel must be True to use EPLB.")
            if self.tensor_parallel_size * self.data_parallel_size <= 1:
                raise ValueError(
                    "EPLB requires tensor_parallel_size or data_parallel_size "
                    f"to be greater than 1, but got "
                    f"TP={self.tensor_parallel_size},DP={self.data_parallel_size}."
                )
        else:
            if self.eplb_config.num_redundant_experts != 0:
                raise ValueError(
                    "num_redundant_experts is set to "
                    f"{self.eplb_config.num_redundant_experts} but EPLB is not "
                    "enabled. Either enable EPLB or unset "
                    "num_redundant_experts.")
        if self.distributed_executor_backend is None and self.world_size > 1:
            # We use multiprocessing by default if world_size fits on the
            # current node and we aren't in a ray placement group.

            from vllm.executor import ray_utils
            backend: DistributedExecutorBackend = "mp"
            ray_found = ray_utils.ray_is_available()
            if current_platform.is_tpu() and envs.VLLM_XLA_USE_SPMD:
                backend = "uni"
            elif (current_platform.is_cuda()
                  and cuda_device_count_stateless() < self.world_size):
                if not ray_found:
                    raise ValueError("Unable to load Ray: "
                                     f"{ray_utils.ray_import_err}. Ray is "
                                     "required for multi-node inference, "
                                     "please install Ray with `pip install "
                                     "ray`.")
                backend = "ray"
            elif self.data_parallel_backend == "ray":
                logger.info("Using ray distributed inference because "
                            "data_parallel_backend is ray")
                backend = "ray"
            elif ray_found:
                if self.placement_group:
                    backend = "ray"
                else:
                    from ray import is_initialized as ray_is_initialized
                    if ray_is_initialized():
                        from ray.util import get_current_placement_group
                        if get_current_placement_group():
                            backend = "ray"
            self.distributed_executor_backend = backend
            logger.debug("Defaulting to use %s for distributed inference",
                         backend)

        if self.distributed_executor_backend is None and self.world_size == 1:
            self.distributed_executor_backend = "uni"

        if not -1 <= self._api_process_rank < self._api_process_count:
            raise ValueError(
                "Invalid value of `_api_process_rank`. "
                f"Expected to be `-1` or `[0, {self._api_process_count})`, "
                f"but found: {self._api_process_rank}")

    @property
    def use_ray(self) -> bool:
        return self.distributed_executor_backend == "ray" or (
            isinstance(self.distributed_executor_backend, type)
            and getattr(self.distributed_executor_backend, "uses_ray", False))

    @model_validator(mode='after')
    def _verify_args(self) -> Self:
        # Lazy import to avoid circular import
        from vllm.executor.executor_base import ExecutorBase
        from vllm.platforms import current_platform
        if self.distributed_executor_backend is not None and not isinstance(
                self.distributed_executor_backend, str) and not (isinstance(
                    self.distributed_executor_backend, type) and issubclass(
                        self.distributed_executor_backend, ExecutorBase)):
            raise ValueError(
                "Unrecognized distributed executor backend "
                f"{self.distributed_executor_backend}. Supported "
                "values are 'ray', 'mp' 'uni', 'external_launcher', "
                " custom ExecutorBase subclass or its import path.")
        if self.use_ray:
            from vllm.executor import ray_utils
            ray_utils.assert_ray_available()

        if not current_platform.use_custom_allreduce():
            self.disable_custom_all_reduce = True
            logger.debug(
                "Disabled the custom all-reduce kernel because it is not "
                "supported on current platform.")
        if self.ray_workers_use_nsight and not self.use_ray:
            raise ValueError("Unable to use nsight profiling unless workers "
                             "run with Ray.")

        return self

_api_process_count class-attribute instance-attribute

_api_process_count: int = 1

The number of API processes initialized.

Note

This is an internal config that is only valid for and should only be set by API server scale-out.

_api_process_rank class-attribute instance-attribute

_api_process_rank: int = 0

The rank of this API process, or -1 for engine core processes under API server scale-out.

Note

This is an internal config that is only valid for and should only be set by API server scale-out.

_data_parallel_master_port_list class-attribute instance-attribute

_data_parallel_master_port_list: list[int] = field(
    default_factory=list
)

List of open ports auto-queried for data parallel messaging. Set to be private as it's not intended to be configured by users.

data_parallel_backend class-attribute instance-attribute

data_parallel_backend: str = 'mp'

Backend to use for data parallel, either "mp" or "ray".

data_parallel_external_lb class-attribute instance-attribute

data_parallel_external_lb: bool = False

Whether to use "external" DP LB mode. Applies only to online serving and when data_parallel_size > 0. This is useful for a "one-pod-per-rank" wide-EP setup in Kubernetes. Set implicitly when --data-parallel-rank is provided explicitly to vllm serve.

data_parallel_hybrid_lb class-attribute instance-attribute

data_parallel_hybrid_lb: bool = False

Whether to use "hybrid" DP LB mode. Applies only to online serving and when data_parallel_size > 0. Enables running an AsyncLLM and API server on a "per-node" basis where vLLM load balances between local data parallel ranks, but an external LB balances between vLLM nodes/replicas. Set explicitly in conjunction with --data-parallel-start-rank.

data_parallel_master_ip class-attribute instance-attribute

data_parallel_master_ip: str = '127.0.0.1'

IP of the data parallel master.

data_parallel_master_port class-attribute instance-attribute

data_parallel_master_port: int = 29500

Port of the data parallel master.

data_parallel_rank class-attribute instance-attribute

data_parallel_rank: int = 0

Rank of the data parallel group.

data_parallel_rank_local class-attribute instance-attribute

data_parallel_rank_local: Optional[int] = None

Local rank of the data parallel group, set only in SPMD mode.

data_parallel_rpc_port class-attribute instance-attribute

data_parallel_rpc_port: int = 29550

Port for data parallel messaging.

data_parallel_size class-attribute instance-attribute

data_parallel_size: int = 1

Number of data parallel groups. MoE layers will be sharded according to the product of the tensor parallel size and data parallel size.

data_parallel_size_local class-attribute instance-attribute

data_parallel_size_local: int = 1

Number of local data parallel groups.

dbo_decode_token_threshold class-attribute instance-attribute

dbo_decode_token_threshold: int = 32

The threshold for dual batch overlap for batches only containing decodes. If the number of tokens in the request is greater than this threshold, microbatching will be used. Otherwise, the request will be processed in a single batch.

dbo_prefill_token_threshold class-attribute instance-attribute

dbo_prefill_token_threshold: int = 512

The threshold for dual batch overlap for batches that contain one or more prefills. If the number of tokens in the request is greater than this threshold, microbatching will be used. Otherwise, the request will be processed in a single batch.

decode_context_parallel_size class-attribute instance-attribute

decode_context_parallel_size: int = 1

Number of decode context parallel groups. Because the world size is not changed by DCP, it simply reuses the GPUs of the TP group, and tp_size needs to be divisible by dcp_size.
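
As a rough standalone sketch of the divisibility constraint above (check_dcp is a hypothetical helper written for illustration, not part of vLLM):

def check_dcp(tensor_parallel_size: int, decode_context_parallel_size: int) -> None:
    # DCP reuses the GPUs of the TP group, so tp_size must be divisible by dcp_size.
    if tensor_parallel_size % decode_context_parallel_size != 0:
        raise ValueError(
            f"tensor_parallel_size ({tensor_parallel_size}) must be divisible by "
            f"decode_context_parallel_size ({decode_context_parallel_size})")

check_dcp(8, 2)    # OK: DCP groups fit evenly inside the TP group
# check_dcp(8, 3)  # would raise ValueError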

disable_custom_all_reduce class-attribute instance-attribute

disable_custom_all_reduce: bool = False

Disable the custom all-reduce kernel and fall back to NCCL.

distributed_executor_backend class-attribute instance-attribute

distributed_executor_backend: Optional[
    Union[
        str, DistributedExecutorBackend, type[ExecutorBase]
    ]
] = None

Backend to use for distributed model workers, either "ray" or "mp" (multiprocessing). If the product of pipeline_parallel_size and tensor_parallel_size is less than or equal to the number of GPUs available, "mp" will be used to keep processing on a single host. Otherwise, this will default to "ray" if Ray is installed, and fail otherwise. Note that TPU only supports Ray for distributed inference.

enable_dbo class-attribute instance-attribute

enable_dbo: bool = False

Enable dual batch overlap for the model executor.

enable_eplb class-attribute instance-attribute

enable_eplb: bool = False

Enable expert parallelism load balancing for MoE layers.

enable_expert_parallel class-attribute instance-attribute

enable_expert_parallel: bool = False

Use expert parallelism instead of tensor parallelism for MoE layers.

eplb_config class-attribute instance-attribute

eplb_config: EPLBConfig = field(default_factory=EPLBConfig)

Expert parallelism configuration.

eplb_log_balancedness class-attribute instance-attribute

eplb_log_balancedness: Optional[bool] = None

eplb_log_balancedness is deprecated and has been replaced with eplb_config.log_balancedness. This will be removed in v0.12.0. Please use eplb_config.log_balancedness instead.

eplb_step_interval class-attribute instance-attribute

eplb_step_interval: Optional[int] = None

eplb_step_interval is deprecated and has been replaced with eplb_config.step_interval. This will be removed in v0.12.0. Please use eplb_config.step_interval instead.

eplb_window_size class-attribute instance-attribute

eplb_window_size: Optional[int] = None

eplb_window_size is deprecated and has been replaced with eplb_config.window_size. This will be removed in v0.12.0. Please use eplb_config.window_size instead.

expert_placement_strategy class-attribute instance-attribute

expert_placement_strategy: ExpertPlacementStrategy = (
    "linear"
)

The expert placement strategy for MoE layers:

  • "linear": Experts are placed in a contiguous manner. For example, with 4 experts and 2 ranks, rank 0 will have experts [0, 1] and rank 1 will have experts [2, 3].

  • "round_robin": Experts are placed in a round-robin manner. For example, with 4 experts and 2 ranks, rank 0 will have experts [0, 2] and rank 1 will have experts [1, 3]. This strategy can help improve load balancing for grouped expert models with no redundant experts.

max_parallel_loading_workers class-attribute instance-attribute

max_parallel_loading_workers: Optional[int] = None

Maximum number of parallel loading workers when loading the model sequentially in multiple batches, to avoid RAM OOM when using tensor parallelism with large models.

num_redundant_experts class-attribute instance-attribute

num_redundant_experts: Optional[int] = None

num_redundant_experts is deprecated and has been replaced with eplb_config.num_redundant_experts. This will be removed in v0.12.0. Please use eplb_config.num_redundant_experts instead.

pipeline_parallel_size class-attribute instance-attribute

pipeline_parallel_size: int = 1

Number of pipeline parallel groups.

placement_group class-attribute instance-attribute

placement_group: Optional[PlacementGroup] = None

ray distributed model workers placement group.

rank class-attribute instance-attribute

rank: int = 0

Global rank in distributed setup.

ray_runtime_env class-attribute instance-attribute

ray_runtime_env: Optional[RuntimeEnv] = None

Ray runtime environment to pass to distributed workers.

ray_workers_use_nsight class-attribute instance-attribute

ray_workers_use_nsight: bool = False

Whether to profile Ray workers with nsight, see https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler.

sd_worker_cls class-attribute instance-attribute

sd_worker_cls: str = 'auto'

The full name of the worker class to use for speculative decoding. If "auto", the worker class will be determined based on the platform.

tensor_parallel_size class-attribute instance-attribute

tensor_parallel_size: int = 1

Number of tensor parallel groups.

use_ray property

use_ray: bool

use_sequence_parallel_moe property

use_sequence_parallel_moe: bool

worker_cls class-attribute instance-attribute

worker_cls: str = 'auto'

The full name of the worker class to use. If "auto", the worker class will be determined based on the platform.

worker_extension_cls class-attribute instance-attribute

worker_extension_cls: str = ''

The full name of the worker extension class to use. The worker extension class is dynamically inherited by the worker class. This is used to inject new attributes and methods to the worker class for use in collective_rpc calls.

world_size class-attribute instance-attribute

world_size: int = field(init=False)

world_size is TP x PP; it affects the number of workers we create.

world_size_across_dp property

world_size_across_dp: int

world_size_across_dp is TP x PP x DP; it is the size of the world including data parallelism.
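
For instance, plugging in illustrative values for the parallel sizes (plain arithmetic, not a vLLM call):

tensor_parallel_size = 2
pipeline_parallel_size = 2
data_parallel_size = 4

world_size = tensor_parallel_size * pipeline_parallel_size  # 4 workers per DP replica
world_size_across_dp = world_size * data_parallel_size      # 16 ranks in total
print(world_size, world_size_across_dp)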

__post_init__

__post_init__() -> None
Source code in vllm/config/parallel.py
def __post_init__(self) -> None:
    # Forward deprecated fields to their new location
    if self.num_redundant_experts is not None:
        self.eplb_config.num_redundant_experts = (
            self.num_redundant_experts)
        logger.warning_once(
            "num_redundant_experts is deprecated and has been replaced "
            "with eplb_config.num_redundant_experts. This will be removed "
            "in v0.12.0. Changing this field after initialization will "
            "have no effect.")
    if self.eplb_window_size is not None:
        self.eplb_config.window_size = self.eplb_window_size
        logger.warning_once(
            "eplb_window_size is deprecated and has been replaced "
            "with eplb_config.window_size. This will be removed "
            "in v0.12.0. Changing this field after initialization will "
            "have no effect.")
    if self.eplb_step_interval is not None:
        self.eplb_config.step_interval = self.eplb_step_interval
        logger.warning_once(
            "eplb_step_interval is deprecated and has been replaced "
            "with eplb_config.step_interval. This will be removed "
            "in v0.12.0. Changing this field after initialization will "
            "have no effect.")
    if self.eplb_log_balancedness is not None:
        self.eplb_config.log_balancedness = self.eplb_log_balancedness
        logger.warning_once(
            "eplb_log_balancedness is deprecated and has been replaced "
            "with eplb_config.log_balancedness. This will be removed "
            "in v0.12.0. Changing this field after initialization will "
            "have no effect.")

    # Continue with the rest of the initialization
    self.world_size = self.pipeline_parallel_size * \
        self.tensor_parallel_size

    if self.distributed_executor_backend == "external_launcher":
        logger.info("Using external launcher for distributed inference.")
        self.world_size *= self.data_parallel_size

    if self.data_parallel_size_local > self.data_parallel_size:
        raise ValueError(
            f"data_parallel_size_local ({self.data_parallel_size_local}) "
            f"must be <= data_parallel_size ({self.data_parallel_size})")

    if self.data_parallel_size > 1 or self.data_parallel_size_local == 0:
        # Data parallel was specified in the engine args.
        if self.distributed_executor_backend == "external_launcher":
            # For external launcher,
            # we need to set the data parallel rank automatically
            self.data_parallel_rank = int(os.environ["RANK"]) \
                // (self.world_size // self.data_parallel_size)
            logger.info("Set data_parallel_rank to %d automatically.",
                        self.data_parallel_rank)
        if not self._data_parallel_master_port_list:
            self._data_parallel_master_port_list = get_open_ports_list(5)
        self.data_parallel_master_port = \
            self._data_parallel_master_port_list.pop()

        if not (0 <= self.data_parallel_rank < self.data_parallel_size):
            raise ValueError(
                f"data_parallel_rank ({self.data_parallel_rank})"
                f" must be in the range [0, {self.data_parallel_size})")
    else:
        # Otherwise fall back to env vars (e.g. for offline SPMD case).
        self.data_parallel_size = envs.VLLM_DP_SIZE
        self.data_parallel_rank = envs.VLLM_DP_RANK
        self.data_parallel_rank_local = envs.VLLM_DP_RANK_LOCAL
        self.data_parallel_master_ip = envs.VLLM_DP_MASTER_IP
        self.data_parallel_master_port = envs.VLLM_DP_MASTER_PORT

        if self.data_parallel_external_lb:
            raise ValueError("data_parallel_external_lb can only "
                             "be set when data_parallel_size > 1")

    if self.distributed_executor_backend == "external_launcher":
        os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0"
        logger.info("Disabling V1 multiprocessing for external launcher.")

    if self.enable_eplb:
        if not current_platform.is_cuda():
            raise ValueError(
                "Expert parallelism load balancing is only supported on "
                "CUDA devices now.")
        if self.eplb_config.num_redundant_experts < 0:
            raise ValueError(
                "num_redundant_experts must be non-negative, but got "
                f"{self.eplb_config.num_redundant_experts}.")
        if not self.enable_expert_parallel:
            raise ValueError(
                "enable_expert_parallel must be True to use EPLB.")
        if self.tensor_parallel_size * self.data_parallel_size <= 1:
            raise ValueError(
                "EPLB requires tensor_parallel_size or data_parallel_size "
                f"to be greater than 1, but got "
                f"TP={self.tensor_parallel_size},DP={self.data_parallel_size}."
            )
    else:
        if self.eplb_config.num_redundant_experts != 0:
            raise ValueError(
                "num_redundant_experts is set to "
                f"{self.eplb_config.num_redundant_experts} but EPLB is not "
                "enabled. Either enable EPLB or unset "
                "num_redundant_experts.")
    if self.distributed_executor_backend is None and self.world_size > 1:
        # We use multiprocessing by default if world_size fits on the
        # current node and we aren't in a ray placement group.

        from vllm.executor import ray_utils
        backend: DistributedExecutorBackend = "mp"
        ray_found = ray_utils.ray_is_available()
        if current_platform.is_tpu() and envs.VLLM_XLA_USE_SPMD:
            backend = "uni"
        elif (current_platform.is_cuda()
              and cuda_device_count_stateless() < self.world_size):
            if not ray_found:
                raise ValueError("Unable to load Ray: "
                                 f"{ray_utils.ray_import_err}. Ray is "
                                 "required for multi-node inference, "
                                 "please install Ray with `pip install "
                                 "ray`.")
            backend = "ray"
        elif self.data_parallel_backend == "ray":
            logger.info("Using ray distributed inference because "
                        "data_parallel_backend is ray")
            backend = "ray"
        elif ray_found:
            if self.placement_group:
                backend = "ray"
            else:
                from ray import is_initialized as ray_is_initialized
                if ray_is_initialized():
                    from ray.util import get_current_placement_group
                    if get_current_placement_group():
                        backend = "ray"
        self.distributed_executor_backend = backend
        logger.debug("Defaulting to use %s for distributed inference",
                     backend)

    if self.distributed_executor_backend is None and self.world_size == 1:
        self.distributed_executor_backend = "uni"

    if not -1 <= self._api_process_rank < self._api_process_count:
        raise ValueError(
            "Invalid value of `_api_process_rank`. "
            f"Expected to be `-1` or `[0, {self._api_process_count})`, "
            f"but found: {self._api_process_rank}")

_verify_args

_verify_args() -> Self
Source code in vllm/config/parallel.py
@model_validator(mode='after')
def _verify_args(self) -> Self:
    # Lazy import to avoid circular import
    from vllm.executor.executor_base import ExecutorBase
    from vllm.platforms import current_platform
    if self.distributed_executor_backend is not None and not isinstance(
            self.distributed_executor_backend, str) and not (isinstance(
                self.distributed_executor_backend, type) and issubclass(
                    self.distributed_executor_backend, ExecutorBase)):
        raise ValueError(
            "Unrecognized distributed executor backend "
            f"{self.distributed_executor_backend}. Supported "
            "values are 'ray', 'mp' 'uni', 'external_launcher', "
            " custom ExecutorBase subclass or its import path.")
    if self.use_ray:
        from vllm.executor import ray_utils
        ray_utils.assert_ray_available()

    if not current_platform.use_custom_allreduce():
        self.disable_custom_all_reduce = True
        logger.debug(
            "Disabled the custom all-reduce kernel because it is not "
            "supported on current platform.")
    if self.ray_workers_use_nsight and not self.use_ray:
        raise ValueError("Unable to use nsight profiling unless workers "
                         "run with Ray.")

    return self

compute_hash

compute_hash()

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/parallel.py
def compute_hash(self):
    """
    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    factors: list[Any] = []
    factors.append(self.pipeline_parallel_size)
    factors.append(self.tensor_parallel_size)
    factors.append(self.enable_expert_parallel)
    factors.append(self.data_parallel_size)
    factors.append(envs.VLLM_ALL2ALL_BACKEND)
    return hashlib.sha256(str(factors).encode()).hexdigest()

get_next_dp_init_port

get_next_dp_init_port() -> int

We might need to initialize process groups in multiple processes related to data parallelism, e.g. both in the worker and in the engine, which can live in different processes. To avoid port conflicts, we pop a new port from the prepared port list each time we need to initialize a new process group related to data parallelism.

Source code in vllm/config/parallel.py
def get_next_dp_init_port(self) -> int:
    """
    We might need to initialize process groups in multiple
    processes related to data parallelism,
    e.g. both in the worker and in the engine, which
    can live in different processes. To avoid port conflicts, we
    pop a new port from the prepared port list each time we need to
    initialize a new process group related to data parallelism.
    """
    if self._data_parallel_master_port_list:
        answer = self._data_parallel_master_port_list.pop()
    else:
        answer = self.data_parallel_master_port
        self.data_parallel_master_port += 1

    return answer

has_unfinished_dp staticmethod

has_unfinished_dp(
    dp_group: ProcessGroup, has_unfinished: bool
) -> bool
Source code in vllm/config/parallel.py
@staticmethod
def has_unfinished_dp(dp_group: ProcessGroup,
                      has_unfinished: bool) -> bool:
    tensor = torch.tensor([has_unfinished],
                          dtype=torch.int32,
                          device="cpu")
    # dp rank 0: has_unfinished_seqs=True
    # dp rank 1: has_unfinished_seqs=False
    # aggregated: has_unfinished_seqs=True
    # so this is an OR operation, i.e. MAX in integers
    torch.distributed.all_reduce(tensor, op=ReduceOp.MAX, group=dp_group)
    aggregated_has_unfinished = bool(tensor.item())
    return aggregated_has_unfinished
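
A tiny standalone illustration of the comment in the code above: over 0/1 integers, a MAX reduction behaves like a logical OR (no process group is needed for this demo):

import torch

# Per-DP-rank "has unfinished requests" flags encoded as 0/1 integers.
flags = torch.tensor([1, 0, 0], dtype=torch.int32)

# ReduceOp.MAX over these values yields 1 if any rank is unfinished.
print(bool(flags.max().item()))  # True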

stateless_init_dp_group

stateless_init_dp_group() -> ProcessGroup
Source code in vllm/config/parallel.py
def stateless_init_dp_group(self) -> ProcessGroup:
    # NOTE: In high-concurrency scenarios multiple processes
    # can pick the same (currently free) port through a race
    # condition when calling `get_open_port()`. When the first
    # process binds the port the others will subsequently fail
    # with `torch.distributed.DistNetworkError: EADDRINUSE`.
    # To make the initialization more robust we retry a few times
    # with a fresh port whenever this specific error is observed.
    from torch.distributed import DistNetworkError

    from vllm.distributed.utils import (
        stateless_init_torch_distributed_process_group)

    max_retries = 5
    last_exc: Optional[Exception] = None
    for _ in range(max_retries):
        try:
            # use gloo since the engine process might not have cuda device
            return stateless_init_torch_distributed_process_group(
                self.data_parallel_master_ip,
                self.get_next_dp_init_port(),
                self.data_parallel_rank,
                self.data_parallel_size,
                backend="gloo")
        except DistNetworkError as e:
            # We only want to retry when the root cause is EADDRINUSE.
            if "EADDRINUSE" in str(e):
                logger.warning(
                    "Address already in use. Retrying with a new port.")
                last_exc = e
                continue  # try again with a new port
            raise e

    # If we get here all retries have failed.
    assert last_exc is not None
    raise last_exc

sync_kv_cache_memory_size staticmethod

sync_kv_cache_memory_size(
    dp_group: ProcessGroup, kv_cache_memory: int
) -> int
Source code in vllm/config/parallel.py
@staticmethod
def sync_kv_cache_memory_size(dp_group: ProcessGroup,
                              kv_cache_memory: int) -> int:
    if kv_cache_memory == -1:
        kv_cache_memory = torch.iinfo(torch.int64).max
    tensor = torch.tensor([kv_cache_memory],
                          dtype=torch.int64,
                          device="cpu")
    # we cannot use broadcast for stateless dp group since it depends
    # on global rank
    torch.distributed.all_reduce(tensor, op=ReduceOp.MIN, group=dp_group)
    return tensor.item()
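
The -1 sentinel handling can be illustrated standalone: mapping "not set" to the int64 maximum guarantees it never wins the MIN reduction against ranks that reported a real budget (the byte values below are made up):

import torch

values = [-1, 8 * 2**30, 6 * 2**30]  # per-rank KV cache budgets; -1 means "not set"
sentinel_free = [torch.iinfo(torch.int64).max if v == -1 else v for v in values]
print(min(sentinel_free))  # 6442450944, i.e. the smallest real budget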

PassConfig

Configuration for custom Inductor passes.

This is separate from general CompilationConfig so that inductor passes don't all have access to full configuration - that would create a cycle as the PassManager is set as a property of config.

Source code in vllm/config/compilation.py
@config
@dataclass
class PassConfig:
    """Configuration for custom Inductor passes.

    This is separate from general `CompilationConfig` so that inductor passes
    don't all have access to full configuration - that would create a cycle as
    the `PassManager` is set as a property of config."""

    enable_fusion: bool = False
    """Whether to enable the custom fusion (RMSNorm/SiluMul+quant) pass."""
    enable_attn_fusion: bool = False
    """Whether to enable the custom attention+quant fusion pass."""
    enable_noop: bool = False
    """Whether to enable the custom no-op elimination pass."""
    enable_sequence_parallelism: bool = False
    """Whether to enable sequence parallelism."""
    enable_async_tp: bool = False
    """Whether to enable async TP."""
    enable_fi_allreduce_fusion: bool = False
    """Whether to enable flashinfer allreduce fusion."""
    fi_allreduce_fusion_max_token_num: int = 16384
    """Max number of tokens to used in flashinfer allreduce fusion."""

    # TODO(luka) better pass enabling system.

    def uuid(self):
        """
        Produces a hash unique to the pass configuration.
        Any new fields that affect compilation should be added to the hash.
        Any future fields that don't affect compilation should be excluded.
        """
        return InductorPass.hash_dict(asdict(self))

    def __post_init__(self) -> None:
        if not self.enable_noop:
            if self.enable_fusion:
                logger.warning_once(
                    "Fusion enabled but reshape elimination disabled. "
                    "RMSNorm/SiluMul + quant (fp8) fusion might not work")
            if self.enable_attn_fusion:
                logger.warning_once(
                    "Fusion enabled but reshape elimination disabled. "
                    "Attention + quant (fp8) fusion might not work")

enable_async_tp class-attribute instance-attribute

enable_async_tp: bool = False

Whether to enable async TP.

enable_attn_fusion class-attribute instance-attribute

enable_attn_fusion: bool = False

Whether to enable the custom attention+quant fusion pass.

enable_fi_allreduce_fusion class-attribute instance-attribute

enable_fi_allreduce_fusion: bool = False

Whether to enable flashinfer allreduce fusion.

enable_fusion class-attribute instance-attribute

enable_fusion: bool = False

Whether to enable the custom fusion (RMSNorm/SiluMul+quant) pass.

enable_noop class-attribute instance-attribute

enable_noop: bool = False

Whether to enable the custom no-op elimination pass.

enable_sequence_parallelism class-attribute instance-attribute

enable_sequence_parallelism: bool = False

Whether to enable sequence parallelism.

fi_allreduce_fusion_max_token_num class-attribute instance-attribute

fi_allreduce_fusion_max_token_num: int = 16384

Max number of tokens to use in flashinfer allreduce fusion.

__post_init__

__post_init__() -> None
Source code in vllm/config/compilation.py
def __post_init__(self) -> None:
    if not self.enable_noop:
        if self.enable_fusion:
            logger.warning_once(
                "Fusion enabled but reshape elimination disabled. "
                "RMSNorm/SiluMul + quant (fp8) fusion might not work")
        if self.enable_attn_fusion:
            logger.warning_once(
                "Fusion enabled but reshape elimination disabled. "
                "Attention + quant (fp8) fusion might not work")

uuid

uuid()

Produces a hash unique to the pass configuration. Any new fields that affect compilation should be added to the hash. Any future fields that don't affect compilation should be excluded.

Source code in vllm/config/compilation.py
def uuid(self):
    """
    Produces a hash unique to the pass configuration.
    Any new fields that affect compilation should be added to the hash.
    Any future fields that don't affect compilation should be excluded.
    """
    return InductorPass.hash_dict(asdict(self))
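
A minimal usage sketch, assuming vLLM is installed and PassConfig is importable from vllm.config; the field names are the ones documented above:

from vllm.config import PassConfig

# Enabling no-op elimination alongside fusion avoids the __post_init__ warning
# about reshape elimination being disabled.
pass_config = PassConfig(enable_noop=True, enable_fusion=True)

# uuid() hashes the dataclass fields, so two identical configs share a hash.
assert pass_config.uuid() == PassConfig(enable_noop=True, enable_fusion=True).uuid()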

PoolerConfig

Controls the behavior of output pooling in pooling models.

Source code in vllm/config/pooler.py
@config
@dataclass
class PoolerConfig:
    """Controls the behavior of output pooling in pooling models."""

    pooling_type: Optional[str] = None
    """
    The pooling method of the pooling model. This should be a key in
    [`vllm.model_executor.layers.pooler.PoolingType`][].
    """

    ## for embeddings models
    normalize: Optional[bool] = None
    """
    Whether to normalize the embeddings outputs. Defaults to True.
    """
    dimensions: Optional[int] = None
    """
    Reduce the dimensions of embeddings if the model
    supports matryoshka representation. Defaults to None.
    """
    enable_chunked_processing: Optional[bool] = None
    """
    Whether to enable chunked processing for long inputs that exceed the model's
    maximum position embeddings. When enabled, long inputs will be split into
    chunks, processed separately, and then aggregated using weighted averaging.
    This allows embedding models to handle arbitrarily long text without CUDA
    errors. Defaults to False.
    """
    max_embed_len: Optional[int] = None
    """
    Maximum input length allowed for embedding generation. When set, allows
    inputs longer than max_embed_len to be accepted for embedding models.
    When an input exceeds max_embed_len, it will be handled according to 
    the original max_model_len validation logic. 
    Defaults to None (i.e. set to max_model_len).
    """

    ## for classification models
    activation: Optional[bool] = None
    """
    Whether to apply activation function to the classification outputs.
    Defaults to True.
    """
    logit_bias: Optional[float] = None
    """
    If provided, apply classification logit biases. Defaults to None.
    """

    ## for reward models
    softmax: Optional[bool] = None
    """
    Whether to apply softmax to the reward outputs.
    Defaults to True.
    """
    step_tag_id: Optional[int] = None
    """
    If set, only the score corresponding to the ``step_tag_id`` in the
    generated sentence should be returned. Otherwise, the scores for all tokens
    are returned.
    """
    returned_token_ids: Optional[list[int]] = None
    """
    A list of indices for the vocabulary dimensions to be extracted,
    such as the token IDs of ``good_token`` and ``bad_token`` in the
    ``math-shepherd-mistral-7b-prm`` model.
    """

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

activation class-attribute instance-attribute

activation: Optional[bool] = None

Whether to apply activation function to the classification outputs. Defaults to True.

dimensions class-attribute instance-attribute

dimensions: Optional[int] = None

Reduce the dimensions of embeddings if the model supports matryoshka representation. Defaults to None.

enable_chunked_processing class-attribute instance-attribute

enable_chunked_processing: Optional[bool] = None

Whether to enable chunked processing for long inputs that exceed the model's maximum position embeddings. When enabled, long inputs will be split into chunks, processed separately, and then aggregated using weighted averaging. This allows embedding models to handle arbitrarily long text without CUDA errors. Defaults to False.

logit_bias class-attribute instance-attribute

logit_bias: Optional[float] = None

If provided, apply classification logit biases. Defaults to None.

max_embed_len class-attribute instance-attribute

max_embed_len: Optional[int] = None

Maximum input length allowed for embedding generation. When set, allows inputs longer than max_embed_len to be accepted for embedding models. When an input exceeds max_embed_len, it will be handled according to the original max_model_len validation logic. Defaults to None (i.e. set to max_model_len).

normalize class-attribute instance-attribute

normalize: Optional[bool] = None

Whether to normalize the embeddings outputs. Defaults to True.

pooling_type class-attribute instance-attribute

pooling_type: Optional[str] = None

The pooling method of the pooling model. This should be a key in vllm.model_executor.layers.pooler.PoolingType.

returned_token_ids class-attribute instance-attribute

returned_token_ids: Optional[list[int]] = None

A list of indices for the vocabulary dimensions to be extracted, such as the token IDs of good_token and bad_token in the math-shepherd-mistral-7b-prm model.

softmax class-attribute instance-attribute

softmax: Optional[bool] = None

Whether to apply softmax to the reward outputs. Defaults to True.

step_tag_id class-attribute instance-attribute

step_tag_id: Optional[int] = None

If set, only the score corresponding to the step_tag_id in the generated sentence should be returned. Otherwise, the scores for all tokens are returned.

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/pooler.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # this config will not affect the computation graph.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str
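
A minimal usage sketch, assuming vLLM is installed and that "MEAN" is a valid PoolingType key; the field values below are illustrative only:

from vllm.config import PoolerConfig

pooler_config = PoolerConfig(
    pooling_type="MEAN",  # a key in vllm.model_executor.layers.pooler.PoolingType
    normalize=True,       # normalize embedding outputs
    dimensions=256,       # reduce embedding dimensions (matryoshka-capable models only)
)

# PoolerConfig does not affect the computation graph, so the hash is constant.
print(pooler_config.compute_hash())

In a full engine setup this config is normally supplied through the engine/model arguments rather than constructed standalone.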

SchedulerConfig

Scheduler configuration.

Source code in vllm/config/scheduler.py
@config
@dataclass
class SchedulerConfig:
    """Scheduler configuration."""

    runner_type: RunnerType = "generate"
    """The runner type to launch for the model."""

    max_num_batched_tokens: SkipValidation[int] = None  # type: ignore
    """Maximum number of tokens to be processed in a single iteration.

    This config has no static default. If left unspecified by the user, it will
    be set in `EngineArgs.create_engine_config` based on the usage context."""

    max_num_seqs: SkipValidation[int] = None  # type: ignore
    """Maximum number of sequences to be processed in a single iteration.

    This config has no static default. If left unspecified by the user, it will
    be set in `EngineArgs.create_engine_config` based on the usage context."""

    max_model_len: SkipValidation[int] = None  # type: ignore
    """Maximum length of a sequence (including prompt and generated text). This
    is primarily set in `ModelConfig` and that value should be manually
    duplicated here."""

    max_num_partial_prefills: int = 1
    """For chunked prefill, the maximum number of sequences that can be
    partially prefilled concurrently."""

    max_long_partial_prefills: int = 1
    """For chunked prefill, the maximum number of prompts longer than
    long_prefill_token_threshold that will be prefilled concurrently. Setting
    this less than max_num_partial_prefills will allow shorter prompts to jump
    the queue in front of longer prompts in some cases, improving latency."""

    long_prefill_token_threshold: int = 0
    """For chunked prefill, a request is considered long if the prompt is
    longer than this number of tokens."""

    num_lookahead_slots: int = 0
    """The number of slots to allocate per sequence per
    step, beyond the known token ids. This is used in speculative
    decoding to store KV activations of tokens which may or may not be
    accepted.

    NOTE: This will be replaced by speculative config in the future; it is
    present to enable correctness tests until then."""

    cuda_graph_sizes: list[int] = field(default_factory=list)
    """Cuda graph capture sizes
    1. if none provided, then default set to [min(max_num_seqs * 2, 512)]
    2. if one value is provided, then the capture list would follow the
    pattern: [1, 2, 4] + [i for i in range(8, cuda_graph_sizes + 1, 8)]
    3. if more than one value is provided (e.g. 1 2 128), then the capture
    list will follow the provided list."""

    enable_chunked_prefill: SkipValidation[bool] = None  # type: ignore
    """If True, prefill requests can be chunked based
    on the remaining max_num_batched_tokens."""

    is_multimodal_model: bool = False
    """True if the model is multimodal."""

    is_encoder_decoder: InitVar[bool] = False
    """True if the model is an encoder-decoder model.

    Note: This is stored in the ModelConfig, and is used only here to
    disable chunked prefill and prefix caching for encoder-decoder models.
    """

    # TODO (ywang96): Make this configurable.
    max_num_encoder_input_tokens: int = field(init=False)
    """Multimodal encoder compute budget, only used in V1.

    NOTE: This is not currently configurable. It will be overridden by
    max_num_batched_tokens in case max multimodal embedding size is larger."""

    # TODO (ywang96): Make this configurable.
    encoder_cache_size: int = field(init=False)
    """Multimodal encoder cache size, only used in V1.

    NOTE: This is not currently configurable. It will be overridden by
    max_num_batched_tokens in case max multimodal embedding size is larger."""

    send_delta_data: bool = False
    """Private API. If used, scheduler sends delta data to
    workers instead of an entire data. It should be enabled only
    when SPMD worker architecture is enabled. I.e.,
    VLLM_USE_RAY_SPMD_WORKER=1"""

    policy: SchedulerPolicy = "fcfs"
    """The scheduling policy to use:\n
    - "fcfs" means first come first served, i.e. requests are handled in order
    of arrival.\n
    - "priority" means requests are handled based on given priority (lower
    value means earlier handling), with time of arrival deciding any ties."""

    chunked_prefill_enabled: bool = field(init=False)
    """True if chunked prefill is enabled."""

    disable_chunked_mm_input: bool = False
    """If set to true and chunked prefill is enabled, we do not want to
    partially schedule a multimodal item. Only used in V1.
    This ensures that if a request has a mixed prompt
    (like text tokens TTTT followed by image tokens IIIIIIIIII) where only
    some image tokens can be scheduled (like TTTTIIIII, leaving IIIII),
    it will be scheduled as TTTT in one step and IIIIIIIIII in the next."""

    # scheduler class or path. "vllm.core.scheduler.Scheduler" (default)
    # or "mod.custom_class".
    scheduler_cls: Union[str, type[object]] = "vllm.core.scheduler.Scheduler"
    """The scheduler class to use. "vllm.core.scheduler.Scheduler" is the
    default scheduler. Can be a class directly or the path to a class of form
    "mod.custom_class"."""

    disable_hybrid_kv_cache_manager: bool = False
    """If set to True, KV cache manager will allocate the same size of KV cache
    for all attention layers even if there are multiple type of attention layers
    like full attention and sliding window attention.
    """

    async_scheduling: bool = False
    """EXPERIMENTAL: If set to True, perform async scheduling. This may help
    reduce the CPU overheads, leading to better latency and throughput. However,
    async scheduling is currently not supported with some features such as
    structured outputs, speculative decoding, and pipeline parallelism.
    """

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self, is_encoder_decoder: bool) -> None:
        if self.max_model_len is None:
            self.max_model_len = 8192

        if self.max_num_seqs is None:
            self.max_num_seqs = 128

        if is_encoder_decoder:
            # Chunked prefill should be disabled for encoder-decoder models.
            self.disable_chunked_mm_input = True
            self.chunked_prefill_enabled = False
            self.enable_chunked_prefill = False
            self.long_prefill_token_threshold = 0
            logger.info(
                "Encoder-decoder models do not support chunked prefill nor"
                " prefix caching; disabling both.")

        if self.max_num_batched_tokens is None:
            if self.enable_chunked_prefill:
                self.max_num_batched_tokens = DEFAULT_MAX_NUM_BATCHED_TOKENS
            else:
                # If max_model_len is too short, use
                # DEFAULT_MAX_NUM_BATCHED_TOKENS as the default value
                # for higher throughput.
                self.max_num_batched_tokens = max(
                    self.max_model_len, DEFAULT_MAX_NUM_BATCHED_TOKENS)

            if self.runner_type == "pooling":
                # Choose specific value for higher throughput
                self.max_num_batched_tokens = max(
                    self.max_num_batched_tokens,
                    POOLING_MODEL_MAX_NUM_BATCHED_TOKENS,
                )
            if self.is_multimodal_model:
                # The value needs to be at least the number of multimodal tokens
                self.max_num_batched_tokens = max(
                    self.max_num_batched_tokens,
                    MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS,
                )

            # When using default settings,
            # Ensure max_num_batched_tokens does not exceed model limit.
            # Some models (e.g., Whisper) have embeddings tied to max length.
            self.max_num_batched_tokens = min(
                self.max_num_seqs * self.max_model_len,
                self.max_num_batched_tokens)

        self.max_num_encoder_input_tokens = self.max_num_batched_tokens
        self.encoder_cache_size = self.max_num_batched_tokens

        if self.enable_chunked_prefill:
            logger.info(
                "Chunked prefill is enabled with max_num_batched_tokens=%d.",
                self.max_num_batched_tokens)

        self.chunked_prefill_enabled = self.enable_chunked_prefill
        if self.max_num_partial_prefills > 1:
            if self.long_prefill_token_threshold == 0:
                self.long_prefill_token_threshold = int(self.max_model_len *
                                                        0.04)

            logger.info(
                "Concurrent partial prefills enabled with "
                "max_num_partial_prefills=%d, max_long_partial_prefills=%d, "
                "long_prefill_token_threshold=%d",
                self.max_num_partial_prefills, self.max_long_partial_prefills,
                self.long_prefill_token_threshold)

        # NOTE: Default set cuda_graph_sizes to [min(max_num_seqs * 2, 512)].
        # This avoids OOM in tight memory scenarios with small max_num_seqs,
        # and prevents capture of many large graphs (>512) that would greatly
        # increase startup time with limited performance benefit.
        if not self.cuda_graph_sizes:
            self.cuda_graph_sizes = [min(self.max_num_seqs * 2, 512)]

        if self.async_scheduling:
            self.scheduler_cls = (
                "vllm.v1.core.sched.async_scheduler.AsyncScheduler")

    @model_validator(mode='after')
    def _verify_args(self) -> Self:
        if (self.max_num_batched_tokens < self.max_model_len
                and not self.chunked_prefill_enabled):
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
                f"smaller than max_model_len ({self.max_model_len}). "
                "This effectively limits the maximum sequence length to "
                "max_num_batched_tokens and makes vLLM reject longer "
                "sequences. Please increase max_num_batched_tokens or "
                "decrease max_model_len.")

        if self.max_num_batched_tokens < self.max_num_seqs:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
                "be greater than or equal to max_num_seqs "
                f"({self.max_num_seqs}).")

        if self.max_num_batched_tokens > self.max_num_seqs * self.max_model_len:
            logger.warning(
                "max_num_batched_tokens (%d) exceeds max_num_seqs "
                "* max_model_len (%d). This may lead to unexpected behavior.",
                self.max_num_batched_tokens,
                self.max_num_seqs * self.max_model_len)

        if self.num_lookahead_slots < 0:
            raise ValueError(
                "num_lookahead_slots "
                f"({self.num_lookahead_slots}) must be greater than or "
                "equal to 0.")

        if self.max_num_partial_prefills < 1:
            raise ValueError(
                f"max_num_partial_prefills ({self.max_num_partial_prefills}) "
                "must be greater than or equal to 1.")
        elif self.max_num_partial_prefills > 1:
            if not self.chunked_prefill_enabled:
                raise ValueError("Chunked prefill must be enabled to set "
                                 "max_num_partial_prefills > 1.")

            if self.long_prefill_token_threshold > self.max_model_len:
                raise ValueError(
                    "long_prefill_token_threshold "
                    f"({self.long_prefill_token_threshold}) cannot be greater "
                    f"than the max_model_len ({self.max_model_len}).")

        if (self.max_long_partial_prefills
                < 1) or (self.max_long_partial_prefills
                         > self.max_num_partial_prefills):
            raise ValueError(
                f"max_long_partial_prefills ({self.max_long_partial_prefills}) "
                "must be greater than or equal to 1 and less than or equal to "
                f"max_num_partial_prefills ({self.max_num_partial_prefills}).")

        return self

async_scheduling class-attribute instance-attribute

async_scheduling: bool = False

EXPERIMENTAL: If set to True, perform async scheduling. This may help reduce the CPU overheads, leading to better latency and throughput. However, async scheduling is currently not supported with some features such as structured outputs, speculative decoding, and pipeline parallelism.

chunked_prefill_enabled class-attribute instance-attribute

chunked_prefill_enabled: bool = field(init=False)

True if chunked prefill is enabled.

cuda_graph_sizes class-attribute instance-attribute

cuda_graph_sizes: list[int] = field(default_factory=list)

CUDA graph capture sizes (see the sketch below for a worked expansion):

  1. If none is provided, the default is set to [min(max_num_seqs * 2, 512)].

  2. If one value is provided, the capture list follows the pattern [1, 2, 4] + [i for i in range(8, cuda_graph_sizes + 1, 8)].

  3. If more than one value is provided (e.g. 1 2 128), the capture list follows the provided list.
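
A minimal sketch of how the three cases above could expand into a concrete capture list. The helper name resolve_cuda_graph_sizes is hypothetical and only mirrors the rule stated in this docstring, not vLLM's internal implementation:

def resolve_cuda_graph_sizes(cuda_graph_sizes: list[int], max_num_seqs: int) -> list[int]:
    # Case 1: nothing provided -> a single default size.
    if not cuda_graph_sizes:
        return [min(max_num_seqs * 2, 512)]
    # Case 2: one value provided -> the padded pattern up to that value.
    if len(cuda_graph_sizes) == 1:
        upper = cuda_graph_sizes[0]
        return [1, 2, 4] + list(range(8, upper + 1, 8))
    # Case 3: an explicit list is used as-is.
    return list(cuda_graph_sizes)

print(resolve_cuda_graph_sizes([], 128))           # [256]
print(resolve_cuda_graph_sizes([32], 128))         # [1, 2, 4, 8, 16, 24, 32]
print(resolve_cuda_graph_sizes([1, 2, 128], 128))  # [1, 2, 128]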

disable_chunked_mm_input class-attribute instance-attribute

disable_chunked_mm_input: bool = False

If set to true and chunked prefill is enabled, we do not want to partially schedule a multimodal item. Only used in V1. This ensures that if a request has a mixed prompt (like text tokens TTTT followed by image tokens IIIIIIIIII) where only some image tokens can be scheduled (like TTTTIIIII, leaving IIIII), it will be scheduled as TTTT in one step and IIIIIIIIII in the next.

disable_hybrid_kv_cache_manager class-attribute instance-attribute

disable_hybrid_kv_cache_manager: bool = False

If set to True, KV cache manager will allocate the same size of KV cache for all attention layers even if there are multiple types of attention layers, such as full attention and sliding window attention.

enable_chunked_prefill class-attribute instance-attribute

enable_chunked_prefill: SkipValidation[bool] = None

If True, prefill requests can be chunked based on the remaining max_num_batched_tokens.

encoder_cache_size class-attribute instance-attribute

encoder_cache_size: int = field(init=False)

Multimodal encoder cache size, only used in V1.

NOTE: This is not currently configurable. It will be overridden by max_num_batched_tokens in case max multimodal embedding size is larger.

is_multimodal_model class-attribute instance-attribute

is_multimodal_model: bool = False

True if the model is multimodal.

long_prefill_token_threshold class-attribute instance-attribute

long_prefill_token_threshold: int = 0

For chunked prefill, a request is considered long if the prompt is longer than this number of tokens.

max_long_partial_prefills class-attribute instance-attribute

max_long_partial_prefills: int = 1

For chunked prefill, the maximum number of prompts longer than long_prefill_token_threshold that will be prefilled concurrently. Setting this less than max_num_partial_prefills will allow shorter prompts to jump the queue in front of longer prompts in some cases, improving latency.
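
A short worked example of how these settings interact, using the default threshold rule applied in __post_init__ on this page; the specific values are illustrative:

max_model_len = 8192
max_num_partial_prefills = 4
max_long_partial_prefills = 2
long_prefill_token_threshold = 0

# Default applied when concurrent partial prefills are enabled and no
# threshold was set explicitly (4% of the max model length).
if max_num_partial_prefills > 1 and long_prefill_token_threshold == 0:
    long_prefill_token_threshold = int(max_model_len * 0.04)

print(long_prefill_token_threshold)  # 327
# With these values, up to 4 prompts may be prefilled concurrently, but at
# most 2 of them may be longer than 327 tokens.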

max_model_len class-attribute instance-attribute

max_model_len: SkipValidation[int] = None

Maximum length of a sequence (including prompt and generated text). This is primarily set in ModelConfig and that value should be manually duplicated here.

max_num_batched_tokens class-attribute instance-attribute

max_num_batched_tokens: SkipValidation[int] = None

Maximum number of tokens to be processed in a single iteration.

This config has no static default. If left unspecified by the user, it will be set in EngineArgs.create_engine_config based on the usage context.

max_num_encoder_input_tokens class-attribute instance-attribute

max_num_encoder_input_tokens: int = field(init=False)

Multimodal encoder compute budget, only used in V1.

NOTE: This is not currently configurable. It will be overridden by max_num_batched_tokens in case max multimodal embedding size is larger.

max_num_partial_prefills class-attribute instance-attribute

max_num_partial_prefills: int = 1

For chunked prefill, the maximum number of sequences that can be partially prefilled concurrently.

max_num_seqs class-attribute instance-attribute

max_num_seqs: SkipValidation[int] = None

Maximum number of sequences to be processed in a single iteration.

This config has no static default. If left unspecified by the user, it will be set in EngineArgs.create_engine_config based on the usage context.

num_lookahead_slots class-attribute instance-attribute

num_lookahead_slots: int = 0

The number of slots to allocate per sequence per step, beyond the known token ids. This is used in speculative decoding to store KV activations of tokens which may or may not be accepted.

NOTE: This will be replaced by speculative config in the future; it is present to enable correctness tests until then.

policy class-attribute instance-attribute

policy: SchedulerPolicy = 'fcfs'

The scheduling policy to use:

  • "fcfs" means first come first served, i.e. requests are handled in order of arrival.

  • "priority" means requests are handled based on given priority (lower value means earlier handling) and time of arrival deciding any ties).

runner_type class-attribute instance-attribute

runner_type: RunnerType = 'generate'

The runner type to launch for the model.

scheduler_cls class-attribute instance-attribute

scheduler_cls: Union[str, type[object]] = (
    "vllm.core.scheduler.Scheduler"
)

The scheduler class to use. "vllm.core.scheduler.Scheduler" is the default scheduler. Can be a class directly or the path to a class of form "mod.custom_class".
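
A hedged sketch of how a "mod.custom_class" style path can be resolved into a class. The helper below is a generic importlib-based illustration, not vLLM's own resolution code:

import importlib


def resolve_scheduler_cls(path_or_cls):
    # A class passed directly is used as-is.
    if not isinstance(path_or_cls, str):
        return path_or_cls
    # "mod.custom_class" -> import "mod", then fetch "custom_class".
    module_name, _, class_name = path_or_cls.rpartition(".")
    return getattr(importlib.import_module(module_name), class_name)


# For example, resolve_scheduler_cls("collections.OrderedDict") returns
# the collections.OrderedDict class.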

send_delta_data class-attribute instance-attribute

send_delta_data: bool = False

Private API. If used, the scheduler sends delta data to workers instead of the entire data. It should be enabled only when the SPMD worker architecture is enabled, i.e., VLLM_USE_RAY_SPMD_WORKER=1.

__post_init__

__post_init__(is_encoder_decoder: bool) -> None
Source code in vllm/config/scheduler.py
def __post_init__(self, is_encoder_decoder: bool) -> None:
    if self.max_model_len is None:
        self.max_model_len = 8192

    if self.max_num_seqs is None:
        self.max_num_seqs = 128

    if is_encoder_decoder:
        # Chunked prefill should be disabled for encoder-decoder models.
        self.disable_chunked_mm_input = True
        self.chunked_prefill_enabled = False
        self.enable_chunked_prefill = False
        self.long_prefill_token_threshold = 0
        logger.info(
            "Encoder-decoder models do not support chunked prefill nor"
            " prefix caching; disabling both.")

    if self.max_num_batched_tokens is None:
        if self.enable_chunked_prefill:
            self.max_num_batched_tokens = DEFAULT_MAX_NUM_BATCHED_TOKENS
        else:
            # If max_model_len is too short, use
            # DEFAULT_MAX_NUM_BATCHED_TOKENS as the default value
            # for higher throughput.
            self.max_num_batched_tokens = max(
                self.max_model_len, DEFAULT_MAX_NUM_BATCHED_TOKENS)

        if self.runner_type == "pooling":
            # Choose specific value for higher throughput
            self.max_num_batched_tokens = max(
                self.max_num_batched_tokens,
                POOLING_MODEL_MAX_NUM_BATCHED_TOKENS,
            )
        if self.is_multimodal_model:
            # The value needs to be at least the number of multimodal tokens
            self.max_num_batched_tokens = max(
                self.max_num_batched_tokens,
                MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS,
            )

        # When using default settings,
        # Ensure max_num_batched_tokens does not exceed model limit.
        # Some models (e.g., Whisper) have embeddings tied to max length.
        self.max_num_batched_tokens = min(
            self.max_num_seqs * self.max_model_len,
            self.max_num_batched_tokens)

    self.max_num_encoder_input_tokens = self.max_num_batched_tokens
    self.encoder_cache_size = self.max_num_batched_tokens

    if self.enable_chunked_prefill:
        logger.info(
            "Chunked prefill is enabled with max_num_batched_tokens=%d.",
            self.max_num_batched_tokens)

    self.chunked_prefill_enabled = self.enable_chunked_prefill
    if self.max_num_partial_prefills > 1:
        if self.long_prefill_token_threshold == 0:
            self.long_prefill_token_threshold = int(self.max_model_len *
                                                    0.04)

        logger.info(
            "Concurrent partial prefills enabled with "
            "max_num_partial_prefills=%d, max_long_partial_prefills=%d, "
            "long_prefill_token_threshold=%d",
            self.max_num_partial_prefills, self.max_long_partial_prefills,
            self.long_prefill_token_threshold)

    # NOTE: Default set cuda_graph_sizes to [min(max_num_seqs * 2, 512)].
    # This avoids OOM in tight memory scenarios with small max_num_seqs,
    # and prevents capture of many large graphs (>512) that would greatly
    # increase startup time with limited performance benefit.
    if not self.cuda_graph_sizes:
        self.cuda_graph_sizes = [min(self.max_num_seqs * 2, 512)]

    if self.async_scheduling:
        self.scheduler_cls = (
            "vllm.v1.core.sched.async_scheduler.AsyncScheduler")

_verify_args

_verify_args() -> Self
Source code in vllm/config/scheduler.py
@model_validator(mode='after')
def _verify_args(self) -> Self:
    if (self.max_num_batched_tokens < self.max_model_len
            and not self.chunked_prefill_enabled):
        raise ValueError(
            f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
            f"smaller than max_model_len ({self.max_model_len}). "
            "This effectively limits the maximum sequence length to "
            "max_num_batched_tokens and makes vLLM reject longer "
            "sequences. Please increase max_num_batched_tokens or "
            "decrease max_model_len.")

    if self.max_num_batched_tokens < self.max_num_seqs:
        raise ValueError(
            f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
            "be greater than or equal to max_num_seqs "
            f"({self.max_num_seqs}).")

    if self.max_num_batched_tokens > self.max_num_seqs * self.max_model_len:
        logger.warning(
            "max_num_batched_tokens (%d) exceeds max_num_seqs "
            "* max_model_len (%d). This may lead to unexpected behavior.",
            self.max_num_batched_tokens,
            self.max_num_seqs * self.max_model_len)

    if self.num_lookahead_slots < 0:
        raise ValueError(
            "num_lookahead_slots "
            f"({self.num_lookahead_slots}) must be greater than or "
            "equal to 0.")

    if self.max_num_partial_prefills < 1:
        raise ValueError(
            f"max_num_partial_prefills ({self.max_num_partial_prefills}) "
            "must be greater than or equal to 1.")
    elif self.max_num_partial_prefills > 1:
        if not self.chunked_prefill_enabled:
            raise ValueError("Chunked prefill must be enabled to set "
                             "max_num_partial_prefills > 1.")

        if self.long_prefill_token_threshold > self.max_model_len:
            raise ValueError(
                "long_prefill_token_threshold "
                f"({self.long_prefill_token_threshold}) cannot be greater "
                f"than the max_model_len ({self.max_model_len}).")

    if (self.max_long_partial_prefills
            < 1) or (self.max_long_partial_prefills
                     > self.max_num_partial_prefills):
        raise ValueError(
            f"max_long_partial_prefills ({self.max_long_partial_prefills}) "
            "must be greater than or equal to 1 and less than or equal to "
            f"max_num_partial_prefills ({self.max_num_partial_prefills}).")

    return self

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/scheduler.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # this config will not affect the computation graph.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

SpeculativeConfig

Configuration for speculative decoding.

Source code in vllm/config/speculative.py
@config
@dataclass
class SpeculativeConfig:
    """Configuration for speculative decoding."""

    # General speculative decoding control
    num_speculative_tokens: SkipValidation[int] = None  # type: ignore
    """The number of speculative tokens, if provided. It will default to the
    number in the draft model config if present; otherwise, it is required."""
    model: Optional[str] = None
    """The name of the draft model, eagle head, or additional weights, if
    provided."""
    method: Optional[SpeculativeMethod] = None
    """The name of the speculative method to use. If users provide and set the
    `model` param, the speculative method type will be detected automatically
    if possible, if `model` param is not provided, the method name must be
    provided.

    If using `ngram` method, the related configuration `prompt_lookup_max` and
    `prompt_lookup_min` should be considered."""
    draft_tensor_parallel_size: Optional[int] = None
    """The degree of the tensor parallelism for the draft model. Can only be 1
    or the same as the target model's tensor parallel size."""
    disable_logprobs: bool = True
    """If set to True, token log probabilities are not returned during
    speculative decoding. If set to False, token log probabilities are returned
    according to the log probability settings in SamplingParams."""

    # Draft model configuration
    quantization: Optional[me_quant.QuantizationMethods] = None
    """Quantization method that was used to quantize the draft model weights.
    If `None`, we assume the model weights are not quantized. Note that it only
    takes effect when using the draft model-based speculative method."""
    max_model_len: Optional[int] = None
    """The maximum model length of the draft model. Used when testing the
    ability to skip speculation for some sequences."""
    revision: Optional[str] = None
    """The specific model version to use for the draft model. It can be a
    branch name, a tag name, or a commit id. If unspecified, will use the
    default version."""
    code_revision: Optional[str] = None
    """The specific revision to use for the draft model code on Hugging Face
    Hub. It can be a branch name, a tag name, or a commit id. If unspecified,
    will use the default version."""

    # Advanced control
    disable_by_batch_size: Optional[int] = None
    """Disable speculative decoding for new incoming requests when the number
    of enqueued requests is larger than this value, if provided."""
    disable_padded_drafter_batch: bool = False
    """Disable input padding for speculative decoding. If set to True,
    speculative input batches can contain sequences of different lengths,
    which may only be supported by certain attention backends. This currently
    only affects the EAGLE method of speculation."""

    # Ngram proposer configuration
    prompt_lookup_max: Optional[int] = None
    """Maximum size of ngram token window when using Ngram proposer, required
    when method is set to ngram."""
    prompt_lookup_min: Optional[int] = None
    """Minimum size of ngram token window when using Ngram proposer, if
    provided. Defaults to 1."""

    speculative_token_tree: Optional[str] = None
    """Specifies the tree structure for speculative token generation.
    """
    # required configuration params passed from engine
    target_model_config: SkipValidation[ModelConfig] = None  # type: ignore
    """The configuration of the target model."""
    target_parallel_config: SkipValidation[
        ParallelConfig] = None  # type: ignore
    """The parallel configuration for the target model."""
    enable_chunked_prefill: SkipValidation[bool] = None  # type: ignore
    """Whether vLLM is configured to use chunked prefill or not. Used for
    raising an error since it's not yet compatible with speculative decoding."""
    disable_log_stats: SkipValidation[bool] = None  # type: ignore
    """Whether to disable the periodic printing of stage times in speculative
    decoding."""

    # params generated in the post-init stage
    draft_model_config: SkipValidation[ModelConfig] = None  # type: ignore
    """The configuration of the draft model initialized internal."""
    draft_parallel_config: SkipValidation[
        ParallelConfig] = None  # type: ignore
    """The parallel configuration for the draft model initialized internal."""

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        factors: list[Any] = []
        # Eagle3 affects the computation graph because it returns intermediate
        # hidden states in addition to the final hidden state.
        factors.append(self.method == "eagle3")
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    @staticmethod
    def hf_config_override(hf_config: PretrainedConfig) -> PretrainedConfig:
        if hf_config.model_type in ("deepseek_v3", "deepseek_v32"):
            hf_config.model_type = "deepseek_mtp"
        if hf_config.model_type == "deepseek_mtp":
            n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
            hf_config.update({
                "n_predict": n_predict,
                "architectures": ["DeepSeekMTPModel"]
            })

        if hf_config.architectures[0] == "MiMoForCausalLM":
            hf_config.model_type = "mimo_mtp"
            n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
            hf_config.update({
                "num_hidden_layers": 0,
                "n_predict": n_predict,
                "architectures": ["MiMoMTPModel"]
            })

        if hf_config.architectures[0] == "Glm4MoeForCausalLM":
            hf_config.model_type = "glm4_moe_mtp"
            n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
            hf_config.update({
                "num_hidden_layers": 0,
                "n_predict": n_predict,
                "architectures": ["Glm4MoeMTPModel"]
            })

        if hf_config.model_type == "ernie4_5_moe":
            hf_config.model_type = "ernie_mtp"
        if hf_config.model_type == "ernie_mtp":
            n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
            hf_config.update({
                "n_predict": n_predict,
                "architectures": ["ErnieMTPModel"]
            })

        if hf_config.model_type == "qwen3_next":
            hf_config.model_type = "qwen3_next_mtp"
        if hf_config.model_type == "qwen3_next_mtp":
            n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
            hf_config.update({
                "n_predict": n_predict,
                "architectures": ["Qwen3NextMTP"]
            })
        if hf_config.model_type == "longcat_flash":
            hf_config.model_type = "longcat_flash_mtp"
            n_predict = getattr(hf_config, "num_nextn_predict_layers", 1)
            hf_config.update({
                "n_predict": n_predict,
                "architectures": ["LongCatFlashMTPModel"]
            })

        return hf_config

    def __post_init__(self):

        # Note: "method" is a new parameter that helps to extend the
        # configuration of non-model-based proposers, and the "model" parameter
        # will be used to set the draft model, eagle head, or additional weight
        # when needed. If users do not specify "method", the speculative method
        # will be detected automatically if possible. If the speculative method
        # can not be detected, it will be considered as the "draft_model" by
        # default.

        if self.method in MTP_MODEL_TYPES:
            logger.warning("method `%s` is deprecated and replaced with mtp.",
                           self.method)
            self.method = "mtp"

        if self.model is None and self.num_speculative_tokens is not None:
            if self.method == "mtp":
                assert (
                    self.target_model_config
                    is not None), "target_model_config must be present for mtp"
                # use the draft model from the same model:
                self.model = self.target_model_config.model
                # Align the quantization of draft model for cases such as
                # --quantization fp8 with a bf16 checkpoint.
                if not self.quantization:
                    self.quantization = self.target_model_config.quantization
            elif self.method in ("ngram", "[ngram]"):
                self.model = "ngram"
            else:
                raise ValueError(
                    "num_speculative_tokens was provided but without "
                    "speculative model.")

        # Automatically configure the method for ngram when "model" is used
        # instead of "method"
        if self.method is None and (self.model is not None
                                    and self.model in ("ngram", "[ngram]")):
            self.method = "ngram"

        if self.method in ("ngram", "[ngram]"):
            # Unified to "ngram" internally
            self.method = "ngram"
            # Set default values if not provided
            if (self.prompt_lookup_min is None
                    and self.prompt_lookup_max is None):
                # TODO(woosuk): Tune these values. They are arbitrarily chosen.
                self.prompt_lookup_min = 5
                self.prompt_lookup_max = 5
            elif self.prompt_lookup_min is None:
                assert self.prompt_lookup_max is not None
                self.prompt_lookup_min = self.prompt_lookup_max
            elif self.prompt_lookup_max is None:
                assert self.prompt_lookup_min is not None
                self.prompt_lookup_max = self.prompt_lookup_min

            # Validate values
            if self.prompt_lookup_min < 1:
                raise ValueError(
                    f"prompt_lookup_min={self.prompt_lookup_min} must be > 0")
            if self.prompt_lookup_max < 1:
                raise ValueError(
                    f"prompt_lookup_max={self.prompt_lookup_max} must be > 0")
            if self.prompt_lookup_min > self.prompt_lookup_max:
                raise ValueError(
                    f"prompt_lookup_min={self.prompt_lookup_min} must "
                    f"be <= prompt_lookup_max={self.prompt_lookup_max}")

            # TODO: currently we still need to extract vocab_size from the
            # target model config; in the future, we may refactor it out and
            # set the draft-related config to None here.
            self.draft_model_config = self.target_model_config
            self.draft_parallel_config = self.target_parallel_config
        else:
            self.prompt_lookup_max = 0
            self.prompt_lookup_min = 0

            if self.model is not None:
                # TODO: Move this import to the top once `ModelConfig`
                # lives in `vllm.config.model`.
                from vllm.config import ModelConfig
                self.draft_model_config = ModelConfig(
                    model=self.model,
                    runner="draft",
                    tokenizer=self.target_model_config.tokenizer,
                    tokenizer_mode=self.target_model_config.tokenizer_mode,
                    trust_remote_code=self.target_model_config.
                    trust_remote_code,
                    allowed_local_media_path=self.target_model_config.
                    allowed_local_media_path,
                    allowed_media_domains=self.target_model_config.
                    allowed_media_domains,
                    dtype=self.target_model_config.dtype,
                    seed=self.target_model_config.seed,
                    revision=self.revision,
                    code_revision=self.code_revision,
                    tokenizer_revision=self.target_model_config.
                    tokenizer_revision,
                    spec_target_max_model_len=self.target_model_config.
                    max_model_len,
                    quantization=self.quantization,
                    enforce_eager=self.target_model_config.enforce_eager,
                    max_logprobs=self.target_model_config.max_logprobs,
                    hf_overrides=SpeculativeConfig.hf_config_override,
                )

                # Automatically detect the method
                if self.method in ('eagle', 'eagle3'):
                    pass
                # examples:
                # yuhuili/EAGLE-LLaMA3-Instruct-8B
                # yuhuili/EAGLE3-LLaMA3.1-Instruct-8B
                # AngelSlim/Qwen3-8B_eagle3
                elif "eagle-" in self.draft_model_config.model.lower():
                    self.method = "eagle"
                elif "eagle3" in self.draft_model_config.model.lower():
                    self.method = "eagle3"
                elif self.draft_model_config.hf_config.model_type == "medusa":
                    self.method = "medusa"
                elif (self.draft_model_config.hf_config.model_type ==
                      "mlp_speculator"):
                    self.method = "mlp_speculator"
                elif (self.draft_model_config.hf_config.model_type
                      in MTP_MODEL_TYPES):
                    self.method = "mtp"
                    if self.num_speculative_tokens > 1:
                        logger.warning(
                            "Enabling num_speculative_tokens > 1 will run "
                            "multiple forward passes on the same MTP layer, "
                            "which may result in a lower acceptance rate.")
                elif (self.draft_model_config.hf_config.model_type
                      in ("longcat_flash_mtp")):
                    self.method = "longcat_flash_mtp"
                    if self.num_speculative_tokens > 1:
                        logger.warning(
                                "LongCat MTP models only have " \
                                "one layer. Might need some code changes " \
                                "to support multiple layers."
                            )
                else:
                    self.method = "draft_model"
                    raise NotImplementedError(
                        "Speculative decoding with draft model is not "
                        "supported yet. Please consider using other "
                        "speculative decoding methods such as ngram, medusa, "
                        "eagle, or mtp.")

                # Replace hf_config for EAGLE draft_model
                if self.method in ("eagle", "eagle3"):
                    if self.enable_chunked_prefill and not envs.VLLM_USE_V1:
                        raise ValueError(
                            "Chunked prefill and EAGLE are not compatible "
                            "when using V0.")

                    from vllm.transformers_utils.configs import (
                        SpeculatorsConfig)
                    from vllm.transformers_utils.configs.eagle import (
                        EAGLEConfig)

                    if isinstance(self.draft_model_config.hf_config,
                                  (EAGLEConfig, SpeculatorsConfig)):
                        pass
                    else:
                        eagle_config = EAGLEConfig(
                            self.draft_model_config.hf_config,
                            method=self.method,
                            model_type="eagle")
                        self.draft_model_config.hf_config = eagle_config

                if (self.num_speculative_tokens is not None
                        and hasattr(self.draft_model_config.hf_config,
                                    "num_lookahead_tokens")):
                    self.draft_model_config.hf_config.num_lookahead_tokens = \
                    self.num_speculative_tokens

                n_predict = getattr(self.draft_model_config.hf_config,
                                    "n_predict", None)
                if n_predict is not None:
                    if self.num_speculative_tokens is None:
                        # Default to max value defined in draft model config.
                        self.num_speculative_tokens = n_predict
                    elif self.num_speculative_tokens > n_predict and \
                            self.num_speculative_tokens % n_predict != 0:
                        # Ensure divisibility for MTP module reuse.
                        raise ValueError(
                            f"num_speculative_tokens:{self.num_speculative_tokens}"
                            f" must be divisible by {n_predict=}")

                if self.speculative_token_tree is None:
                    # Generate chain of tokens.
                    self.speculative_token_tree = str([
                        (i + 1) * (0, )
                        for i in range(self.num_speculative_tokens)
                    ])
                else:
                    # Sort the token tree breadth-first.
                    tree_choices = ast.literal_eval(
                        self.speculative_token_tree)
                    self.speculative_token_tree = str(
                        sorted(tree_choices, key=lambda t: (len(t), t)))

                self.draft_tensor_parallel_size = \
                    SpeculativeConfig._verify_and_get_draft_tp(
                        self.target_parallel_config,
                        self.draft_tensor_parallel_size,
                        self.draft_model_config.hf_config
                )

                self.draft_model_config.max_model_len = (
                    SpeculativeConfig._maybe_override_draft_max_model_len(
                        self.max_model_len,
                        self.draft_model_config.max_model_len,
                        self.target_model_config.max_model_len,
                    ))

                self.draft_parallel_config = (
                    SpeculativeConfig.create_draft_parallel_config(
                        self.target_parallel_config,
                        self.draft_tensor_parallel_size))

    @staticmethod
    def _maybe_override_draft_max_model_len(
        speculative_max_model_len: Optional[int],
        draft_max_model_len: int,
        target_max_model_len: int,
    ) -> int:
        """Determine the max sequence len for the draft model. This is usually
        the draft_max_model_len, but may be the target_max_model_len if it is
        less than the draft_max_model_len, or may be speculative_max_model_len
        if it is specified.

        This is necessary so that sequences do not exceed the capacity of the
        draft model or the target model.

        speculative_max_model_len is mainly used for testing that sequences can
        skip speculation.
        """

        if speculative_max_model_len is not None:

            if speculative_max_model_len > draft_max_model_len:
                raise ValueError(f"{speculative_max_model_len=} cannot be "
                                 f"larger than {draft_max_model_len=}")

            if speculative_max_model_len > target_max_model_len:
                raise ValueError(f"{speculative_max_model_len=} cannot be "
                                 f"larger than {target_max_model_len=}")

            return speculative_max_model_len

        return min(
            draft_max_model_len,
            target_max_model_len,
        )

    @staticmethod
    def _verify_and_get_draft_tp(
            target_parallel_config: ParallelConfig,
            speculative_draft_tensor_parallel_size: Optional[int],
            draft_hf_config: PretrainedConfig) -> int:
        """
        Verifies and adjusts the tensor parallel size for a draft model
        specified using speculative_draft_tensor_parallel_size.
        """
        # If speculative_draft_tensor_parallel_size is unset then set it
        # appropriately else verify that it is set correctly.
        if speculative_draft_tensor_parallel_size is None:
            if draft_hf_config.model_type == "mlp_speculator":
                speculative_draft_tensor_parallel_size = 1
                if target_parallel_config.tensor_parallel_size > 1:
                    logger.warning(
                        "%s cannot currently be run with tp>1; "
                        "setting speculative_draft_tensor_parallel_size=1",
                        draft_hf_config.model_type)
            else:
                speculative_draft_tensor_parallel_size = \
                    target_parallel_config.tensor_parallel_size
        elif speculative_draft_tensor_parallel_size not in (
                1, target_parallel_config.tensor_parallel_size):
            raise ValueError(
                f"{speculative_draft_tensor_parallel_size=} cannot be "
                f"other value than 1 or target model tensor_parallel_size")
        return speculative_draft_tensor_parallel_size

    @staticmethod
    def create_draft_parallel_config(
        target_parallel_config: ParallelConfig,
        speculative_draft_tensor_parallel_size: int,
    ) -> ParallelConfig:
        """Create a parallel config for use by the draft worker.

        This is mostly a copy of the target parallel config, except the tp_size.
        """
        draft_parallel_config = ParallelConfig(
            pipeline_parallel_size=target_parallel_config.
            pipeline_parallel_size,
            tensor_parallel_size=speculative_draft_tensor_parallel_size,
            distributed_executor_backend=target_parallel_config.
            distributed_executor_backend,
            max_parallel_loading_workers=target_parallel_config.
            max_parallel_loading_workers,
            disable_custom_all_reduce=target_parallel_config.
            disable_custom_all_reduce,
            ray_workers_use_nsight=target_parallel_config.
            ray_workers_use_nsight,
            placement_group=target_parallel_config.placement_group,
        )

        return draft_parallel_config

    @model_validator(mode='after')
    def _verify_args(self) -> Self:
        if self.num_speculative_tokens is None:
            raise ValueError(
                "num_speculative_tokens must be provided with "
                "speculative model unless the draft model config contains an "
                "n_predict parameter.")

        if self.num_speculative_tokens <= 0:
            raise ValueError("Expected num_speculative_tokens to be greater "
                             f"than zero ({self.num_speculative_tokens}).")

        if self.draft_model_config:
            self.draft_model_config.verify_with_parallel_config(
                self.draft_parallel_config)

        if (self.disable_by_batch_size is not None
                and self.disable_by_batch_size < 2):
            raise ValueError("Expect the batch size threshold of disabling "
                             "speculative decoding is > 1, but got "
                             f"{self.disable_by_batch_size=}")

        eagle3_target_supported = ["llama", "qwen", "minicpm", "gpt_oss"]
        if self.method == "eagle3" and self.target_model_config and not any(
                supported_model in
                self.target_model_config.hf_text_config.model_type
                for supported_model in eagle3_target_supported):
            raise ValueError(
                f"Eagle3 is only supported for {eagle3_target_supported} models. "  # noqa: E501
                f"Got {self.target_model_config.hf_text_config.model_type=}")

        return self

    @property
    def num_lookahead_slots(self) -> int:
        """The number of additional slots the scheduler should allocate per
        step, in addition to the slots allocated for each known token.

        This is equal to the number of speculative tokens, as each speculative
        token must be scored.
        """
        return self.num_speculative_tokens

    def use_eagle(self) -> bool:
        return self.method in ("eagle", "eagle3", "mtp")

    def __repr__(self) -> str:
        method = self.method
        model = None if method == "ngram" else self.draft_model_config.model
        num_spec_tokens = self.num_speculative_tokens
        return f"SpeculativeConfig({method=}, {model=}, {num_spec_tokens=})"

code_revision class-attribute instance-attribute

code_revision: Optional[str] = None

The specific revision to use for the draft model code on Hugging Face Hub. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.

disable_by_batch_size class-attribute instance-attribute

disable_by_batch_size: Optional[int] = None

Disable speculative decoding for new incoming requests when the number of enqueued requests is larger than this value, if provided.

disable_log_stats class-attribute instance-attribute

disable_log_stats: SkipValidation[bool] = None

Whether to disable the periodic printing of stage times in speculative decoding.

disable_logprobs class-attribute instance-attribute

disable_logprobs: bool = True

If set to True, token log probabilities are not returned during speculative decoding. If set to False, token log probabilities are returned according to the log probability settings in SamplingParams.

disable_padded_drafter_batch class-attribute instance-attribute

disable_padded_drafter_batch: bool = False

Disable input padding for speculative decoding. If set to True, speculative input batches can contain sequences of different lengths, which may only be supported by certain attention backends. This currently only affects the EAGLE method of speculation.

draft_model_config class-attribute instance-attribute

draft_model_config: SkipValidation[ModelConfig] = None

The configuration of the draft model, initialized internally.

draft_parallel_config class-attribute instance-attribute

draft_parallel_config: SkipValidation[ParallelConfig] = None

The parallel configuration for the draft model, initialized internally.

draft_tensor_parallel_size class-attribute instance-attribute

draft_tensor_parallel_size: Optional[int] = None

The degree of the tensor parallelism for the draft model. Can only be 1 or the same as the target model's tensor parallel size.

enable_chunked_prefill class-attribute instance-attribute

enable_chunked_prefill: SkipValidation[bool] = None

Whether vLLM is configured to use chunked prefill or not. Used for raising an error since it's not yet compatible with speculative decoding.

max_model_len class-attribute instance-attribute

max_model_len: Optional[int] = None

The maximum model length of the draft model. Used when testing the ability to skip speculation for some sequences.

method class-attribute instance-attribute

method: Optional[SpeculativeMethod] = None

The name of the speculative method to use. If users provide and set the model param, the speculative method type will be detected automatically if possible. If the model param is not provided, the method name must be provided.

If using ngram method, the related configuration prompt_lookup_max and prompt_lookup_min should be considered.
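
For example, a speculative config selecting the ngram method could look like the dict below. This is a hedged usage sketch whose keys simply mirror the fields documented on this page; the commented-out LLM call is an assumption about the entry point and should be checked against your vLLM version:

speculative_config = {
    "method": "ngram",
    "num_speculative_tokens": 3,
    "prompt_lookup_max": 4,
    "prompt_lookup_min": 2,
}

# Assumed entry point (verify against your vLLM version):
# from vllm import LLM
# llm = LLM(model="facebook/opt-125m", speculative_config=speculative_config)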

model class-attribute instance-attribute

model: Optional[str] = None

The name of the draft model, eagle head, or additional weights, if provided.

num_lookahead_slots property

num_lookahead_slots: int

The number of additional slots the scheduler should allocate per step, in addition to the slots allocated for each known token.

This is equal to the number of speculative tokens, as each speculative token must be scored.

num_speculative_tokens class-attribute instance-attribute

num_speculative_tokens: SkipValidation[int] = None

The number of speculative tokens, if provided. It will default to the number in the draft model config if present; otherwise, it is required.

prompt_lookup_max class-attribute instance-attribute

prompt_lookup_max: Optional[int] = None

Maximum size of ngram token window when using Ngram proposer, required when method is set to ngram.

prompt_lookup_min class-attribute instance-attribute

prompt_lookup_min: Optional[int] = None

Minimum size of ngram token window when using Ngram proposer, if provided. Defaults to 1.

quantization class-attribute instance-attribute

quantization: Optional[QuantizationMethods] = None

Quantization method that was used to quantize the draft model weights. If None, we assume the model weights are not quantized. Note that it only takes effect when using the draft model-based speculative method.

revision class-attribute instance-attribute

revision: Optional[str] = None

The specific model version to use for the draft model. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.

speculative_token_tree class-attribute instance-attribute

speculative_token_tree: Optional[str] = None

Specifies the tree structure for speculative token generation.
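
Based on the default generated in __post_init__ above, a plain chain of speculative tokens is serialized as a stringified list of tuples. The value below is simply the default for num_speculative_tokens=3:

num_speculative_tokens = 3

# Default "chain" tree built when no explicit tree is supplied.
speculative_token_tree = str([
    (i + 1) * (0,) for i in range(num_speculative_tokens)
])

print(speculative_token_tree)  # [(0,), (0, 0), (0, 0, 0)]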

target_model_config class-attribute instance-attribute

target_model_config: SkipValidation[ModelConfig] = None

The configuration of the target model.

target_parallel_config class-attribute instance-attribute

target_parallel_config: SkipValidation[ParallelConfig] = (
    None
)

The parallel configuration for the target model.

__post_init__

__post_init__()
Source code in vllm/config/speculative.py
def __post_init__(self):

    # Note: "method" is a new parameter that helps to extend the
    # configuration of non-model-based proposers, and the "model" parameter
    # will be used to set the draft model, eagle head, or additional weight
    # when needed. If users do not specify "method", the speculative method
    # will be detected automatically if possible. If the speculative method
    # can not be detected, it will be considered as the "draft_model" by
    # default.

    if self.method in MTP_MODEL_TYPES:
        logger.warning("method `%s` is deprecated and replaced with mtp.",
                       self.method)
        self.method = "mtp"

    if self.model is None and self.num_speculative_tokens is not None:
        if self.method == "mtp":
            assert (
                self.target_model_config
                is not None), "target_model_config must be present for mtp"
            # use the draft model from the same model:
            self.model = self.target_model_config.model
            # Align the quantization of draft model for cases such as
            # --quantization fp8 with a bf16 checkpoint.
            if not self.quantization:
                self.quantization = self.target_model_config.quantization
        elif self.method in ("ngram", "[ngram]"):
            self.model = "ngram"
        else:
            raise ValueError(
                "num_speculative_tokens was provided but without "
                "speculative model.")

    # Automatically configure the method for ngram when "model" is used
    # instead of "method"
    if self.method is None and (self.model is not None
                                and self.model in ("ngram", "[ngram]")):
        self.method = "ngram"

    if self.method in ("ngram", "[ngram]"):
        # Unified to "ngram" internally
        self.method = "ngram"
        # Set default values if not provided
        if (self.prompt_lookup_min is None
                and self.prompt_lookup_max is None):
            # TODO(woosuk): Tune these values. They are arbitrarily chosen.
            self.prompt_lookup_min = 5
            self.prompt_lookup_max = 5
        elif self.prompt_lookup_min is None:
            assert self.prompt_lookup_max is not None
            self.prompt_lookup_min = self.prompt_lookup_max
        elif self.prompt_lookup_max is None:
            assert self.prompt_lookup_min is not None
            self.prompt_lookup_max = self.prompt_lookup_min

        # Validate values
        if self.prompt_lookup_min < 1:
            raise ValueError(
                f"prompt_lookup_min={self.prompt_lookup_min} must be > 0")
        if self.prompt_lookup_max < 1:
            raise ValueError(
                f"prompt_lookup_max={self.prompt_lookup_max} must be > 0")
        if self.prompt_lookup_min > self.prompt_lookup_max:
            raise ValueError(
                f"prompt_lookup_min={self.prompt_lookup_min} must "
                f"be <= prompt_lookup_max={self.prompt_lookup_max}")

        # TODO: currently we still need to extract vocab_size from the
        # target model config; in the future, we may refactor it out and
        # set the draft-related config to None here.
        self.draft_model_config = self.target_model_config
        self.draft_parallel_config = self.target_parallel_config
    else:
        self.prompt_lookup_max = 0
        self.prompt_lookup_min = 0

        if self.model is not None:
            # TODO: Move this import to the top once `ModelConfig`
            # lives in `vllm.config.model`.
            from vllm.config import ModelConfig
            self.draft_model_config = ModelConfig(
                model=self.model,
                runner="draft",
                tokenizer=self.target_model_config.tokenizer,
                tokenizer_mode=self.target_model_config.tokenizer_mode,
                trust_remote_code=self.target_model_config.
                trust_remote_code,
                allowed_local_media_path=self.target_model_config.
                allowed_local_media_path,
                allowed_media_domains=self.target_model_config.
                allowed_media_domains,
                dtype=self.target_model_config.dtype,
                seed=self.target_model_config.seed,
                revision=self.revision,
                code_revision=self.code_revision,
                tokenizer_revision=self.target_model_config.
                tokenizer_revision,
                spec_target_max_model_len=self.target_model_config.
                max_model_len,
                quantization=self.quantization,
                enforce_eager=self.target_model_config.enforce_eager,
                max_logprobs=self.target_model_config.max_logprobs,
                hf_overrides=SpeculativeConfig.hf_config_override,
            )

            # Automatically detect the method
            if self.method in ('eagle', 'eagle3'):
                pass
            # examples:
            # yuhuili/EAGLE-LLaMA3-Instruct-8B
            # yuhuili/EAGLE3-LLaMA3.1-Instruct-8B
            # AngelSlim/Qwen3-8B_eagle3
            elif "eagle-" in self.draft_model_config.model.lower():
                self.method = "eagle"
            elif "eagle3" in self.draft_model_config.model.lower():
                self.method = "eagle3"
            elif self.draft_model_config.hf_config.model_type == "medusa":
                self.method = "medusa"
            elif (self.draft_model_config.hf_config.model_type ==
                  "mlp_speculator"):
                self.method = "mlp_speculator"
            elif (self.draft_model_config.hf_config.model_type
                  in MTP_MODEL_TYPES):
                self.method = "mtp"
                if self.num_speculative_tokens > 1:
                    logger.warning(
                        "Enabling num_speculative_tokens > 1 will run the "
                        "same MTP layer multiple times, which may result "
                        "in a lower acceptance rate.")
            elif (self.draft_model_config.hf_config.model_type
                  in ("longcat_flash_mtp", )):
                self.method = "longcat_flash_mtp"
                if self.num_speculative_tokens > 1:
                    logger.warning(
                            "LongCat MTP models only have " \
                            "one layer. Might need some code changes " \
                            "to support multiple layers."
                        )
            else:
                self.method = "draft_model"
                raise NotImplementedError(
                    "Speculative decoding with draft model is not "
                    "supported yet. Please consider using other "
                    "speculative decoding methods such as ngram, medusa, "
                    "eagle, or mtp.")

            # Replace hf_config for EAGLE draft_model
            if self.method in ("eagle", "eagle3"):
                if self.enable_chunked_prefill and not envs.VLLM_USE_V1:
                    raise ValueError(
                        "Chunked prefill and EAGLE are not compatible "
                        "when using V0.")

                from vllm.transformers_utils.configs import (
                    SpeculatorsConfig)
                from vllm.transformers_utils.configs.eagle import (
                    EAGLEConfig)

                if isinstance(self.draft_model_config.hf_config,
                              (EAGLEConfig, SpeculatorsConfig)):
                    pass
                else:
                    eagle_config = EAGLEConfig(
                        self.draft_model_config.hf_config,
                        method=self.method,
                        model_type="eagle")
                    self.draft_model_config.hf_config = eagle_config

            if (self.num_speculative_tokens is not None
                    and hasattr(self.draft_model_config.hf_config,
                                "num_lookahead_tokens")):
                self.draft_model_config.hf_config.num_lookahead_tokens = \
                self.num_speculative_tokens

            n_predict = getattr(self.draft_model_config.hf_config,
                                "n_predict", None)
            if n_predict is not None:
                if self.num_speculative_tokens is None:
                    # Default to max value defined in draft model config.
                    self.num_speculative_tokens = n_predict
                elif self.num_speculative_tokens > n_predict and \
                        self.num_speculative_tokens % n_predict != 0:
                    # Ensure divisibility for MTP module reuse.
                    raise ValueError(
                        f"num_speculative_tokens:{self.num_speculative_tokens}"
                        f" must be divisible by {n_predict=}")

            if self.speculative_token_tree is None:
                # Generate chain of tokens.
                self.speculative_token_tree = str([
                    (i + 1) * (0, )
                    for i in range(self.num_speculative_tokens)
                ])
            else:
                # Sort the token tree breadth-first.
                tree_choices = ast.literal_eval(
                    self.speculative_token_tree)
                self.speculative_token_tree = str(
                    sorted(tree_choices, key=lambda t: (len(t), t)))

            self.draft_tensor_parallel_size = \
                SpeculativeConfig._verify_and_get_draft_tp(
                    self.target_parallel_config,
                    self.draft_tensor_parallel_size,
                    self.draft_model_config.hf_config
            )

            self.draft_model_config.max_model_len = (
                SpeculativeConfig._maybe_override_draft_max_model_len(
                    self.max_model_len,
                    self.draft_model_config.max_model_len,
                    self.target_model_config.max_model_len,
                ))

            self.draft_parallel_config = (
                SpeculativeConfig.create_draft_parallel_config(
                    self.target_parallel_config,
                    self.draft_tensor_parallel_size))
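
To make the ngram defaulting above concrete, here is a minimal standalone sketch of the window resolution (a hypothetical helper that mirrors the fields used in `__post_init__`; it is not part of vLLM):

```python
from typing import Optional

def resolve_prompt_lookup_window(
    prompt_lookup_min: Optional[int],
    prompt_lookup_max: Optional[int],
) -> tuple[int, int]:
    # Both unset: fall back to the arbitrary default of 5 used above.
    if prompt_lookup_min is None and prompt_lookup_max is None:
        prompt_lookup_min = prompt_lookup_max = 5
    # Only one set: mirror it to the other bound.
    elif prompt_lookup_min is None:
        prompt_lookup_min = prompt_lookup_max
    elif prompt_lookup_max is None:
        prompt_lookup_max = prompt_lookup_min
    # Validate the window exactly as __post_init__ does.
    if prompt_lookup_min < 1 or prompt_lookup_max < 1:
        raise ValueError("prompt lookup bounds must be > 0")
    if prompt_lookup_min > prompt_lookup_max:
        raise ValueError("prompt_lookup_min must be <= prompt_lookup_max")
    return prompt_lookup_min, prompt_lookup_max

assert resolve_prompt_lookup_window(None, None) == (5, 5)
assert resolve_prompt_lookup_window(None, 3) == (3, 3)
assert resolve_prompt_lookup_window(2, 7) == (2, 7)
```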

__repr__

__repr__() -> str
Source code in vllm/config/speculative.py
def __repr__(self) -> str:
    method = self.method
    model = None if method == "ngram" else self.draft_model_config.model
    num_spec_tokens = self.num_speculative_tokens
    return f"SpeculativeConfig({method=}, {model=}, {num_spec_tokens=})"

_maybe_override_draft_max_model_len staticmethod

_maybe_override_draft_max_model_len(
    speculative_max_model_len: Optional[int],
    draft_max_model_len: int,
    target_max_model_len: int,
) -> int

Determine the max sequence len for the draft model. This is usually the draft_max_model_len, but may be the target_max_model_len if it is less than the draft_max_model_len, or may be speculative_max_model_len if it is specified.

This is necessary so that sequences do not exceed the capacity of the draft model or the target model.

speculative_max_model_len is mainly used for testing that sequences can skip speculation.

Source code in vllm/config/speculative.py
@staticmethod
def _maybe_override_draft_max_model_len(
    speculative_max_model_len: Optional[int],
    draft_max_model_len: int,
    target_max_model_len: int,
) -> int:
    """Determine the max sequence len for the draft model. This is usually
    the draft_max_model_len, but may be the target_max_model_len if it is
    less than the draft_max_model_len, or may be speculative_max_model_len
    if it is specified.

    This is necessary so that sequences do not exceed the capacity of the
    draft model or the target model.

    speculative_max_model_len is mainly used for testing that sequences can
    skip speculation.
    """

    if speculative_max_model_len is not None:

        if speculative_max_model_len > draft_max_model_len:
            raise ValueError(f"{speculative_max_model_len=} cannot be "
                             f"larger than {draft_max_model_len=}")

        if speculative_max_model_len > target_max_model_len:
            raise ValueError(f"{speculative_max_model_len=} cannot be "
                             f"larger than {target_max_model_len=}")

        return speculative_max_model_len

    return min(
        draft_max_model_len,
        target_max_model_len,
    )
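
For example, with hypothetical lengths (draft supports 4096 tokens, target 2048), the resolution order is: an explicit `speculative_max_model_len` wins if it fits within both limits, otherwise the smaller of the two model limits is used:

```python
# Hypothetical lengths for illustration.
assert SpeculativeConfig._maybe_override_draft_max_model_len(
    None, draft_max_model_len=4096, target_max_model_len=2048) == 2048
assert SpeculativeConfig._maybe_override_draft_max_model_len(
    1024, draft_max_model_len=4096, target_max_model_len=2048) == 1024
```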

_verify_and_get_draft_tp staticmethod

_verify_and_get_draft_tp(
    target_parallel_config: ParallelConfig,
    speculative_draft_tensor_parallel_size: Optional[int],
    draft_hf_config: PretrainedConfig,
) -> int

Verifies and adjusts the tensor parallel size for a draft model specified using speculative_draft_tensor_parallel_size.

Source code in vllm/config/speculative.py
@staticmethod
def _verify_and_get_draft_tp(
        target_parallel_config: ParallelConfig,
        speculative_draft_tensor_parallel_size: Optional[int],
        draft_hf_config: PretrainedConfig) -> int:
    """
    Verifies and adjusts the tensor parallel size for a draft model
    specified using speculative_draft_tensor_parallel_size.
    """
    # If speculative_draft_tensor_parallel_size is unset, set it
    # appropriately; otherwise verify that it is set correctly.
    if speculative_draft_tensor_parallel_size is None:
        if draft_hf_config.model_type == "mlp_speculator":
            speculative_draft_tensor_parallel_size = 1
            if target_parallel_config.tensor_parallel_size > 1:
                logger.warning(
                    "%s cannot currently be run with tp>1; "
                    "setting speculative_draft_tensor_parallel_size=1",
                    draft_hf_config.model_type)
        else:
            speculative_draft_tensor_parallel_size = \
                target_parallel_config.tensor_parallel_size
    elif speculative_draft_tensor_parallel_size not in (
            1, target_parallel_config.tensor_parallel_size):
        raise ValueError(
            f"{speculative_draft_tensor_parallel_size=} must be either 1 "
            f"or equal to the target model's tensor_parallel_size")
    return speculative_draft_tensor_parallel_size
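
A small illustration using duck-typed stand-ins (real callers pass a `ParallelConfig` and a transformers `PretrainedConfig`; the `SimpleNamespace` objects here are for demonstration only):

```python
from types import SimpleNamespace

target_parallel = SimpleNamespace(tensor_parallel_size=4)
draft_hf = SimpleNamespace(model_type="llama")

# Unset: the draft inherits the target's tensor parallel size.
assert SpeculativeConfig._verify_and_get_draft_tp(
    target_parallel, None, draft_hf) == 4
# Explicitly setting 1 is always allowed; any other value must match
# the target's tensor_parallel_size, otherwise a ValueError is raised.
assert SpeculativeConfig._verify_and_get_draft_tp(
    target_parallel, 1, draft_hf) == 1
```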

_verify_args

_verify_args() -> Self
Source code in vllm/config/speculative.py
@model_validator(mode='after')
def _verify_args(self) -> Self:
    if self.num_speculative_tokens is None:
        raise ValueError(
            "num_speculative_tokens must be provided with "
            "speculative model unless the draft model config contains an "
            "n_predict parameter.")

    if self.num_speculative_tokens <= 0:
        raise ValueError("Expected num_speculative_tokens to be greater "
                         f"than zero ({self.num_speculative_tokens}).")

    if self.draft_model_config:
        self.draft_model_config.verify_with_parallel_config(
            self.draft_parallel_config)

    if (self.disable_by_batch_size is not None
            and self.disable_by_batch_size < 2):
        raise ValueError("Expect the batch size threshold of disabling "
                         "speculative decoding is > 1, but got "
                         f"{self.disable_by_batch_size=}")

    eagle3_target_supported = ["llama", "qwen", "minicpm", "gpt_oss"]
    if self.method == "eagle3" and self.target_model_config and not any(
            supported_model in
            self.target_model_config.hf_text_config.model_type
            for supported_model in eagle3_target_supported):
        raise ValueError(
            f"Eagle3 is only supported for {eagle3_target_supported} models. "  # noqa: E501
            f"Got {self.target_model_config.hf_text_config.model_type=}")

    return self

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/speculative.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    factors: list[Any] = []
    # Eagle3 affects the computation graph because it returns intermediate
    # hidden states in addition to the final hidden state.
    factors.append(self.method == "eagle3")
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

create_draft_parallel_config staticmethod

create_draft_parallel_config(
    target_parallel_config: ParallelConfig,
    speculative_draft_tensor_parallel_size: int,
) -> ParallelConfig

Create a parallel config for use by the draft worker.

This is mostly a copy of the target parallel config, except the tp_size.

Source code in vllm/config/speculative.py
@staticmethod
def create_draft_parallel_config(
    target_parallel_config: ParallelConfig,
    speculative_draft_tensor_parallel_size: int,
) -> ParallelConfig:
    """Create a parallel config for use by the draft worker.

    This is mostly a copy of the target parallel config, except the tp_size.
    """
    draft_parallel_config = ParallelConfig(
        pipeline_parallel_size=target_parallel_config.
        pipeline_parallel_size,
        tensor_parallel_size=speculative_draft_tensor_parallel_size,
        distributed_executor_backend=target_parallel_config.
        distributed_executor_backend,
        max_parallel_loading_workers=target_parallel_config.
        max_parallel_loading_workers,
        disable_custom_all_reduce=target_parallel_config.
        disable_custom_all_reduce,
        ray_workers_use_nsight=target_parallel_config.
        ray_workers_use_nsight,
        placement_group=target_parallel_config.placement_group,
    )

    return draft_parallel_config
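
Illustrative usage (assuming `ParallelConfig` can be constructed with only the fields shown; defaults cover the rest):

```python
from vllm.config import ParallelConfig

target = ParallelConfig(tensor_parallel_size=4, pipeline_parallel_size=2)
draft = SpeculativeConfig.create_draft_parallel_config(
    target, speculative_draft_tensor_parallel_size=1)

assert draft.tensor_parallel_size == 1    # overridden for the draft worker
assert draft.pipeline_parallel_size == 2  # copied from the target config
```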

hf_config_override staticmethod

hf_config_override(
    hf_config: PretrainedConfig,
) -> PretrainedConfig
Source code in vllm/config/speculative.py
@staticmethod
def hf_config_override(hf_config: PretrainedConfig) -> PretrainedConfig:
    if hf_config.model_type in ("deepseek_v3", "deepseek_v32"):
        hf_config.model_type = "deepseek_mtp"
    if hf_config.model_type == "deepseek_mtp":
        n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
        hf_config.update({
            "n_predict": n_predict,
            "architectures": ["DeepSeekMTPModel"]
        })

    if hf_config.architectures[0] == "MiMoForCausalLM":
        hf_config.model_type = "mimo_mtp"
        n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
        hf_config.update({
            "num_hidden_layers": 0,
            "n_predict": n_predict,
            "architectures": ["MiMoMTPModel"]
        })

    if hf_config.architectures[0] == "Glm4MoeForCausalLM":
        hf_config.model_type = "glm4_moe_mtp"
        n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
        hf_config.update({
            "num_hidden_layers": 0,
            "n_predict": n_predict,
            "architectures": ["Glm4MoeMTPModel"]
        })

    if hf_config.model_type == "ernie4_5_moe":
        hf_config.model_type = "ernie_mtp"
    if hf_config.model_type == "ernie_mtp":
        n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
        hf_config.update({
            "n_predict": n_predict,
            "architectures": ["ErnieMTPModel"]
        })

    if hf_config.model_type == "qwen3_next":
        hf_config.model_type = "qwen3_next_mtp"
    if hf_config.model_type == "qwen3_next_mtp":
        n_predict = getattr(hf_config, "num_nextn_predict_layers", None)
        hf_config.update({
            "n_predict": n_predict,
            "architectures": ["Qwen3NextMTP"]
        })
    if hf_config.model_type == "longcat_flash":
        hf_config.model_type = "longcat_flash_mtp"
        n_predict = getattr(hf_config, "num_nextn_predict_layers", 1)
        hf_config.update({
            "n_predict": n_predict,
            "architectures": ["LongCatFlashMTPModel"]
        })

    return hf_config
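
For instance, a DeepSeek-V3 style config is rewritten into the MTP draft form. The config below is a minimal stand-in for illustration; a real checkpoint config carries many more fields:

```python
from transformers import PretrainedConfig

cfg = PretrainedConfig()
cfg.model_type = "deepseek_v3"
cfg.num_nextn_predict_layers = 1
cfg.architectures = ["DeepseekV3ForCausalLM"]

cfg = SpeculativeConfig.hf_config_override(cfg)
assert cfg.model_type == "deepseek_mtp"
assert cfg.architectures == ["DeepSeekMTPModel"]
assert cfg.n_predict == 1
```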

use_eagle

use_eagle() -> bool
Source code in vllm/config/speculative.py
def use_eagle(self) -> bool:
    return self.method in ("eagle", "eagle3", "mtp")

SpeechToTextConfig

Configuration for speech-to-text models.

Source code in vllm/config/speech_to_text.py
@config
@dataclass
class SpeechToTextConfig:
    """Configuration for speech-to-text models."""

    sample_rate: float = 16_000
    """Sample rate (Hz) to resample input audio to. Most speech models expect
    16kHz audio input. The input audio will be automatically resampled to this
    rate before processing."""

    max_audio_clip_s: int = 30
    """Maximum duration in seconds for a single audio clip without chunking.
    Audio longer than this will be split into smaller chunks if
    `allow_audio_chunking` evaluates to True, otherwise it will be rejected."""

    overlap_chunk_second: int = 1
    """Overlap duration in seconds between consecutive audio chunks when
    splitting long audio. This helps maintain context across chunk boundaries
    and improves transcription quality at split points."""

    min_energy_split_window_size: Optional[int] = 1600
    """Window size in samples for finding low-energy (quiet) regions to split
    audio chunks. The algorithm looks for the quietest moment within this
    window to minimize cutting through speech. Default 1600 samples ≈ 100ms
    at 16kHz. If None, no chunking will be done."""

    @property
    def allow_audio_chunking(self) -> bool:
        return self.min_energy_split_window_size is not None

allow_audio_chunking property

allow_audio_chunking: bool

max_audio_clip_s class-attribute instance-attribute

max_audio_clip_s: int = 30

Maximum duration in seconds for a single audio clip without chunking. Audio longer than this will be split into smaller chunks if allow_audio_chunking evaluates to True, otherwise it will be rejected.

min_energy_split_window_size class-attribute instance-attribute

min_energy_split_window_size: Optional[int] = 1600

Window size in samples for finding low-energy (quiet) regions to split audio chunks. The algorithm looks for the quietest moment within this window to minimize cutting through speech. Default 1600 samples ≈ 100ms at 16kHz. If None, no chunking will be done.

overlap_chunk_second class-attribute instance-attribute

overlap_chunk_second: int = 1

Overlap duration in seconds between consecutive audio chunks when splitting long audio. This helps maintain context across chunk boundaries and improves transcription quality at split points.

sample_rate class-attribute instance-attribute

sample_rate: float = 16000

Sample rate (Hz) to resample input audio to. Most speech models expect 16kHz audio input. The input audio will be automatically resampled to this rate before processing.
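
Example of constructing the config (`SpeechToTextConfig` is exported from `vllm.config`):

```python
from vllm.config import SpeechToTextConfig

# Defaults: 16 kHz input, 30 s clips, 1 s overlap between chunks.
stt = SpeechToTextConfig()
assert stt.allow_audio_chunking          # split window is set by default

# Setting the split window to None disables chunking entirely;
# clips longer than max_audio_clip_s are then rejected.
no_chunk = SpeechToTextConfig(min_energy_split_window_size=None)
assert not no_chunk.allow_audio_chunking
```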

StructuredOutputsConfig

Dataclass which contains structured outputs config for the engine.

Source code in vllm/config/structured_outputs.py
@config
@dataclass
class StructuredOutputsConfig:
    """Dataclass which contains structured outputs config for the engine."""

    backend: StructuredOutputsBackend = "auto"
    """Which engine will be used for structured outputs (e.g. JSON schema,
    regex, etc) by default. With "auto", we will make opinionated choices
    based on request contents and what the backend libraries currently support,
    so the behavior is subject to change in each release."""
    disable_fallback: bool = False
    """If `True`, vLLM will not fallback to a different backend on error."""
    disable_any_whitespace: bool = False
    """If `True`, the model will not generate any whitespace during structured
    outputs. This is only supported for xgrammar and guidance backends."""
    disable_additional_properties: bool = False
    """If `True`, the `guidance` backend will not use `additionalProperties`
    in the JSON schema. This is only supported for the `guidance` backend and
    is used to better align its behaviour with `outlines` and `xgrammar`."""
    reasoning_parser: str = ""
    """Select the reasoning parser depending on the model that you're using.
    This is used to parse the reasoning content into OpenAI API format."""

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        # no factors to consider.
        # this config will not affect the computation graph.
        factors: list[Any] = []
        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()
        return hash_str

    def __post_init__(self):
        if (self.disable_any_whitespace
                and self.backend not in ("xgrammar", "guidance")):
            raise ValueError("disable_any_whitespace is only supported for "
                             "xgrammar and guidance backends.")
        if (self.disable_additional_properties and self.backend != "guidance"):
            raise ValueError("disable_additional_properties is only supported "
                             "for the guidance backend.")

backend class-attribute instance-attribute

backend: StructuredOutputsBackend = 'auto'

Which engine will be used for structured outputs (e.g. JSON schema, regex, etc) by default. With "auto", we will make opinionated choices based on request contents and what the backend libraries currently support, so the behavior is subject to change in each release.

disable_additional_properties class-attribute instance-attribute

disable_additional_properties: bool = False

If True, the guidance backend will not use additionalProperties in the JSON schema. This is only supported for the guidance backend and is used to better align its behaviour with outlines and xgrammar.

disable_any_whitespace class-attribute instance-attribute

disable_any_whitespace: bool = False

If True, the model will not generate any whitespace during structured outputs. This is only supported for xgrammar and guidance backends.

disable_fallback class-attribute instance-attribute

disable_fallback: bool = False

If True, vLLM will not fallback to a different backend on error.

reasoning_parser class-attribute instance-attribute

reasoning_parser: str = ''

Select the reasoning parser depending on the model that you're using. This is used to parse the reasoning content into OpenAI API format.

__post_init__

__post_init__()
Source code in vllm/config/structured_outputs.py
def __post_init__(self):
    if (self.disable_any_whitespace
            and self.backend not in ("xgrammar", "guidance")):
        raise ValueError("disable_any_whitespace is only supported for "
                         "xgrammar and guidance backends.")
    if (self.disable_additional_properties and self.backend != "guidance"):
        raise ValueError("disable_additional_properties is only supported "
                         "for the guidance backend.")

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/structured_outputs.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    # no factors to consider.
    # this config will not affect the computation graph.
    factors: list[Any] = []
    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()
    return hash_str

SupportsMetricsInfo

Bases: Protocol

Source code in vllm/config/utils.py
class SupportsMetricsInfo(Protocol):

    def metrics_info(self) -> dict[str, str]:
        ...

metrics_info

metrics_info() -> dict[str, str]
Source code in vllm/config/utils.py
def metrics_info(self) -> dict[str, str]:
    ...
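
Any object exposing a matching `metrics_info` method satisfies this protocol structurally, for example:

```python
from vllm.config import SupportsMetricsInfo

class PrefixCacheStats:
    """Illustrative class; not part of vLLM."""

    def metrics_info(self) -> dict[str, str]:
        return {"cache_type": "prefix", "block_size": "16"}

# Structural typing: no inheritance from the protocol is required.
stats: SupportsMetricsInfo = PrefixCacheStats()
print(stats.metrics_info())
```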

VllmConfig

Dataclass which contains all vllm-related configuration. This simplifies passing around the distinct configurations in the codebase.

Source code in vllm/config/vllm.py
@config
@dataclass(config=ConfigDict(arbitrary_types_allowed=True))
class VllmConfig:
    """Dataclass which contains all vllm-related configuration. This
    simplifies passing around the distinct configurations in the codebase.
    """

    # TODO: use default_factory once default constructing ModelConfig doesn't
    # try to download a model
    model_config: ModelConfig = None  # type: ignore
    """Model configuration."""
    cache_config: CacheConfig = field(default_factory=CacheConfig)
    """Cache configuration."""
    parallel_config: ParallelConfig = field(default_factory=ParallelConfig)
    """Parallel configuration."""
    scheduler_config: SchedulerConfig = field(default_factory=SchedulerConfig)
    """Scheduler configuration."""
    device_config: DeviceConfig = field(default_factory=DeviceConfig)
    """Device configuration."""
    load_config: LoadConfig = field(default_factory=LoadConfig)
    """Load configuration."""
    lora_config: Optional[LoRAConfig] = None
    """LoRA configuration."""
    speculative_config: Optional[SpeculativeConfig] = None
    """Speculative decoding configuration."""
    structured_outputs_config: StructuredOutputsConfig = field(
        default_factory=StructuredOutputsConfig)
    """Structured outputs configuration."""
    observability_config: Optional[ObservabilityConfig] = None
    """Observability configuration."""
    quant_config: Optional[QuantizationConfig] = None
    """Quantization configuration."""
    compilation_config: CompilationConfig = field(
        default_factory=CompilationConfig)
    """`torch.compile` and cudagraph capture configuration for the model.

    As a shorthand, `-O<n>` can be used to directly specify the compilation
    level `n`: `-O3` is equivalent to `-O.level=3` (same as `-O='{"level":3}'`).
    Currently, -O <n> and -O=<n> are supported as well but this will likely be
    removed in favor of clearer -O<n> syntax in the future.

    NOTE: level 0 is the default level without any optimization. level 1 and 2
    are for internal testing only. level 3 is the recommended level for
    production, also default in V1.

    You can specify the full compilation config like so:
    `{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}`
    """
    kv_transfer_config: Optional[KVTransferConfig] = None
    """The configurations for distributed KV cache transfer."""
    kv_events_config: Optional[KVEventsConfig] = None
    """The configurations for event publishing."""
    # some opaque config, only used to provide additional information
    # for the hash computation, mainly used for testing, debugging or out of
    # tree config registration.
    additional_config: Union[dict, SupportsHash] = field(default_factory=dict)
    """Additional config for specified platform. Different platforms may
    support different configs. Make sure the configs are valid for the platform
    you are using. Contents must be hashable."""
    instance_id: str = ""
    """The ID of the vLLM instance."""

    def compute_hash(self) -> str:
        """
        WARNING: Whenever a new field is added to this config,
        ensure that it is included in the factors list if
        it affects the computation graph.

        Provide a hash that uniquely identifies all the configs
        that affect the structure of the computation
        graph from input ids/embeddings to the final hidden states,
        excluding anything before input ids/embeddings and after
        the final hidden states.
        """
        factors: list[Any] = []

        # summarize vllm config
        vllm_factors: list[Any] = []
        from vllm import __version__
        vllm_factors.append(__version__)
        vllm_factors.append(envs.VLLM_USE_V1)
        if self.model_config:
            vllm_factors.append(self.model_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.cache_config:
            vllm_factors.append(self.cache_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.parallel_config:
            vllm_factors.append(self.parallel_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.scheduler_config:
            vllm_factors.append(self.scheduler_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.device_config:
            vllm_factors.append(self.device_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.load_config:
            vllm_factors.append(self.load_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.lora_config:
            vllm_factors.append(self.lora_config.compute_hash())
            # LoRA creates static buffers based on max_num_batched_tokens.
            # The tensor sizes and strides get captured in the torch.compile
            # graph explicitly.
            vllm_factors.append(
                str(self.scheduler_config.max_num_batched_tokens))
        else:
            vllm_factors.append("None")
        if self.speculative_config:
            vllm_factors.append(self.speculative_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.structured_outputs_config:
            vllm_factors.append(self.structured_outputs_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.observability_config:
            vllm_factors.append(self.observability_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.quant_config:
            pass  # should be captured by model_config.quantization
        if self.compilation_config:
            vllm_factors.append(self.compilation_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.kv_transfer_config:
            vllm_factors.append(self.kv_transfer_config.compute_hash())
        else:
            vllm_factors.append("None")
        if self.additional_config:
            if isinstance(additional_config := self.additional_config, dict):
                additional_config_hash = hashlib.md5(
                    json.dumps(additional_config, sort_keys=True).encode(),
                    usedforsecurity=False,
                ).hexdigest()
            else:
                additional_config_hash = additional_config.compute_hash()
            vllm_factors.append(additional_config_hash)
        else:
            vllm_factors.append("None")
        factors.append(vllm_factors)

        hash_str = hashlib.md5(str(factors).encode(),
                               usedforsecurity=False).hexdigest()[:10]
        return hash_str

    def pad_for_cudagraph(self, batch_size: int) -> int:
        # if batch_size > self.compilation_config.max_capture_size,
        # it should raise an IndexError.
        # the caller should make sure the batch_size is within the range,
        # i.e., batch_size <= self.compilation_config.max_capture_size
        return self.compilation_config.bs_to_padded_graph_size[batch_size]

    @staticmethod
    def _get_quantization_config(
            model_config: ModelConfig,
            load_config: LoadConfig) -> Optional[QuantizationConfig]:
        """Get the quantization config."""
        from vllm.platforms import current_platform
        if model_config.quantization is not None:
            from vllm.model_executor.model_loader.weight_utils import (
                get_quant_config)
            quant_config = get_quant_config(model_config, load_config)
            capability_tuple = current_platform.get_device_capability()

            if capability_tuple is not None:
                capability = capability_tuple.to_int()
                if capability < quant_config.get_min_capability():
                    raise ValueError(
                        f"The quantization method {model_config.quantization} "
                        "is not supported for the current GPU. Minimum "
                        f"capability: {quant_config.get_min_capability()}. "
                        f"Current capability: {capability}.")
            supported_dtypes = quant_config.get_supported_act_dtypes()
            if model_config.dtype not in supported_dtypes:
                raise ValueError(
                    f"{model_config.dtype} is not supported for quantization "
                    f"method {model_config.quantization}. Supported dtypes: "
                    f"{supported_dtypes}")
            quant_config.maybe_update_config(model_config.model)
            return quant_config
        return None

    @staticmethod
    def get_quantization_config(
            model_config: ModelConfig,
            load_config: LoadConfig) -> Optional[QuantizationConfig]:
        import copy

        # For some reason, the _ version of this modifies the model_config
        # object, so using deepcopy to avoid this problem.
        return VllmConfig._get_quantization_config(copy.deepcopy(model_config),
                                                   load_config)

    def with_hf_config(
        self,
        hf_config: PretrainedConfig,
        architectures: Optional[list[str]] = None,
    ) -> "VllmConfig":
        if architectures is not None:
            hf_config = copy.deepcopy(hf_config)
            hf_config.architectures = architectures

        model_config = copy.deepcopy(self.model_config)
        model_config.hf_config = hf_config

        return replace(self, model_config=model_config)

    def __post_init__(self):
        """Verify configs are valid & consistent with each other.
        """

        self.try_verify_and_update_config()

        if self.model_config is not None:
            self.model_config.verify_with_parallel_config(self.parallel_config)
            self.model_config.verify_dual_chunk_attention_config(
                self.load_config)

        self.cache_config.verify_with_parallel_config(self.parallel_config)

        if self.lora_config is not None:
            self.lora_config.verify_with_cache_config(self.cache_config)
            self.lora_config.verify_with_model_config(self.model_config)

        if self.quant_config is None and self.model_config is not None:
            self.quant_config = VllmConfig._get_quantization_config(
                self.model_config, self.load_config)

        from vllm.platforms import current_platform
        if self.model_config is not None and \
            self.scheduler_config.chunked_prefill_enabled and \
            self.model_config.dtype == torch.float32 and \
            current_platform.get_device_capability() == (7, 5):
            logger.warning_once(
                "Turing devices tensor cores do not support float32 matmul. "
                "To workaround this limitation, vLLM will set 'ieee' input "
                "precision for chunked prefill triton kernels.")

        # If the user does not explicitly set a compilation level, then
        # we use the default level. The default level depends on other
        # settings (see the below code).
        if self.compilation_config.level is None:
            if envs.VLLM_USE_V1:
                if (self.model_config is not None
                        and not self.model_config.enforce_eager):
                    self.compilation_config.level = CompilationLevel.PIECEWISE
                else:
                    self.compilation_config.level = \
                            CompilationLevel.NO_COMPILATION

            else:
                # NB: Passing both --enforce-eager and a compilation level
                # in V0 means the compilation level wins out.
                self.compilation_config.level = CompilationLevel.NO_COMPILATION

        # async tp is built on top of sequence parallelism
        # and requires it to be enabled.
        if self.compilation_config.pass_config.enable_async_tp:
            self.compilation_config.pass_config.enable_sequence_parallelism = \
                True
        if self.compilation_config.pass_config.enable_sequence_parallelism:
            self.compilation_config.custom_ops.append("+rms_norm")

        if current_platform.support_static_graph_mode():
            # if cudagraph_mode is not explicitly set by users, set default
            # value
            if self.compilation_config.cudagraph_mode is None:
                if envs.VLLM_USE_V1 and self.compilation_config.level \
                    == CompilationLevel.PIECEWISE:
                    # default to full and piecewise for most models
                    self.compilation_config.cudagraph_mode = \
                        CUDAGraphMode.FULL_AND_PIECEWISE

                    # pooling models and encoder-decoder models
                    # do not support full cudagraphs
                    if self.model_config is not None and \
                        (self.model_config.pooler_config is not None
                         or self.model_config.is_encoder_decoder):
                        self.compilation_config.cudagraph_mode = \
                            CUDAGraphMode.PIECEWISE
                else:
                    self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE

            # disable cudagraph when enforce eager execution
            if self.model_config is not None and \
                    self.model_config.enforce_eager:
                logger.info("Cudagraph is disabled under eager mode")
                self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE
            elif envs.VLLM_USE_V1:
                self.compilation_config.cudagraph_num_of_warmups = 1

            self._set_cudagraph_sizes()
        else:
            self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE

        if self.cache_config.kv_sharing_fast_prefill:

            if self.speculative_config is not None and \
                self.speculative_config.use_eagle():
                raise NotImplementedError(
                    "Fast prefill optimization for KV sharing is not "
                    "compatible with EAGLE as EAGLE requires correct logits "
                    "for all tokens while fast prefill gives incorrect logits "
                    "for prompt tokens.")

            logger.warning_once(
                "--kv-sharing-fast-prefill requires changes on model side for "
                "correctness and to realize prefill savings. ")

        disable_chunked_prefill_reasons: list[str] = []

        if self.model_config:
            if self.model_config.pooler_config:
                pooling_type = self.model_config.pooler_config.pooling_type
                if pooling_type is None or pooling_type.lower() != "last":
                    disable_chunked_prefill_reasons.append(
                        "Only \"last\" pooling supports chunked "
                        "prefill and prefix caching; disabling both.")
                if not getattr(self.model_config.hf_config, "is_causal", True):
                    disable_chunked_prefill_reasons.append(
                        "Only models using causal attention supports chunked "
                        "prefill and prefix caching; disabling both.")
            elif self.model_config.is_encoder_decoder:
                from vllm.multimodal import MULTIMODAL_REGISTRY
                self.scheduler_config.max_num_encoder_input_tokens = \
                    MULTIMODAL_REGISTRY.get_encdec_max_encoder_len(self.model_config)
                logger.debug(
                    "Encoder-decoder model detected: setting "
                    "`max_num_encoder_input_tokens` to encoder length (%s)",
                    self.scheduler_config.max_num_encoder_input_tokens)
                if (self.model_config.architecture
                        == "WhisperForConditionalGeneration"
                        and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD")
                        != "spawn"):
                    logger.warning(
                        "Whisper is known to have issues with "
                        "forked workers. If startup is hanging, "
                        "try setting 'VLLM_WORKER_MULTIPROC_METHOD' "
                        "to 'spawn'.")

        # Disable prefix caching only if chunked prefill is explicitly disabled
        # (and not merely unset)
        if (self.scheduler_config.chunked_prefill_enabled is False
                or disable_chunked_prefill_reasons):
            for reason in disable_chunked_prefill_reasons:
                logger.info(reason)
            self.scheduler_config.chunked_prefill_enabled = False
            self.scheduler_config.long_prefill_token_threshold = 0

            if self.cache_config is not None:
                self.cache_config.enable_prefix_caching = False

        if (self.kv_events_config is not None
                and self.kv_events_config.enable_kv_cache_events
                and not self.cache_config.enable_prefix_caching):
            logger.warning(
                "KV cache events are on, but prefix caching is not enabled. "
                "Use --enable-prefix-caching to enable.")
        if (self.kv_events_config is not None
                and self.kv_events_config.publisher != "null"
                and not self.kv_events_config.enable_kv_cache_events):
            logger.warning("KV cache events are disabled,"
                           "but the scheduler is configured to publish them."
                           "Modify KVEventsConfig.enable_kv_cache_events"
                           "to True to enable.")
        current_platform.check_and_update_config(self)

        # Do this after all the updates to compilation_config.level
        if envs.VLLM_USE_V1 and \
            self.compilation_config.level == CompilationLevel.PIECEWISE:
            self.compilation_config.set_splitting_ops_for_v1()

        # final check of cudagraph mode after all possible updates
        if envs.VLLM_USE_V1 and current_platform.is_cuda_alike():
            if self.compilation_config.cudagraph_mode.has_full_cudagraphs()\
                and self.model_config is not None and \
                not self.model_config.disable_cascade_attn and\
                not self.compilation_config.cudagraph_mode.\
                has_piecewise_cudagraphs():
                logger.warning_once(
                    "No piecewise cudagraph for executing cascade attention. "
                    "Will fall back to eager execution if a batch runs "
                    "into cascade attention.")

            if self.compilation_config.cudagraph_mode\
                .requires_piecewise_compilation():
                assert self.compilation_config.level == \
                    CompilationLevel.PIECEWISE, \
                    "Compilation level should be CompilationLevel.PIECEWISE "\
                    "when cudagraph_mode piecewise cudagraphs is used, "\
                    f"cudagraph_mode={self.compilation_config.cudagraph_mode}"

            # final migrate the deprecated flags
            self.compilation_config.use_cudagraph = self.compilation_config.\
                cudagraph_mode != CUDAGraphMode.NONE
            self.compilation_config.full_cuda_graph = self.compilation_config.\
                cudagraph_mode.has_full_cudagraphs()

        if self.parallel_config.enable_dbo:
            a2a_backend = envs.VLLM_ALL2ALL_BACKEND
            assert a2a_backend in \
                ["deepep_low_latency", "deepep_high_throughput"], \
            "Microbatching currently only supports the deepep_low_latency and "\
            f"deepep_high_throughput all2all backend. {a2a_backend} is not "\
            "supported. To fix set the VLLM_ALL2ALL_BACKEND environment "\
            "variable to deepep_low_latency or deepep_high_throughput and "\
            "install the DeepEP kernels."

            if not self.model_config.disable_cascade_attn:
                self.model_config.disable_cascade_attn = True
                logger.warning_once(
                    "Disabling cascade attention when DBO is enabled.")

        if not self.instance_id:
            self.instance_id = random_uuid()[:5]

        if (envs.VLLM_USE_V1
                and not self.scheduler_config.disable_hybrid_kv_cache_manager):
            # The logger should only print a warning message for hybrid
            # models. Since we can't yet know whether the model is hybrid,
            # we don't log the warning here; it is logged later instead.
            if not current_platform.support_hybrid_kv_cache():
                # Hybrid KV cache manager is not supported on non-GPU platforms.
                self.scheduler_config.disable_hybrid_kv_cache_manager = True
            if self.kv_transfer_config is not None:
                # Hybrid KV cache manager is not compatible with KV transfer.
                self.scheduler_config.disable_hybrid_kv_cache_manager = True
            if self.kv_events_config is not None:
                # Hybrid KV cache manager is not compatible with KV events.
                self.scheduler_config.disable_hybrid_kv_cache_manager = True
            if self.model_config is not None and \
                self.model_config.attention_chunk_size is not None:
                if self.speculative_config is not None and \
                    self.speculative_config.use_eagle():
                    # Hybrid KV cache manager is not yet supported with chunked
                    # local attention + eagle.
                    self.scheduler_config.disable_hybrid_kv_cache_manager = True
                elif \
                    not envs.VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE:
                    logger.warning(
                        "There is a latency regression when using chunked local"
                        " attention with the hybrid KV cache manager. Disabling"
                        " it, by default. To enable it, set the environment "
                        "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE=1."
                    )
                    # Hybrid KV cache manager is not yet supported with chunked
                    # local attention.
                    self.scheduler_config.disable_hybrid_kv_cache_manager = True

        if self.compilation_config.debug_dump_path:
            self.compilation_config.debug_dump_path = \
                self.compilation_config.debug_dump_path.absolute().expanduser()
        if envs.VLLM_DEBUG_DUMP_PATH is not None:
            env_path = Path(envs.VLLM_DEBUG_DUMP_PATH).absolute().expanduser()
            if self.compilation_config.debug_dump_path:
                logger.warning(
                    "Config-specified debug dump path is overridden"
                    " by VLLM_DEBUG_DUMP_PATH to %s", env_path)
            self.compilation_config.debug_dump_path = env_path

    def update_sizes_for_sequence_parallelism(self,
                                              possible_sizes: list) -> list:
        # Remove the sizes that are not multiples of tp_size when
        # sequence parallelism is enabled.
        removed_sizes = [
            size for size in possible_sizes
            if size % self.parallel_config.tensor_parallel_size != 0
        ]
        if removed_sizes:
            logger.warning(
                "Batch sizes %s are removed because they are not "
                "multiple of tp_size %d when "
                "sequence parallelism is enabled", removed_sizes,
                self.parallel_config.tensor_parallel_size)

        return [
            size for size in possible_sizes
            if size % self.parallel_config.tensor_parallel_size == 0
        ]

    def _set_cudagraph_sizes(self):
        """
        vLLM defines the default candidate list of batch sizes for CUDA graph
        capture as:

        ```python
        max_graph_size = min(max_num_seqs * 2, 512)
        # 1, 2, 4, then multiples of 8 up to max_graph_size
        cuda_graph_sizes = [1, 2, 4, 8, 16, 24, 32, 40, ..., max_graph_size]
        ```

        In the end, `vllm_config.compilation_config.cudagraph_capture_sizes`
        will be the final sizes to capture cudagraph (in descending order).

        These sizes are used to capture and reuse CUDA graphs for
        performance-critical paths (e.g., decoding). Capturing enables
        significantly faster kernel dispatch by avoiding Python overhead. The
        list is then filtered based on `max_num_batched_tokens` (e.g., 8192 on
        most GPUs), which controls the total allowed number of tokens in a
        batch. Since each sequence may have a variable number of tokens, the
        maximum usable batch size will depend on actual sequence lengths.

        Example:
            With `max_num_batched_tokens = 8192`, and typical sequences
            averaging ~32 tokens, most practical batch sizes fall below 256.
            However, the system will still allow capture sizes up to 512 if
            shape and memory permit.

        Note:
            If users explicitly specify cudagraph capture sizes in the
            compilation config, those will override this default logic.
            At runtime:

            - If batch size <= one of the `cudagraph_capture_sizes`, the closest
            padded CUDA graph will be used.
            - If batch size > largest `cudagraph_capture_sizes`, cudagraph will
            not be used.
        """

        # calculate the default `batch_size_capture_list`
        batch_size_capture_list = []
        if self.model_config is not None and \
            not self.model_config.enforce_eager:
            cuda_graph_sizes = self.scheduler_config.cuda_graph_sizes
            if len(cuda_graph_sizes) == 1:
                batch_size_capture_list = [1, 2, 4] + [
                    i for i in range(8, cuda_graph_sizes[0] + 1, 8)
                ]
            elif len(cuda_graph_sizes) > 1:
                batch_size_capture_list = sorted(cuda_graph_sizes)
            else:
                raise TypeError(f"Invalid value for {cuda_graph_sizes=}.")
            if self.parallel_config.tensor_parallel_size > 1 and \
                self.compilation_config.pass_config.enable_sequence_parallelism:
                batch_size_capture_list = \
                    self.update_sizes_for_sequence_parallelism(batch_size_capture_list)
            max_num_tokens = self.scheduler_config.max_num_batched_tokens
            batch_size_capture_list = [
                size for size in batch_size_capture_list
                if size <= max_num_tokens
            ]

        self.compilation_config.init_with_cudagraph_sizes(
            batch_size_capture_list)

    def recalculate_max_model_len(self, max_model_len: int):
        # Can only be called in try_verify_and_update_config
        model_config = self.model_config
        max_model_len = model_config.get_and_verify_max_len(max_model_len)
        self.model_config.max_model_len = max_model_len
        self.scheduler_config.max_model_len = max_model_len

    def try_verify_and_update_config(self):
        if self.model_config is None:
            return

        # Avoid running try_verify_and_update_config multiple times
        if getattr(self.model_config, "config_updated", False):
            return
        self.model_config.config_updated = True

        architecture = self.model_config.architecture
        if architecture is None:
            return

        from vllm.model_executor.models.config import (
            MODELS_CONFIG_MAP, HybridAttentionMambaModelConfig)
        cls = MODELS_CONFIG_MAP.get(architecture, None)
        if cls is not None:
            cls.verify_and_update_config(self)

        if self.model_config.is_hybrid:
            HybridAttentionMambaModelConfig.verify_and_update_config(self)

        if self.model_config.convert_type == "classify":
            # Maybe convert ForCausalLM into ForSequenceClassification model.
            from vllm.model_executor.models.adapters import (
                SequenceClassificationConfig)
            SequenceClassificationConfig.verify_and_update_config(self)

        if hasattr(self.model_config, "model_weights") and is_runai_obj_uri(
                self.model_config.model_weights):
            if self.load_config.load_format == "auto":
                logger.info("Detected Run:ai model config. "
                            "Overriding `load_format` to 'runai_streamer'")
                self.load_config.load_format = "runai_streamer"
            elif self.load_config.load_format != "runai_streamer":
                raise ValueError(f"To load a model from S3, 'load_format' "
                                 f"must be 'runai_streamer', "
                                 f"but got '{self.load_config.load_format}'. "
                                 f"Model: {self.model_config.model}")

    def compile_debug_dump_path(self) -> Optional[Path]:
        """Returns a rank-aware path for dumping 
        torch.compile debug information.
        """
        if self.compilation_config.debug_dump_path is None:
            return None
        tp_rank = self.parallel_config.rank
        dp_rank = self.parallel_config.data_parallel_rank
        data_parallel_size = self.parallel_config.data_parallel_size
        append_path = f"rank_{tp_rank}" if data_parallel_size == 1 \
            else f"rank_{tp_rank}_dp_{dp_rank}"
        path = self.compilation_config.debug_dump_path / append_path
        return path

    def __str__(self):
        return (
            f"model={self.model_config.model!r}, "
            f"speculative_config={self.speculative_config!r}, "
            f"tokenizer={self.model_config.tokenizer!r}, "
            f"skip_tokenizer_init={self.model_config.skip_tokenizer_init}, "
            f"tokenizer_mode={self.model_config.tokenizer_mode}, "
            f"revision={self.model_config.revision}, "
            f"tokenizer_revision={self.model_config.tokenizer_revision}, "
            f"trust_remote_code={self.model_config.trust_remote_code}, "
            f"dtype={self.model_config.dtype}, "
            f"max_seq_len={self.model_config.max_model_len}, "
            f"download_dir={self.load_config.download_dir!r}, "
            f"load_format={self.load_config.load_format}, "
            f"tensor_parallel_size={self.parallel_config.tensor_parallel_size}, "  # noqa
            f"pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, "  # noqa
            f"data_parallel_size={self.parallel_config.data_parallel_size}, "  # noqa
            f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, "  # noqa
            f"quantization={self.model_config.quantization}, "
            f"enforce_eager={self.model_config.enforce_eager}, "
            f"kv_cache_dtype={self.cache_config.cache_dtype}, "
            f"device_config={self.device_config.device}, "
            f"structured_outputs_config={self.structured_outputs_config!r}, "
            f"observability_config={self.observability_config!r}, "
            f"seed={self.model_config.seed}, "
            f"served_model_name={self.model_config.served_model_name}, "
            f"enable_prefix_caching={self.cache_config.enable_prefix_caching}, "
            f"chunked_prefill_enabled={self.scheduler_config.chunked_prefill_enabled}, "  # noqa
            f"pooler_config={self.model_config.pooler_config!r}, "
            f"compilation_config={self.compilation_config!r}")

additional_config class-attribute instance-attribute

additional_config: Union[dict, SupportsHash] = field(
    default_factory=dict
)

Additional config for the specified platform. Different platforms may support different configs. Make sure the configs are valid for the platform you are using. Contents must be hashable.

cache_config class-attribute instance-attribute

cache_config: CacheConfig = field(
    default_factory=CacheConfig
)

Cache configuration.

compilation_config class-attribute instance-attribute

compilation_config: CompilationConfig = field(
    default_factory=CompilationConfig
)

torch.compile and cudagraph capture configuration for the model.

As a shorthand, -O<n> can be used to directly specify the compilation level n: -O3 is equivalent to -O.level=3 (same as -O='{"level":3}'). Currently, -O <n> and -O=<n> are supported as well, but this will likely be removed in favor of the clearer -O<n> syntax in the future.

NOTE: Level 0 is the default level without any optimization. Levels 1 and 2 are for internal testing only. Level 3 is the recommended level for production and is the default in V1.

You can specify the full compilation config like so: {"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}
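
For reference, a hedged sketch of the equivalent programmatic form (the model name and capture sizes below are illustrative assumptions, not defaults):

```python
# Illustrative only: pass a CompilationConfig when constructing the engine.
from vllm import LLM
from vllm.config import CompilationConfig

llm = LLM(
    model="facebook/opt-125m",  # assumed small model for illustration
    compilation_config=CompilationConfig(
        level=3,
        cudagraph_capture_sizes=[1, 2, 4, 8],
    ),
)
```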

device_config class-attribute instance-attribute

device_config: DeviceConfig = field(
    default_factory=DeviceConfig
)

Device configuration.

instance_id class-attribute instance-attribute

instance_id: str = ''

The ID of the vLLM instance.

kv_events_config class-attribute instance-attribute

kv_events_config: Optional[KVEventsConfig] = None

The configurations for event publishing.

kv_transfer_config class-attribute instance-attribute

kv_transfer_config: Optional[KVTransferConfig] = None

The configurations for distributed KV cache transfer.

load_config class-attribute instance-attribute

load_config: LoadConfig = field(default_factory=LoadConfig)

Load configuration.

lora_config class-attribute instance-attribute

lora_config: Optional[LoRAConfig] = None

LoRA configuration.

model_config class-attribute instance-attribute

model_config: ModelConfig = None

Model configuration.

observability_config class-attribute instance-attribute

observability_config: Optional[ObservabilityConfig] = None

Observability configuration.

parallel_config class-attribute instance-attribute

parallel_config: ParallelConfig = field(
    default_factory=ParallelConfig
)

Parallel configuration.

quant_config class-attribute instance-attribute

quant_config: Optional[QuantizationConfig] = None

Quantization configuration.

scheduler_config class-attribute instance-attribute

scheduler_config: SchedulerConfig = field(
    default_factory=SchedulerConfig
)

Scheduler configuration.

speculative_config class-attribute instance-attribute

speculative_config: Optional[SpeculativeConfig] = None

Speculative decoding configuration.

structured_outputs_config class-attribute instance-attribute

structured_outputs_config: StructuredOutputsConfig = field(
    default_factory=StructuredOutputsConfig
)

Structured outputs configuration.

__post_init__

__post_init__()

Verify configs are valid & consistent with each other.

Source code in vllm/config/vllm.py
def __post_init__(self):
    """Verify configs are valid & consistent with each other.
    """

    self.try_verify_and_update_config()

    if self.model_config is not None:
        self.model_config.verify_with_parallel_config(self.parallel_config)
        self.model_config.verify_dual_chunk_attention_config(
            self.load_config)

    self.cache_config.verify_with_parallel_config(self.parallel_config)

    if self.lora_config is not None:
        self.lora_config.verify_with_cache_config(self.cache_config)
        self.lora_config.verify_with_model_config(self.model_config)

    if self.quant_config is None and self.model_config is not None:
        self.quant_config = VllmConfig._get_quantization_config(
            self.model_config, self.load_config)

    from vllm.platforms import current_platform
    if self.model_config is not None and \
        self.scheduler_config.chunked_prefill_enabled and \
        self.model_config.dtype == torch.float32 and \
        current_platform.get_device_capability() == (7, 5):
        logger.warning_once(
            "Turing devices tensor cores do not support float32 matmul. "
            "To workaround this limitation, vLLM will set 'ieee' input "
            "precision for chunked prefill triton kernels.")

    # If the user does not explicitly set a compilation level, then
    # we use the default level. The default level depends on other
    # settings (see the below code).
    if self.compilation_config.level is None:
        if envs.VLLM_USE_V1:
            if (self.model_config is not None
                    and not self.model_config.enforce_eager):
                self.compilation_config.level = CompilationLevel.PIECEWISE
            else:
                self.compilation_config.level = \
                        CompilationLevel.NO_COMPILATION

        else:
            # NB: Passing both --enforce-eager and a compilation level
            # in V0 means the compilation level wins out.
            self.compilation_config.level = CompilationLevel.NO_COMPILATION

    # async tp is built on top of sequence parallelism
    # and requires it to be enabled.
    if self.compilation_config.pass_config.enable_async_tp:
        self.compilation_config.pass_config.enable_sequence_parallelism = \
            True
    if self.compilation_config.pass_config.enable_sequence_parallelism:
        self.compilation_config.custom_ops.append("+rms_norm")

    if current_platform.support_static_graph_mode():
        # if cudagraph_mode is not explicitly set by users, set default
        # value
        if self.compilation_config.cudagraph_mode is None:
            if envs.VLLM_USE_V1 and self.compilation_config.level \
                == CompilationLevel.PIECEWISE:
                # default to full and piecewise for most models
                self.compilation_config.cudagraph_mode = \
                    CUDAGraphMode.FULL_AND_PIECEWISE

                # pooling models and encoder-decoder models
                # do not support full cudagraphs
                if self.model_config is not None and \
                    (self.model_config.pooler_config is not None
                     or self.model_config.is_encoder_decoder):
                    self.compilation_config.cudagraph_mode = \
                        CUDAGraphMode.PIECEWISE
            else:
                self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE

        # disable cudagraph when enforce eager execution
        if self.model_config is not None and \
                self.model_config.enforce_eager:
            logger.info("Cudagraph is disabled under eager mode")
            self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE
        elif envs.VLLM_USE_V1:
            self.compilation_config.cudagraph_num_of_warmups = 1

        self._set_cudagraph_sizes()
    else:
        self.compilation_config.cudagraph_mode = CUDAGraphMode.NONE

    if self.cache_config.kv_sharing_fast_prefill:

        if self.speculative_config is not None and \
            self.speculative_config.use_eagle():
            raise NotImplementedError(
                "Fast prefill optimization for KV sharing is not "
                "compatible with EAGLE as EAGLE requires correct logits "
                "for all tokens while fast prefill gives incorrect logits "
                "for prompt tokens.")

        logger.warning_once(
            "--kv-sharing-fast-prefill requires changes on model side for "
            "correctness and to realize prefill savings. ")

    disable_chunked_prefill_reasons: list[str] = []

    if self.model_config:
        if self.model_config.pooler_config:
            pooling_type = self.model_config.pooler_config.pooling_type
            if pooling_type is None or pooling_type.lower() != "last":
                disable_chunked_prefill_reasons.append(
                    "Only \"last\" pooling supports chunked "
                    "prefill and prefix caching; disabling both.")
            if not getattr(self.model_config.hf_config, "is_causal", True):
                disable_chunked_prefill_reasons.append(
                    "Only models using causal attention supports chunked "
                    "prefill and prefix caching; disabling both.")
        elif self.model_config.is_encoder_decoder:
            from vllm.multimodal import MULTIMODAL_REGISTRY
            self.scheduler_config.max_num_encoder_input_tokens = \
                MULTIMODAL_REGISTRY.get_encdec_max_encoder_len(self.model_config)
            logger.debug(
                "Encoder-decoder model detected: setting "
                "`max_num_encoder_input_tokens` to encoder length (%s)",
                self.scheduler_config.max_num_encoder_input_tokens)
            if (self.model_config.architecture
                    == "WhisperForConditionalGeneration"
                    and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD")
                    != "spawn"):
                logger.warning(
                    "Whisper is known to have issues with "
                    "forked workers. If startup is hanging, "
                    "try setting 'VLLM_WORKER_MULTIPROC_METHOD' "
                    "to 'spawn'.")

    # Disable prefix caching only if chunked prefill is explicitly disabled
    # (and not merely unset)
    if (self.scheduler_config.chunked_prefill_enabled is False
            or disable_chunked_prefill_reasons):
        for reason in disable_chunked_prefill_reasons:
            logger.info(reason)
        self.scheduler_config.chunked_prefill_enabled = False
        self.scheduler_config.long_prefill_token_threshold = 0

        if self.cache_config is not None:
            self.cache_config.enable_prefix_caching = False

    if (self.kv_events_config is not None
            and self.kv_events_config.enable_kv_cache_events
            and not self.cache_config.enable_prefix_caching):
        logger.warning(
            "KV cache events are on, but prefix caching is not enabled."
            "Use --enable-prefix-caching to enable.")
    if (self.kv_events_config is not None
            and self.kv_events_config.publisher != "null"
            and not self.kv_events_config.enable_kv_cache_events):
        logger.warning("KV cache events are disabled,"
                       "but the scheduler is configured to publish them."
                       "Modify KVEventsConfig.enable_kv_cache_events"
                       "to True to enable.")
    current_platform.check_and_update_config(self)

    # Do this after all the updates to compilation_config.level
    if envs.VLLM_USE_V1 and \
        self.compilation_config.level == CompilationLevel.PIECEWISE:
        self.compilation_config.set_splitting_ops_for_v1()

    # final check of cudagraph mode after all possible updates
    if envs.VLLM_USE_V1 and current_platform.is_cuda_alike():
        if self.compilation_config.cudagraph_mode.has_full_cudagraphs()\
            and self.model_config is not None and \
            not self.model_config.disable_cascade_attn and\
            not self.compilation_config.cudagraph_mode.\
            has_piecewise_cudagraphs():
            logger.warning_once(
                "No piecewise cudagraph for executing cascade attention."
                " Will fall back to eager execution if a batch runs "
                "into cascade attentions")

        if self.compilation_config.cudagraph_mode\
            .requires_piecewise_compilation():
            assert self.compilation_config.level == \
                CompilationLevel.PIECEWISE, \
                "Compilation level should be CompilationLevel.PIECEWISE "\
                "when cudagraph_mode piecewise cudagraphs is used, "\
                f"cudagraph_mode={self.compilation_config.cudagraph_mode}"

        # final migrate the deprecated flags
        self.compilation_config.use_cudagraph = self.compilation_config.\
            cudagraph_mode!= CUDAGraphMode.NONE
        self.compilation_config.full_cuda_graph = self.compilation_config.\
            cudagraph_mode.has_full_cudagraphs()

    if self.parallel_config.enable_dbo:
        a2a_backend = envs.VLLM_ALL2ALL_BACKEND
        assert a2a_backend in \
            ["deepep_low_latency", "deepep_high_throughput"], \
        "Microbatching currently only supports the deepep_low_latency and "\
        f"deepep_high_throughput all2all backend. {a2a_backend} is not "\
        "supported. To fix set the VLLM_ALL2ALL_BACKEND environment "\
        "variable to deepep_low_latency or deepep_high_throughput and "\
        "install the DeepEP kernels."

        if not self.model_config.disable_cascade_attn:
            self.model_config.disable_cascade_attn = True
            logger.warning_once(
                "Disabling cascade attention when DBO is enabled.")

    if not self.instance_id:
        self.instance_id = random_uuid()[:5]

    if (envs.VLLM_USE_V1
            and not self.scheduler_config.disable_hybrid_kv_cache_manager):
        # The warning message should only be printed for hybrid models. Since
        # we can't yet know whether the model is hybrid at this point, we
        # don't log the warning here; it will be logged later.
        if not current_platform.support_hybrid_kv_cache():
            # Hybrid KV cache manager is not supported on non-GPU platforms.
            self.scheduler_config.disable_hybrid_kv_cache_manager = True
        if self.kv_transfer_config is not None:
            # Hybrid KV cache manager is not compatible with KV transfer.
            self.scheduler_config.disable_hybrid_kv_cache_manager = True
        if self.kv_events_config is not None:
            # Hybrid KV cache manager is not compatible with KV events.
            self.scheduler_config.disable_hybrid_kv_cache_manager = True
        if self.model_config is not None and \
            self.model_config.attention_chunk_size is not None:
            if self.speculative_config is not None and \
                self.speculative_config.use_eagle():
                # Hybrid KV cache manager is not yet supported with chunked
                # local attention + eagle.
                self.scheduler_config.disable_hybrid_kv_cache_manager = True
            elif \
                not envs.VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE:
                logger.warning(
                    "There is a latency regression when using chunked local"
                    " attention with the hybrid KV cache manager. Disabling"
                    " it, by default. To enable it, set the environment "
                    "VLLM_ALLOW_CHUNKED_LOCAL_ATTN_WITH_HYBRID_KV_CACHE=1."
                )
                # Hybrid KV cache manager is not yet supported with chunked
                # local attention.
                self.scheduler_config.disable_hybrid_kv_cache_manager = True

    if self.compilation_config.debug_dump_path:
        self.compilation_config.debug_dump_path = \
            self.compilation_config.debug_dump_path.absolute().expanduser()
    if envs.VLLM_DEBUG_DUMP_PATH is not None:
        env_path = Path(envs.VLLM_DEBUG_DUMP_PATH).absolute().expanduser()
        if self.compilation_config.debug_dump_path:
            logger.warning(
                "Config-specified debug dump path is overridden"
                " by VLLM_DEBUG_DUMP_PATH to %s", env_path)
        self.compilation_config.debug_dump_path = env_path

__str__

__str__()
Source code in vllm/config/vllm.py
def __str__(self):
    return (
        f"model={self.model_config.model!r}, "
        f"speculative_config={self.speculative_config!r}, "
        f"tokenizer={self.model_config.tokenizer!r}, "
        f"skip_tokenizer_init={self.model_config.skip_tokenizer_init}, "
        f"tokenizer_mode={self.model_config.tokenizer_mode}, "
        f"revision={self.model_config.revision}, "
        f"tokenizer_revision={self.model_config.tokenizer_revision}, "
        f"trust_remote_code={self.model_config.trust_remote_code}, "
        f"dtype={self.model_config.dtype}, "
        f"max_seq_len={self.model_config.max_model_len}, "
        f"download_dir={self.load_config.download_dir!r}, "
        f"load_format={self.load_config.load_format}, "
        f"tensor_parallel_size={self.parallel_config.tensor_parallel_size}, "  # noqa
        f"pipeline_parallel_size={self.parallel_config.pipeline_parallel_size}, "  # noqa
        f"data_parallel_size={self.parallel_config.data_parallel_size}, "  # noqa
        f"disable_custom_all_reduce={self.parallel_config.disable_custom_all_reduce}, "  # noqa
        f"quantization={self.model_config.quantization}, "
        f"enforce_eager={self.model_config.enforce_eager}, "
        f"kv_cache_dtype={self.cache_config.cache_dtype}, "
        f"device_config={self.device_config.device}, "
        f"structured_outputs_config={self.structured_outputs_config!r}, "
        f"observability_config={self.observability_config!r}, "
        f"seed={self.model_config.seed}, "
        f"served_model_name={self.model_config.served_model_name}, "
        f"enable_prefix_caching={self.cache_config.enable_prefix_caching}, "
        f"chunked_prefill_enabled={self.scheduler_config.chunked_prefill_enabled}, "  # noqa
        f"pooler_config={self.model_config.pooler_config!r}, "
        f"compilation_config={self.compilation_config!r}")

_get_quantization_config staticmethod

_get_quantization_config(
    model_config: ModelConfig, load_config: LoadConfig
) -> Optional[QuantizationConfig]

Get the quantization config.

Source code in vllm/config/vllm.py
@staticmethod
def _get_quantization_config(
        model_config: ModelConfig,
        load_config: LoadConfig) -> Optional[QuantizationConfig]:
    """Get the quantization config."""
    from vllm.platforms import current_platform
    if model_config.quantization is not None:
        from vllm.model_executor.model_loader.weight_utils import (
            get_quant_config)
        quant_config = get_quant_config(model_config, load_config)
        capability_tuple = current_platform.get_device_capability()

        if capability_tuple is not None:
            capability = capability_tuple.to_int()
            if capability < quant_config.get_min_capability():
                raise ValueError(
                    f"The quantization method {model_config.quantization} "
                    "is not supported for the current GPU. Minimum "
                    f"capability: {quant_config.get_min_capability()}. "
                    f"Current capability: {capability}.")
        supported_dtypes = quant_config.get_supported_act_dtypes()
        if model_config.dtype not in supported_dtypes:
            raise ValueError(
                f"{model_config.dtype} is not supported for quantization "
                f"method {model_config.quantization}. Supported dtypes: "
                f"{supported_dtypes}")
        quant_config.maybe_update_config(model_config.model)
        return quant_config
    return None

_set_cudagraph_sizes

_set_cudagraph_sizes()

vLLM defines the default candidate list of batch sizes for CUDA graph capture as:

max_graph_size = min(max_num_seqs * 2, 512)
# 1, 2, 4, then multiples of 8 up to max_graph_size
cuda_graph_sizes = [1, 2, 4, 8, 16, 24, 32, 40, ..., max_graph_size]

In the end, vllm_config.compilation_config.cudagraph_capture_sizes will be the final sizes to capture cudagraph (in descending order).

These sizes are used to capture and reuse CUDA graphs for performance-critical paths (e.g., decoding). Capturing enables significantly faster kernel dispatch by avoiding Python overhead. The list is then filtered based on max_num_batched_tokens (e.g., 8192 on most GPUs), which controls the total allowed number of tokens in a batch. Since each sequence may have a variable number of tokens, the maximum usable batch size will depend on actual sequence lengths.

Example: With max_num_batched_tokens = 8192, and typical sequences averaging ~32 tokens, most practical batch sizes fall below 256. However, the system will still allow capture sizes up to 512 if shape and memory permit.

Note: If users explicitly specify cudagraph capture sizes in the compilation config, those will override this default logic. At runtime:

- If batch size <= one of the `cudagraph_capture_sizes`, the closest
padded CUDA graph will be used.
- If batch size > largest `cudagraph_capture_sizes`, cudagraph will
not be used.
Source code in vllm/config/vllm.py
def _set_cudagraph_sizes(self):
    """
    vLLM defines the default candidate list of batch sizes for CUDA graph
    capture as:

    ```python
    max_graph_size = min(max_num_seqs * 2, 512)
    # 1, 2, 4, then multiples of 8 up to max_graph_size
    cuda_graph_sizes = [1, 2, 4, 8, 16, 24, 32, 40, ..., max_graph_size]

    In the end, `vllm_config.compilation_config.cudagraph_capture_sizes`
    will be the final sizes to capture cudagraph (in descending order).

    These sizes are used to capture and reuse CUDA graphs for
    performance-critical paths (e.g., decoding). Capturing enables
    significantly faster kernel dispatch by avoiding Python overhead. The
    list is then filtered based on `max_num_batched_tokens` (e.g., 8192 on
    most GPUs), which controls the total allowed number of tokens in a
    batch. Since each sequence may have a variable number of tokens, the
    maximum usable batch size will depend on actual sequence lengths.

    Example:
        With `max_num_batched_tokens = 8192`, and typical sequences
        averaging ~32 tokens, most practical batch sizes fall below 256.
        However, the system will still allow capture sizes up to 512 if
        shape and memory permit.

    Note:
        If users explicitly specify cudagraph capture sizes in the
        compilation config, those will override this default logic.
        At runtime:

        - If batch size <= one of the `cudagraph_capture_sizes`, the closest
        padded CUDA graph will be used.
        - If batch size > largest `cudagraph_capture_sizes`, cudagraph will
        not be used.
    """

    # calculate the default `batch_size_capture_list`
    batch_size_capture_list = []
    if self.model_config is not None and \
        not self.model_config.enforce_eager:
        cuda_graph_sizes = self.scheduler_config.cuda_graph_sizes
        if len(cuda_graph_sizes) == 1:
            batch_size_capture_list = [1, 2, 4] + [
                i for i in range(8, cuda_graph_sizes[0] + 1, 8)
            ]
        elif len(cuda_graph_sizes) > 1:
            batch_size_capture_list = sorted(cuda_graph_sizes)
        else:
            raise TypeError(f"Invalid value for {cuda_graph_sizes=}.")
        if self.parallel_config.tensor_parallel_size > 1 and \
            self.compilation_config.pass_config.enable_sequence_parallelism:
            batch_size_capture_list = \
                self.update_sizes_for_sequence_parallelism(batch_size_capture_list)
        max_num_tokens = self.scheduler_config.max_num_batched_tokens
        batch_size_capture_list = [
            size for size in batch_size_capture_list
            if size <= max_num_tokens
        ]

    self.compilation_config.init_with_cudagraph_sizes(
        batch_size_capture_list)
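
For concreteness, a standalone sketch (not vLLM code; the scheduler values are assumed) that reproduces the default candidate list described in the docstring above:

```python
# Recompute the default CUDA graph capture candidates by hand.
max_num_seqs = 64              # assumed scheduler_config.max_num_seqs
max_num_batched_tokens = 2048  # assumed scheduler_config.max_num_batched_tokens

max_graph_size = min(max_num_seqs * 2, 512)  # -> 128
candidates = [1, 2, 4] + list(range(8, max_graph_size + 1, 8))
# keep only sizes that fit within the token budget
candidates = [size for size in candidates if size <= max_num_batched_tokens]
print(candidates)  # [1, 2, 4, 8, 16, 24, ..., 128]
```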

compile_debug_dump_path

compile_debug_dump_path() -> Optional[Path]

Returns a rank-aware path for dumping torch.compile debug information.

Source code in vllm/config/vllm.py
def compile_debug_dump_path(self) -> Optional[Path]:
    """Returns a rank-aware path for dumping 
    torch.compile debug information.
    """
    if self.compilation_config.debug_dump_path is None:
        return None
    tp_rank = self.parallel_config.rank
    dp_rank = self.parallel_config.data_parallel_rank
    data_parallel_size = self.parallel_config.data_parallel_size
    append_path = f"rank_{tp_rank}" if data_parallel_size == 1 \
        else f"rank_{tp_rank}_dp_{dp_rank}"
    path = self.compilation_config.debug_dump_path / append_path
    return path
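
A standalone illustration of the path composition above (values are assumed; this does not call into vLLM):

```python
from pathlib import Path

debug_dump_path = Path("/tmp/vllm_debug")  # assumed compilation_config.debug_dump_path
tp_rank, dp_rank, data_parallel_size = 1, 0, 2  # assumed parallel settings

suffix = (f"rank_{tp_rank}" if data_parallel_size == 1
          else f"rank_{tp_rank}_dp_{dp_rank}")
print(debug_dump_path / suffix)  # /tmp/vllm_debug/rank_1_dp_0
```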

compute_hash

compute_hash() -> str

WARNING: Whenever a new field is added to this config, ensure that it is included in the factors list if it affects the computation graph.

Provide a hash that uniquely identifies all the configs that affect the structure of the computation graph from input ids/embeddings to the final hidden states, excluding anything before input ids/embeddings and after the final hidden states.

Source code in vllm/config/vllm.py
def compute_hash(self) -> str:
    """
    WARNING: Whenever a new field is added to this config,
    ensure that it is included in the factors list if
    it affects the computation graph.

    Provide a hash that uniquely identifies all the configs
    that affect the structure of the computation
    graph from input ids/embeddings to the final hidden states,
    excluding anything before input ids/embeddings and after
    the final hidden states.
    """
    factors: list[Any] = []

    # summarize vllm config
    vllm_factors: list[Any] = []
    from vllm import __version__
    vllm_factors.append(__version__)
    vllm_factors.append(envs.VLLM_USE_V1)
    if self.model_config:
        vllm_factors.append(self.model_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.cache_config:
        vllm_factors.append(self.cache_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.parallel_config:
        vllm_factors.append(self.parallel_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.scheduler_config:
        vllm_factors.append(self.scheduler_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.device_config:
        vllm_factors.append(self.device_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.load_config:
        vllm_factors.append(self.load_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.lora_config:
        vllm_factors.append(self.lora_config.compute_hash())
        # LoRA creates static buffers based on max_num_batched_tokens.
        # The tensor sizes and strides get captured in the torch.compile
        # graph explicitly.
        vllm_factors.append(
            str(self.scheduler_config.max_num_batched_tokens))
    else:
        vllm_factors.append("None")
    if self.speculative_config:
        vllm_factors.append(self.speculative_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.structured_outputs_config:
        vllm_factors.append(self.structured_outputs_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.observability_config:
        vllm_factors.append(self.observability_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.quant_config:
        pass  # should be captured by model_config.quantization
    if self.compilation_config:
        vllm_factors.append(self.compilation_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.kv_transfer_config:
        vllm_factors.append(self.kv_transfer_config.compute_hash())
    else:
        vllm_factors.append("None")
    if self.additional_config:
        if isinstance(additional_config := self.additional_config, dict):
            additional_config_hash = hashlib.md5(
                json.dumps(additional_config, sort_keys=True).encode(),
                usedforsecurity=False,
            ).hexdigest()
        else:
            additional_config_hash = additional_config.compute_hash()
        vllm_factors.append(additional_config_hash)
    else:
        vllm_factors.append("None")
    factors.append(vllm_factors)

    hash_str = hashlib.md5(str(factors).encode(),
                           usedforsecurity=False).hexdigest()[:10]
    return hash_str
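
A minimal usage sketch (assumes a default VllmConfig can be constructed on the current platform; the printed value is only an example of the 10-character format):

```python
from vllm.config import VllmConfig

cfg = VllmConfig()
print(cfg.compute_hash())  # e.g. '3f2a9c1b7d' (truncated md5 of the factors)
```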

get_quantization_config staticmethod

get_quantization_config(
    model_config: ModelConfig, load_config: LoadConfig
) -> Optional[QuantizationConfig]
Source code in vllm/config/vllm.py
@staticmethod
def get_quantization_config(
        model_config: ModelConfig,
        load_config: LoadConfig) -> Optional[QuantizationConfig]:
    import copy

    # For some reason, the _ version of this modifies the model_config
    # object, so we use deepcopy to avoid that problem.
    return VllmConfig._get_quantization_config(copy.deepcopy(model_config),
                                               load_config)

pad_for_cudagraph

pad_for_cudagraph(batch_size: int) -> int
Source code in vllm/config/vllm.py
def pad_for_cudagraph(self, batch_size: int) -> int:
    # if batch_size > self.compilation_config.max_capture_size,
    # it should raise an IndexError.
    # the caller should make sure the batch_size is within the range,
    # i.e., batch_size <= self.compilation_config.max_capture_size
    return self.compilation_config.bs_to_padded_graph_size[batch_size]
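
A standalone illustration of the padding rule (hypothetical capture sizes; the real method looks the answer up in the precomputed bs_to_padded_graph_size table):

```python
capture_sizes = [1, 2, 4, 8]  # assumed cudagraph_capture_sizes

def pad_for_cudagraph(batch_size: int) -> int:
    # choose the smallest captured size that can hold the batch
    return min(size for size in capture_sizes if size >= batch_size)

assert pad_for_cudagraph(3) == 4
assert pad_for_cudagraph(8) == 8
```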

recalculate_max_model_len

recalculate_max_model_len(max_model_len: int)
Source code in vllm/config/vllm.py
def recalculate_max_model_len(self, max_model_len: int):
    # Can only be called in try_verify_and_update_config
    model_config = self.model_config
    max_model_len = model_config.get_and_verify_max_len(max_model_len)
    self.model_config.max_model_len = max_model_len
    self.scheduler_config.max_model_len = max_model_len

try_verify_and_update_config

try_verify_and_update_config()
Source code in vllm/config/vllm.py
def try_verify_and_update_config(self):
    if self.model_config is None:
        return

    # Avoid running try_verify_and_update_config multiple times
    if getattr(self.model_config, "config_updated", False):
        return
    self.model_config.config_updated = True

    architecture = self.model_config.architecture
    if architecture is None:
        return

    from vllm.model_executor.models.config import (
        MODELS_CONFIG_MAP, HybridAttentionMambaModelConfig)
    cls = MODELS_CONFIG_MAP.get(architecture, None)
    if cls is not None:
        cls.verify_and_update_config(self)

    if self.model_config.is_hybrid:
        HybridAttentionMambaModelConfig.verify_and_update_config(self)

    if self.model_config.convert_type == "classify":
        # Maybe convert ForCausalLM into ForSequenceClassification model.
        from vllm.model_executor.models.adapters import (
            SequenceClassificationConfig)
        SequenceClassificationConfig.verify_and_update_config(self)

    if hasattr(self.model_config, "model_weights") and is_runai_obj_uri(
            self.model_config.model_weights):
        if self.load_config.load_format == "auto":
            logger.info("Detected Run:ai model config. "
                        "Overriding `load_format` to 'runai_streamer'")
            self.load_config.load_format = "runai_streamer"
        elif self.load_config.load_format != "runai_streamer":
            raise ValueError(f"To load a model from S3, 'load_format' "
                             f"must be 'runai_streamer', "
                             f"but got '{self.load_config.load_format}'. "
                             f"Model: {self.model_config.model}")

update_sizes_for_sequence_parallelism

update_sizes_for_sequence_parallelism(
    possible_sizes: list,
) -> list
Source code in vllm/config/vllm.py
def update_sizes_for_sequence_parallelism(self,
                                          possible_sizes: list) -> list:
    # remove sizes that are not a multiple of tp_size when
    # sequence parallelism is enabled
    removed_sizes = [
        size for size in possible_sizes
        if size % self.parallel_config.tensor_parallel_size != 0
    ]
    if removed_sizes:
        logger.warning(
            "Batch sizes %s are removed because they are not "
            "multiple of tp_size %d when "
            "sequence parallelism is enabled", removed_sizes,
            self.parallel_config.tensor_parallel_size)

    return [
        size for size in possible_sizes
        if size % self.parallel_config.tensor_parallel_size == 0
    ]
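
A standalone illustration of the filtering above, assuming a tensor-parallel size of 4:

```python
tp_size = 4  # assumed tensor_parallel_size
possible_sizes = [1, 2, 4, 8, 16, 24]
kept = [size for size in possible_sizes if size % tp_size == 0]
print(kept)  # [4, 8, 16, 24]; 1 and 2 are dropped
```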

with_hf_config

with_hf_config(
    hf_config: PretrainedConfig,
    architectures: Optional[list[str]] = None,
) -> VllmConfig
Source code in vllm/config/vllm.py
def with_hf_config(
    self,
    hf_config: PretrainedConfig,
    architectures: Optional[list[str]] = None,
) -> "VllmConfig":
    if architectures is not None:
        hf_config = copy.deepcopy(hf_config)
        hf_config.architectures = architectures

    model_config = copy.deepcopy(self.model_config)
    model_config.hf_config = hf_config

    return replace(self, model_config=model_config)

config

config(cls: ConfigT) -> ConfigT

A decorator that ensures all fields in a dataclass have default values and that each field has a docstring.

If a ConfigT is used as a CLI argument itself, the type keyword argument provided by get_kwargs will be pydantic.TypeAdapter(ConfigT).validate_json(cli_arg) which treats the cli_arg as a JSON string which gets validated by pydantic.

Config validation is performed by the tools/validate_config.py script, which is invoked during the pre-commit checks.

Source code in vllm/config/utils.py
def config(cls: ConfigT) -> ConfigT:
    """
    A decorator that ensures all fields in a dataclass have default values
    and that each field has a docstring.

    If a `ConfigT` is used as a CLI argument itself, the `type` keyword argument
    provided by `get_kwargs` will be
    `pydantic.TypeAdapter(ConfigT).validate_json(cli_arg)` which treats the
    `cli_arg` as a JSON string which gets validated by `pydantic`.

    Config validation is performed by the tools/validate_config.py
    script, which is invoked during the pre-commit checks.
    """
    return cls
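
A hedged sketch of the declaration pattern this decorator enforces (the class and field below are hypothetical, not part of vLLM):

```python
from dataclasses import dataclass

from vllm.config import config

@config
@dataclass
class MyToyConfig:
    window_size: int = 128
    """Sliding window size in tokens (illustrative docstring picked up by
    get_attr_docs)."""
```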

get_attr_docs

get_attr_docs(cls: type[Any]) -> dict[str, str]

Get any docstrings placed after attribute assignments in a class body.

https://davidism.com/mit-license/

Source code in vllm/config/utils.py
def get_attr_docs(cls: type[Any]) -> dict[str, str]:
    """
    Get any docstrings placed after attribute assignments in a class body.

    https://davidism.com/mit-license/
    """

    def pairwise(iterable):
        """
        Manually implement https://docs.python.org/3/library/itertools.html#itertools.pairwise

        Can be removed when Python 3.9 support is dropped.
        """
        iterator = iter(iterable)
        a = next(iterator, None)

        for b in iterator:
            yield a, b
            a = b

    try:
        cls_node = ast.parse(textwrap.dedent(inspect.getsource(cls))).body[0]
    except (OSError, KeyError, TypeError):
        # HACK: Python 3.13+ workaround - set missing __firstlineno__
        # Workaround can be removed after we upgrade to pydantic==2.12.0
        with open(inspect.getfile(cls)) as f:
            for i, line in enumerate(f):
                if f"class {cls.__name__}" in line and ":" in line:
                    cls.__firstlineno__ = i + 1
                    break
        cls_node = ast.parse(textwrap.dedent(inspect.getsource(cls))).body[0]

    if not isinstance(cls_node, ast.ClassDef):
        raise TypeError("Given object was not a class.")

    out = {}

    # Consider each pair of nodes.
    for a, b in pairwise(cls_node.body):
        # Must be an assignment then a constant string.
        if (not isinstance(a, (ast.Assign, ast.AnnAssign))
                or not isinstance(b, ast.Expr)
                or not isinstance(b.value, ast.Constant)
                or not isinstance(b.value.value, str)):
            continue

        doc = inspect.cleandoc(b.value.value)

        # An assignment can have multiple targets (a = b = v), but an
        # annotated assignment only has one target.
        targets = a.targets if isinstance(a, ast.Assign) else [a.target]

        for target in targets:
            # Must be assigning to a plain name.
            if not isinstance(target, ast.Name):
                continue

            out[target.id] = doc

    return out
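
A minimal usage sketch (the class is hypothetical and must live in a regular .py file so inspect.getsource can find it):

```python
from dataclasses import dataclass

from vllm.config import get_attr_docs

@dataclass
class Example:
    block_size: int = 16
    """Size of a cache block in tokens."""

print(get_attr_docs(Example))
# {'block_size': 'Size of a cache block in tokens.'}
```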

get_cached_compilation_config cached

get_cached_compilation_config()

Cache config to avoid repeated calls to get_current_vllm_config()

Source code in vllm/config/vllm.py
@lru_cache(maxsize=1)
def get_cached_compilation_config():
    """Cache config to avoid repeated calls to get_current_vllm_config()"""
    return get_current_vllm_config().compilation_config

get_current_vllm_config

get_current_vllm_config() -> VllmConfig
Source code in vllm/config/vllm.py
def get_current_vllm_config() -> VllmConfig:
    if _current_vllm_config is None:
        # in ci, usually when we test custom ops/modules directly,
        # we don't set the vllm config. In that case, we set a default
        # config.
        logger.warning("Current vLLM config is not set.")
        return VllmConfig()
    return _current_vllm_config

get_layers_from_vllm_config

get_layers_from_vllm_config(
    vllm_config: VllmConfig,
    layer_type: type[T],
    layer_names: Optional[list[str]] = None,
) -> dict[str, T]

Get layers from the vLLM config.

Parameters:

Name | Type | Description | Default
---- | ---- | ----------- | -------
vllm_config | VllmConfig | The vLLM config. | required
layer_type | type[T] | The type of the layer to get. | required
layer_names | Optional[list[str]] | The names of the layers to get. If None, return all layers. | None
Source code in vllm/config/vllm.py
def get_layers_from_vllm_config(
        vllm_config: VllmConfig,
        layer_type: type[T],
        layer_names: Optional[list[str]] = None) -> dict[str, T]:
    """
    Get layers from the vLLM config.

    Args:
        vllm_config: The vLLM config.
        layer_type: The type of the layer to get.
        layer_names: The names of the layers to get. If None, return all layers.
    """

    if layer_names is None:
        layer_names = list(
            vllm_config.compilation_config.static_forward_context.keys())

    forward_context = vllm_config.compilation_config.static_forward_context

    return {
        layer_name: forward_context[layer_name]
        for layer_name in layer_names
        if isinstance(forward_context[layer_name], layer_type)
    }
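
A hedged usage sketch (assumes an already-initialized VllmConfig from a running engine; with a fresh default config the static forward context is empty and the result is {}):

```python
from vllm.attention import Attention
from vllm.config import VllmConfig, get_layers_from_vllm_config

vllm_config = VllmConfig()  # placeholder; normally obtained from the engine
attn_layers = get_layers_from_vllm_config(vllm_config, Attention)
for name, layer in attn_layers.items():
    print(name, type(layer).__name__)
```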

is_init_field

is_init_field(cls: ConfigType, name: str) -> bool
Source code in vllm/config/utils.py
def is_init_field(cls: ConfigType, name: str) -> bool:
    return next(f for f in fields(cls) if f.name == name).init

iter_architecture_defaults

iter_architecture_defaults()
Source code in vllm/config/model.py
def iter_architecture_defaults():
    yield from _SUFFIX_TO_DEFAULTS

set_current_vllm_config

set_current_vllm_config(
    vllm_config: VllmConfig,
    check_compile=False,
    prefix: Optional[str] = None,
)

Temporarily set the current vLLM config. Used during model initialization. We save the current vLLM config in a global variable, so that all modules can access it, e.g. custom ops can access the vLLM config to determine how to dispatch.

Source code in vllm/config/vllm.py
@contextmanager
def set_current_vllm_config(vllm_config: VllmConfig,
                            check_compile=False,
                            prefix: Optional[str] = None):
    """
    Temporarily set the current vLLM config.
    Used during model initialization.
    We save the current vLLM config in a global variable,
    so that all modules can access it, e.g. custom ops
    can access the vLLM config to determine how to dispatch.
    """
    global _current_vllm_config, _current_prefix
    old_vllm_config = _current_vllm_config
    old_prefix = _current_prefix
    from vllm.compilation.counter import compilation_counter
    num_models_seen = compilation_counter.num_models_seen
    try:
        _current_vllm_config = vllm_config
        _current_prefix = prefix
        yield
    except Exception:
        raise
    else:
        if check_compile:
            vllm_config.compilation_config.custom_op_log_check()

        if check_compile and \
            vllm_config.compilation_config.level == CompilationLevel.PIECEWISE \
            and compilation_counter.num_models_seen == num_models_seen:
            # If the model supports compilation,
            # compilation_counter.num_models_seen should be increased
            # by at least 1.
            # If it is not increased, it means the model does not support
            # compilation (does not have @support_torch_compile decorator).
            logger.warning(
                "`torch.compile` is turned on, but the model %s"
                " does not support it. Please open an issue on GitHub"
                " if you want it to be supported.",
                vllm_config.model_config.model)
    finally:
        _current_vllm_config = old_vllm_config
        _current_prefix = old_prefix
        # Clear the compilation config cache when context changes
        get_cached_compilation_config.cache_clear()
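
A minimal sketch of the intended usage (model construction would normally happen inside the with block; building a default VllmConfig here is only for illustration):

```python
from vllm.config import (VllmConfig, get_current_vllm_config,
                         set_current_vllm_config)

vllm_config = VllmConfig()
with set_current_vllm_config(vllm_config):
    # code running here (e.g. custom ops) sees the active config
    assert get_current_vllm_config() is vllm_config
```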

try_match_architecture_defaults

try_match_architecture_defaults(
    architecture: str,
    *,
    runner_type: Optional[RunnerType] = None,
    convert_type: Optional[ConvertType] = None,
) -> Optional[tuple[str, tuple[RunnerType, ConvertType]]]
Source code in vllm/config/model.py
def try_match_architecture_defaults(
    architecture: str,
    *,
    runner_type: Optional[RunnerType] = None,
    convert_type: Optional[ConvertType] = None,
) -> Optional[tuple[str, tuple[RunnerType, ConvertType]]]:
    for suffix, (default_runner_type,
                 default_convert_type) in iter_architecture_defaults():
        if ((runner_type is None or runner_type == default_runner_type) and
            (convert_type is None or convert_type == default_convert_type)
                and architecture.endswith(suffix)):
            return suffix, (default_runner_type, default_convert_type)

    return None
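
An illustrative call (the architecture name is made up; the matched suffix and defaults depend on _SUFFIX_TO_DEFAULTS):

```python
from vllm.config import try_match_architecture_defaults

match = try_match_architecture_defaults("FooForSequenceClassification")
if match is not None:
    suffix, (runner_type, convert_type) = match
    print(suffix, runner_type, convert_type)
```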

update_config

update_config(
    config: ConfigT, overrides: dict[str, Any]
) -> ConfigT
Source code in vllm/config/utils.py
def update_config(config: ConfigT, overrides: dict[str, Any]) -> ConfigT:
    processed_overrides = {}
    for field_name, value in overrides.items():
        assert hasattr(
            config, field_name), f"{type(config)} has no field `{field_name}`"
        current_value = getattr(config, field_name)
        if is_dataclass(current_value) and not is_dataclass(value):
            assert isinstance(value, dict), (
                f"Overrides to {type(config)}.{field_name} must be a dict"
                f"  or {type(current_value)}, but got {type(value)}")
            value = update_config(
                current_value,  # type: ignore[type-var]
                value)
        processed_overrides[field_name] = value
    return replace(config, **processed_overrides)
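
A hedged usage sketch (CacheConfig and its block_size field exist in vllm.config, but the override value here is arbitrary):

```python
from vllm.config import CacheConfig, update_config

cache_config = CacheConfig()
cache_config = update_config(cache_config, {"block_size": 32})
print(cache_config.block_size)  # 32
```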