
vllm.model_executor.models.qwen2_5_vl

Inference-only Qwen2.5-VL model compatible with HuggingFace weights.
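
A minimal offline-inference sketch for this model (assumptions: the checkpoint name Qwen/Qwen2.5-VL-3B-Instruct, a local file example.jpg, and the simplified chat-style prompt below; adapt all of these to your setup). The image placeholder matches get_placeholder_str() for the "image" modality.

from PIL import Image

from vllm import LLM, SamplingParams

llm = LLM(model="Qwen/Qwen2.5-VL-3B-Instruct")

# The prompt must contain one placeholder per multimodal item.
prompt = (
    "<|im_start|>user\n"
    "<|vision_start|><|image_pad|><|vision_end|>"
    "Describe this image.<|im_end|>\n"
    "<|im_start|>assistant\n"
)

outputs = llm.generate(
    {
        "prompt": prompt,
        "multi_modal_data": {"image": Image.open("example.jpg")},
    },
    SamplingParams(max_tokens=64),
)
print(outputs[0].outputs[0].text)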

Qwen2_5_VLImageInputs module-attribute

Qwen2_5_VLVideoInputs module-attribute

logger module-attribute

logger = init_logger(__name__)

Qwen2_5_VLForConditionalGeneration

Bases: Module, SupportsMultiModal, SupportsLoRA, SupportsPP, SupportsQuant, SupportsEagle3, SupportsMultiModalPruning

Source code in vllm/model_executor/models/qwen2_5_vl.py
@MULTIMODAL_REGISTRY.register_processor(
    Qwen2_5_VLMultiModalProcessor,
    info=Qwen2_5_VLProcessingInfo,
    dummy_inputs=Qwen2_5_VLDummyInputsBuilder)
class Qwen2_5_VLForConditionalGeneration(nn.Module, SupportsMultiModal,
                                         SupportsLoRA, SupportsPP,
                                         SupportsQuant, SupportsEagle3,
                                         SupportsMultiModalPruning):

    packed_modules_mapping = {
        "qkv_proj": ["q_proj", "k_proj", "v_proj"],
        "gate_up_proj": ["gate_proj", "up_proj"],
    }

    # To ensure correct weight loading and mapping.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            # mapping for new names in checkpoint saved after transformers v4.52
            "model.language_model.": "language_model.model.",
            "model.visual.": "visual.",
            # mapping for original checkpoint
            "lm_head.": "language_model.lm_head.",
            "model.": "language_model.model.",
        })

    supports_encoder_tp_data = True

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
        if modality.startswith("image"):
            return "<|vision_start|><|image_pad|><|vision_end|>"
        if modality.startswith("video"):
            return "<|vision_start|><|video_pad|><|vision_end|>"

        raise ValueError("Only image or video modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config: Qwen2_5_VLConfig = vllm_config.model_config.hf_config
        multimodal_config = vllm_config.model_config.multimodal_config

        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        self.config = config
        self.multimodal_config = multimodal_config
        self.video_pruning_rate = multimodal_config.video_pruning_rate
        self.is_multimodal_pruning_enabled = (
            multimodal_config.is_multimodal_pruning_enabled())

        if multimodal_config.get_limit_per_prompt("image") or \
            multimodal_config.get_limit_per_prompt("video"):
            self.visual = Qwen2_5_VisionTransformer(
                config.vision_config,
                norm_eps=getattr(config, "rms_norm_eps", 1e-6),
                quant_config=self.quant_config,
                prefix=maybe_prefix(prefix, "visual"),
                use_data_parallel=self.use_data_parallel,
            )
        else:
            self.visual = None

        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, "language_model"),
            architectures=["Qwen2ForCausalLM"],
        )

        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors)

    def set_aux_hidden_state_layers(self, layers: tuple[int, ...]) -> None:
        self.language_model.model.aux_hidden_state_layers = layers

    def get_eagle3_aux_hidden_state_layers(self) -> tuple[int, ...]:
        num_layers = len(self.language_model.model.layers)
        return (2, num_layers // 2, num_layers - 3)

    def _validate_and_reshape_mm_tensor(self, mm_input: object,
                                        name: str) -> torch.Tensor:
        if not isinstance(mm_input, (torch.Tensor, list)):
            raise ValueError(f"Incorrect type of {name}. "
                             f"Got type: {type(mm_input)}")
        if isinstance(mm_input, torch.Tensor):
            if mm_input.ndim == 2:
                return mm_input
            if mm_input.ndim != 3:
                raise ValueError(f"{name} should be 2D or batched 3D tensor. "
                                 f"Got ndim: {mm_input.ndim} "
                                 f"(shape={mm_input.shape})")
            return mm_input.reshape(-1, mm_input.shape[-1])
        else:
            return torch.concat(mm_input)

    def _parse_and_validate_image_input(
            self, **kwargs: object) -> Optional[Qwen2_5_VLImageInputs]:
        pixel_values = kwargs.pop("pixel_values", None)
        image_embeds = kwargs.pop("image_embeds", None)
        image_grid_thw = kwargs.pop("image_grid_thw", None)

        if pixel_values is None and image_embeds is None:
            return None

        if pixel_values is not None:
            pixel_values = self._validate_and_reshape_mm_tensor(
                pixel_values, "image pixel values")
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw")

            return Qwen2_5_VLImagePixelInputs(type="pixel_values",
                                              pixel_values=pixel_values,
                                              image_grid_thw=image_grid_thw)

        if image_embeds is not None:
            image_embeds = self._validate_and_reshape_mm_tensor(
                image_embeds, "image embeds")
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw")

            return Qwen2_5_VLImageEmbeddingInputs(
                type="image_embeds",
                image_embeds=image_embeds,
                image_grid_thw=image_grid_thw)

    def _parse_and_validate_video_input(
            self, **kwargs: object) -> Optional[Qwen2_5_VLVideoInputs]:
        pixel_values_videos = kwargs.pop("pixel_values_videos", None)
        video_embeds = kwargs.pop("video_embeds", None)
        video_grid_thw = kwargs.pop("video_grid_thw", None)
        second_per_grid_ts = kwargs.pop("second_per_grid_ts", None)

        if pixel_values_videos is None and video_embeds is None:
            return None

        if pixel_values_videos is not None:
            pixel_values_videos = self._validate_and_reshape_mm_tensor(
                pixel_values_videos, "video pixel values")
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw")
            if second_per_grid_ts is not None and second_per_grid_ts.ndim == 2:
                second_per_grid_ts = second_per_grid_ts.squeeze(-1)
            return Qwen2_5_VLVideoPixelInputs(
                type="pixel_values_videos",
                pixel_values_videos=pixel_values_videos,
                video_grid_thw=video_grid_thw,
                second_per_grid_ts=second_per_grid_ts,
            )

        if video_embeds is not None:
            video_embeds = self._validate_and_reshape_mm_tensor(
                video_embeds, "video embeds")
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw")

            return Qwen2_5_VLVideoEmbeddingInputs(
                type="video_embeds",
                video_embeds=video_embeds,
                video_grid_thw=video_grid_thw)

    def _process_image_input(
            self,
            image_input: Qwen2_5_VLImageInputs) -> tuple[torch.Tensor, ...]:

        grid_thw = image_input["image_grid_thw"]
        assert grid_thw.ndim == 2
        grid_thw_list = grid_thw.tolist()

        if image_input["type"] == "image_embeds":
            image_embeds = image_input["image_embeds"].type(self.visual.dtype)
        else:
            pixel_values = image_input["pixel_values"]

            if self.use_data_parallel:
                return run_dp_sharded_mrope_vision_model(self.visual,
                                                         pixel_values,
                                                         grid_thw_list,
                                                         rope_type="rope_3d")
            else:
                image_embeds = self.visual(pixel_values,
                                           grid_thw=grid_thw_list)

        # Split concatenated embeddings for each image item.
        # Using prod on grid_thw_list instead of grid_thw.prod avoids CUDA sync
        merge_size = self.visual.spatial_merge_size
        sizes = (torch.tensor(grid_thw_list, dtype=torch.long).prod(-1) //
                 (merge_size * merge_size)).tolist()

        return image_embeds.split(sizes)

    def _postprocess_image_embeds_evs(
            self, image_embeds_split: tuple[torch.Tensor, ...],
            image_input: Qwen2_5_VLImageInputs) -> tuple[torch.Tensor, ...]:
        """
        Append mrope positions for each image.
        This is necessary to recover the correct mrope
        positions after video pruning.

        Args:
            image_embeds_split: Tuple of image embeddings for
                each image item.
            image_input: Image input data.

        Returns:
            Tuple of image embeddings for each image item.
            Resulting embeddings will have extra 4 channels for
            computed mrope positions.
        """
        merge_size = self.visual.spatial_merge_size
        grid_thw = image_input["image_grid_thw"]
        grid_thw_list = grid_thw.tolist()
        image_embeds_out = []
        for emb, size in zip(image_embeds_split, grid_thw_list):
            positions = compute_mrope_for_media(size,
                                                merge_size).to(emb.device)
            emb = torch.cat([emb, positions], dim=1)
            image_embeds_out.append(emb)
        image_embeds_split = image_embeds_out
        return tuple(image_embeds_split)

    def _process_video_input(
            self,
            video_input: Qwen2_5_VLVideoInputs) -> tuple[torch.Tensor, ...]:

        grid_thw = video_input["video_grid_thw"]
        assert grid_thw.ndim == 2
        grid_thw_list = grid_thw.tolist()

        if video_input["type"] == "video_embeds":
            video_embeds = video_input["video_embeds"].type(self.visual.dtype)
        else:
            pixel_values_videos = video_input["pixel_values_videos"]
            if self.use_data_parallel:
                return run_dp_sharded_mrope_vision_model(self.visual,
                                                         pixel_values_videos,
                                                         grid_thw_list,
                                                         rope_type="rope_3d")
            else:
                video_embeds = self.visual(pixel_values_videos,
                                           grid_thw=grid_thw_list)

        # Split concatenated embeddings for each video item.
        merge_size = self.visual.spatial_merge_size
        # Using prod on grid_thw_list instead of grid_thw.prod avoids CUDA sync
        sizes = (torch.tensor(grid_thw_list, dtype=torch.long).prod(-1) //
                 (merge_size * merge_size)).tolist()

        return video_embeds.split(sizes)

    def _postprocess_video_embeds_evs(
            self, video_embeds_split: tuple[torch.Tensor, ...],
            video_input: Qwen2_5_VLVideoInputs) -> tuple[torch.Tensor, ...]:
        """
        Prunes video embeddings via Efficient Video Sampling (EVS)
        and then appends mrope positions to each retained embedding

        Args:
            video_embeds_split: Tuple of video embeddings for each video item.
            video_input: Video input data.

        Returns:
            Tuple of video embeddings for each video item.
            Resulting embeddings will have extra 4 channels for
            computed mrope positions.
        """
        grid_thw = video_input["video_grid_thw"]
        assert grid_thw.ndim == 2
        grid_thw_list = grid_thw.tolist()
        merge_size = self.visual.spatial_merge_size

        # Cast to long to match the original code
        # https://github.com/huggingface/transformers/blob/41980ce93e775f6c88500c51c8db7946fc6a2add/src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py#L491 # noqa
        second_per_grid_ts = video_input["second_per_grid_ts"].long()
        tokens_per_second = self.config.vision_config.tokens_per_second

        video_embeds_out = []
        for emb, size, video_second_per_grid_t in zip(video_embeds_split,
                                                      grid_thw_list,
                                                      second_per_grid_ts):
            # For each video, we compute retention mask using EVS
            retention_mask = compute_retention_mask(
                emb,
                size,
                spatial_merge_size=self.visual.spatial_merge_size,
                q=self.video_pruning_rate,
            )
            positions = compute_mrope_for_media(
                size,
                merge_size,
                tokens_per_second=tokens_per_second,
                video_second_per_grid=video_second_per_grid_t.item(),
            ).to(emb.device)

            emb = emb[retention_mask]
            positions = positions[retention_mask]
            emb = torch.cat([emb, positions], dim=1)
            video_embeds_out.append(emb)
        return tuple(video_embeds_out)

    def recompute_mrope_positions(
        self,
        input_ids: list[int],
        multimodal_embeddings: tuple[torch.Tensor, ...],
        mrope_positions: torch.LongTensor,
        num_computed_tokens: int,
    ) -> tuple[tuple[torch.Tensor, ...], torch.Tensor, int]:
        """
        Update part of input mrope positions (starting with
        num_computed_tokens index). Original mrope_positions are computed
        for unpruned sequence and becomes incorrect once pruning occurs,
        so once we prune media tokens we should reflect this in the
        mrope_positions before we feed it to LLM.

        Args:
            input_ids: (N,) All input tokens of the prompt (Containing
                entire sequence).
            multimodal_embeddings: Tuple of multimodal embeddings.
            mrope_positions: Existing mrope positions (3, N) for entire
                sequence
            num_computed_tokens: A number of computed tokens so far.

        Returns:
            Tuple of (multimodal_embeddings, mrope_positions,
                mrope_position_delta).
        """
        image_token_id = self.config.image_token_id
        video_token_id = self.config.video_token_id
        vision_start_token_id = self.config.vision_start_token_id

        # Device
        device = (multimodal_embeddings[0].device
                  if len(multimodal_embeddings) else mrope_positions.device)

        # Tensors
        input_ids_t = torch.as_tensor(input_ids,
                                      device=device,
                                      dtype=torch.long)

        # fmt: off
        mm_embeddings_out = [mm[:, :-4] for mm in
                             multimodal_embeddings]
        mm_embeddings_pos = [mm[:, -4:].permute(1, 0).long() for mm in
                             multimodal_embeddings]
        # fmt: on

        positions, mrope_positions_delta = recompute_mrope_positions(
            input_ids_t,
            mm_embeddings_pos,
            mrope_positions,
            num_computed_tokens,
            vision_start_token_id,
            image_token_id,
            video_token_id,
        )

        return tuple(mm_embeddings_out), positions, mrope_positions_delta

    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
        mm_input_by_modality = {}

        # Preserve the order of modalities if there are multiple of them
        # from the order of kwargs.
        for input_key in kwargs:
            if input_key in ("pixel_values", "image_embeds"
                             ) and "image" not in mm_input_by_modality:
                mm_input_by_modality[
                    "image"] = self._parse_and_validate_image_input(**kwargs)
            if input_key in ("pixel_values_videos", "video_embeds"
                             ) and "video" not in mm_input_by_modality:
                mm_input_by_modality[
                    "video"] = self._parse_and_validate_video_input(**kwargs)
        return mm_input_by_modality

    def get_language_model(self) -> torch.nn.Module:
        return self.language_model

    def get_multimodal_embeddings(self,
                                  **kwargs: object) -> MultiModalEmbeddings:

        mm_input_by_modality = self._parse_and_validate_multimodal_inputs(
            **kwargs)
        if not mm_input_by_modality:
            return []

        # The resulting multimodal_embeddings is a tuple of tensors, with each
        # tensor corresponding to a multimodal data item (image or video).
        multimodal_embeddings: tuple[torch.Tensor, ...] = ()

        # NOTE: It is important to iterate over the keys in this dictionary
        # to preserve the order of the modalities.
        for modality in mm_input_by_modality:
            multimodal_input = mm_input_by_modality[modality]
            if modality == "image":
                vision_embeddings = self._process_image_input(multimodal_input)
                if self.is_multimodal_pruning_enabled:
                    vision_embeddings = self._postprocess_image_embeds_evs(
                        vision_embeddings, multimodal_input
                    )
                multimodal_embeddings += vision_embeddings
            if modality == "video":
                video_embeddings = self._process_video_input(multimodal_input)
                if self.is_multimodal_pruning_enabled:
                    video_embeddings = self._postprocess_video_embeds_evs(
                        video_embeddings, multimodal_input
                    )
                multimodal_embeddings += video_embeddings
        return multimodal_embeddings

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs: object,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        """Run forward pass for Qwen2.5-VL.

        Args:
            input_ids: Flattened (concatenated) input_ids corresponding to a
                batch.
            positions: Flattened (concatenated) position ids corresponding to a
                batch. **NOTE**: If mrope is enabled (default setting for
                Qwen2.5-VL opensource models), the shape will be `(3, seq_len)`,
                otherwise it will be `(seq_len,)`.
        """

        if intermediate_tensors is not None:
            inputs_embeds = None

        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> Optional[torch.Tensor]:
        return self.language_model.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:

        skip_prefixes = []
        if self.visual is None:
            skip_prefixes.extend(["visual."])
        loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            connector="visual.merger.",
            tower_model="visual.",
        )

config instance-attribute

config = config

hf_to_vllm_mapper class-attribute instance-attribute

hf_to_vllm_mapper = WeightsMapper(
    orig_to_new_prefix={
        "model.language_model.": "language_model.model.",
        "model.visual.": "visual.",
        "lm_head.": "language_model.lm_head.",
        "model.": "language_model.model.",
    }
)
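
To make the prefix remapping above concrete, here is a simplified stand-in for how the orig_to_new_prefix table translates checkpoint parameter names (illustrative only; the actual WeightsMapper implementation may differ in details):

ORIG_TO_NEW_PREFIX = {
    "model.language_model.": "language_model.model.",
    "model.visual.": "visual.",
    "lm_head.": "language_model.lm_head.",
    "model.": "language_model.model.",
}

def remap(name: str) -> str:
    # First matching prefix wins; the catch-all "model." entry must come
    # after the more specific ones.
    for old, new in ORIG_TO_NEW_PREFIX.items():
        if name.startswith(old):
            return new + name[len(old):]
    return name

# Checkpoint saved with transformers >= 4.52:
assert remap("model.visual.patch_embed.proj.weight") == "visual.patch_embed.proj.weight"
# Original checkpoint layout:
assert remap("model.layers.0.mlp.gate_proj.weight") == \
    "language_model.model.layers.0.mlp.gate_proj.weight"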

is_multimodal_pruning_enabled instance-attribute

is_multimodal_pruning_enabled = (
    is_multimodal_pruning_enabled()
)

language_model instance-attribute

language_model = init_vllm_registered_model(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "language_model"),
    architectures=["Qwen2ForCausalLM"],
)

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    make_empty_intermediate_tensors
)

multimodal_config instance-attribute

multimodal_config = multimodal_config

packed_modules_mapping class-attribute instance-attribute

packed_modules_mapping = {
    "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    "gate_up_proj": ["gate_proj", "up_proj"],
}

supports_encoder_tp_data class-attribute instance-attribute

supports_encoder_tp_data = True

use_data_parallel instance-attribute

use_data_parallel = mm_encoder_tp_mode == 'data'

video_pruning_rate instance-attribute

video_pruning_rate = video_pruning_rate

visual instance-attribute

visual = Qwen2_5_VisionTransformer(
    vision_config,
    norm_eps=getattr(config, "rms_norm_eps", 1e-06),
    quant_config=quant_config,
    prefix=maybe_prefix(prefix, "visual"),
    use_data_parallel=use_data_parallel,
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config: Qwen2_5_VLConfig = vllm_config.model_config.hf_config
    multimodal_config = vllm_config.model_config.multimodal_config

    self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
    self.config = config
    self.multimodal_config = multimodal_config
    self.video_pruning_rate = multimodal_config.video_pruning_rate
    self.is_multimodal_pruning_enabled = (
        multimodal_config.is_multimodal_pruning_enabled())

    if multimodal_config.get_limit_per_prompt("image") or \
        multimodal_config.get_limit_per_prompt("video"):
        self.visual = Qwen2_5_VisionTransformer(
            config.vision_config,
            norm_eps=getattr(config, "rms_norm_eps", 1e-6),
            quant_config=self.quant_config,
            prefix=maybe_prefix(prefix, "visual"),
            use_data_parallel=self.use_data_parallel,
        )
    else:
        self.visual = None

    self.language_model = init_vllm_registered_model(
        vllm_config=vllm_config,
        prefix=maybe_prefix(prefix, "language_model"),
        architectures=["Qwen2ForCausalLM"],
    )

    self.make_empty_intermediate_tensors = (
        self.language_model.make_empty_intermediate_tensors)

_parse_and_validate_image_input

_parse_and_validate_image_input(
    **kwargs: object,
) -> Optional[Qwen2_5_VLImageInputs]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _parse_and_validate_image_input(
        self, **kwargs: object) -> Optional[Qwen2_5_VLImageInputs]:
    pixel_values = kwargs.pop("pixel_values", None)
    image_embeds = kwargs.pop("image_embeds", None)
    image_grid_thw = kwargs.pop("image_grid_thw", None)

    if pixel_values is None and image_embeds is None:
        return None

    if pixel_values is not None:
        pixel_values = self._validate_and_reshape_mm_tensor(
            pixel_values, "image pixel values")
        image_grid_thw = self._validate_and_reshape_mm_tensor(
            image_grid_thw, "image grid_thw")

        return Qwen2_5_VLImagePixelInputs(type="pixel_values",
                                          pixel_values=pixel_values,
                                          image_grid_thw=image_grid_thw)

    if image_embeds is not None:
        image_embeds = self._validate_and_reshape_mm_tensor(
            image_embeds, "image embeds")
        image_grid_thw = self._validate_and_reshape_mm_tensor(
            image_grid_thw, "image grid_thw")

        return Qwen2_5_VLImageEmbeddingInputs(
            type="image_embeds",
            image_embeds=image_embeds,
            image_grid_thw=image_grid_thw)

_parse_and_validate_multimodal_inputs

_parse_and_validate_multimodal_inputs(
    **kwargs: object,
) -> dict
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
    mm_input_by_modality = {}

    # Preserve the order of modalities if there are multiple of them
    # from the order of kwargs.
    for input_key in kwargs:
        if input_key in ("pixel_values", "image_embeds"
                         ) and "image" not in mm_input_by_modality:
            mm_input_by_modality[
                "image"] = self._parse_and_validate_image_input(**kwargs)
        if input_key in ("pixel_values_videos", "video_embeds"
                         ) and "video" not in mm_input_by_modality:
            mm_input_by_modality[
                "video"] = self._parse_and_validate_video_input(**kwargs)
    return mm_input_by_modality

_parse_and_validate_video_input

_parse_and_validate_video_input(
    **kwargs: object,
) -> Optional[Qwen2_5_VLVideoInputs]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _parse_and_validate_video_input(
        self, **kwargs: object) -> Optional[Qwen2_5_VLVideoInputs]:
    pixel_values_videos = kwargs.pop("pixel_values_videos", None)
    video_embeds = kwargs.pop("video_embeds", None)
    video_grid_thw = kwargs.pop("video_grid_thw", None)
    second_per_grid_ts = kwargs.pop("second_per_grid_ts", None)

    if pixel_values_videos is None and video_embeds is None:
        return None

    if pixel_values_videos is not None:
        pixel_values_videos = self._validate_and_reshape_mm_tensor(
            pixel_values_videos, "video pixel values")
        video_grid_thw = self._validate_and_reshape_mm_tensor(
            video_grid_thw, "video grid_thw")
        if second_per_grid_ts is not None and second_per_grid_ts.ndim == 2:
            second_per_grid_ts = second_per_grid_ts.squeeze(-1)
        return Qwen2_5_VLVideoPixelInputs(
            type="pixel_values_videos",
            pixel_values_videos=pixel_values_videos,
            video_grid_thw=video_grid_thw,
            second_per_grid_ts=second_per_grid_ts,
        )

    if video_embeds is not None:
        video_embeds = self._validate_and_reshape_mm_tensor(
            video_embeds, "video embeds")
        video_grid_thw = self._validate_and_reshape_mm_tensor(
            video_grid_thw, "video grid_thw")

        return Qwen2_5_VLVideoEmbeddingInputs(
            type="video_embeds",
            video_embeds=video_embeds,
            video_grid_thw=video_grid_thw)

_postprocess_image_embeds_evs

_postprocess_image_embeds_evs(
    image_embeds_split: tuple[Tensor, ...],
    image_input: Qwen2_5_VLImageInputs,
) -> tuple[Tensor, ...]

Append mrope positions for each image. This is necessary to recover the correct mrope positions after video pruning.

Parameters:

  • image_embeds_split (tuple[Tensor, ...], required): Tuple of image embeddings for each image item.
  • image_input (Qwen2_5_VLImageInputs, required): Image input data.

Returns:

  • tuple[Tensor, ...]: Tuple of image embeddings for each image item. The resulting embeddings have 4 extra channels for the computed mrope positions.

Source code in vllm/model_executor/models/qwen2_5_vl.py
def _postprocess_image_embeds_evs(
        self, image_embeds_split: tuple[torch.Tensor, ...],
        image_input: Qwen2_5_VLImageInputs) -> tuple[torch.Tensor, ...]:
    """
    Append mrope positions for each image.
    This is necessary to recover the correct mrope
    positions after video pruning.

    Args:
        image_embeds_split: Tuple of image embeddings for
            each image item.
        image_input: Image input data.

    Returns:
        Tuple of image embeddings for each image item.
        Resulting embeddings will have extra 4 channels for
        computed mrope positions.
    """
    merge_size = self.visual.spatial_merge_size
    grid_thw = image_input["image_grid_thw"]
    grid_thw_list = grid_thw.tolist()
    image_embeds_out = []
    for emb, size in zip(image_embeds_split, grid_thw_list):
        positions = compute_mrope_for_media(size,
                                            merge_size).to(emb.device)
        emb = torch.cat([emb, positions], dim=1)
        image_embeds_out.append(emb)
    image_embeds_split = image_embeds_out
    return tuple(image_embeds_split)

_postprocess_video_embeds_evs

_postprocess_video_embeds_evs(
    video_embeds_split: tuple[Tensor, ...],
    video_input: Qwen2_5_VLVideoInputs,
) -> tuple[Tensor, ...]

Prunes video embeddings via Efficient Video Sampling (EVS) and then appends mrope positions to each retained embedding.

Parameters:

  • video_embeds_split (tuple[Tensor, ...], required): Tuple of video embeddings for each video item.
  • video_input (Qwen2_5_VLVideoInputs, required): Video input data.

Returns:

  • tuple[Tensor, ...]: Tuple of video embeddings for each video item. The resulting embeddings have 4 extra channels for the computed mrope positions.

Source code in vllm/model_executor/models/qwen2_5_vl.py
def _postprocess_video_embeds_evs(
        self, video_embeds_split: tuple[torch.Tensor, ...],
        video_input: Qwen2_5_VLVideoInputs) -> tuple[torch.Tensor, ...]:
    """
    Prunes video embeddings via Efficient Video Sampling (EVS)
    and then appends mrope positions to each retained embedding

    Args:
        video_embeds_split: Tuple of video embeddings for each video item.
        video_input: Video input data.

    Returns:
        Tuple of video embeddings for each video item.
        Resulting embeddings will have extra 4 channels for
        computed mrope positions.
    """
    grid_thw = video_input["video_grid_thw"]
    assert grid_thw.ndim == 2
    grid_thw_list = grid_thw.tolist()
    merge_size = self.visual.spatial_merge_size

    # Cast to long to match the original code
    # https://github.com/huggingface/transformers/blob/41980ce93e775f6c88500c51c8db7946fc6a2add/src/transformers/models/qwen2_5_vl/modular_qwen2_5_vl.py#L491 # noqa
    second_per_grid_ts = video_input["second_per_grid_ts"].long()
    tokens_per_second = self.config.vision_config.tokens_per_second

    video_embeds_out = []
    for emb, size, video_second_per_grid_t in zip(video_embeds_split,
                                                  grid_thw_list,
                                                  second_per_grid_ts):
        # For each video, we compute retention mask using EVS
        retention_mask = compute_retention_mask(
            emb,
            size,
            spatial_merge_size=self.visual.spatial_merge_size,
            q=self.video_pruning_rate,
        )
        positions = compute_mrope_for_media(
            size,
            merge_size,
            tokens_per_second=tokens_per_second,
            video_second_per_grid=video_second_per_grid_t.item(),
        ).to(emb.device)

        emb = emb[retention_mask]
        positions = positions[retention_mask]
        emb = torch.cat([emb, positions], dim=1)
        video_embeds_out.append(emb)
    return tuple(video_embeds_out)

_process_image_input

_process_image_input(
    image_input: Qwen2_5_VLImageInputs,
) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _process_image_input(
        self,
        image_input: Qwen2_5_VLImageInputs) -> tuple[torch.Tensor, ...]:

    grid_thw = image_input["image_grid_thw"]
    assert grid_thw.ndim == 2
    grid_thw_list = grid_thw.tolist()

    if image_input["type"] == "image_embeds":
        image_embeds = image_input["image_embeds"].type(self.visual.dtype)
    else:
        pixel_values = image_input["pixel_values"]

        if self.use_data_parallel:
            return run_dp_sharded_mrope_vision_model(self.visual,
                                                     pixel_values,
                                                     grid_thw_list,
                                                     rope_type="rope_3d")
        else:
            image_embeds = self.visual(pixel_values,
                                       grid_thw=grid_thw_list)

    # Split concatenated embeddings for each image item.
    # Using prod on grid_thw_list instead of grid_thw.prod avoids CUDA sync
    merge_size = self.visual.spatial_merge_size
    sizes = (torch.tensor(grid_thw_list, dtype=torch.long).prod(-1) //
             (merge_size * merge_size)).tolist()

    return image_embeds.split(sizes)
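
As a worked example of the split above (illustrative values only): with spatial_merge_size = 2 and two images whose (t, h, w) patch grids are (1, 32, 32) and (1, 16, 24), the concatenated image embeddings are split into 256 and 96 rows respectively.

import torch

merge_size = 2
grid_thw_list = [[1, 32, 32], [1, 16, 24]]

# Same computation as _process_image_input: patches per image divided by
# the spatial merge factor, done on the Python list to avoid a CUDA sync.
sizes = (torch.tensor(grid_thw_list, dtype=torch.long).prod(-1) //
         (merge_size * merge_size)).tolist()
print(sizes)  # [256, 96]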

_process_video_input

_process_video_input(
    video_input: Qwen2_5_VLVideoInputs,
) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _process_video_input(
        self,
        video_input: Qwen2_5_VLVideoInputs) -> tuple[torch.Tensor, ...]:

    grid_thw = video_input["video_grid_thw"]
    assert grid_thw.ndim == 2
    grid_thw_list = grid_thw.tolist()

    if video_input["type"] == "video_embeds":
        video_embeds = video_input["video_embeds"].type(self.visual.dtype)
    else:
        pixel_values_videos = video_input["pixel_values_videos"]
        if self.use_data_parallel:
            return run_dp_sharded_mrope_vision_model(self.visual,
                                                     pixel_values_videos,
                                                     grid_thw_list,
                                                     rope_type="rope_3d")
        else:
            video_embeds = self.visual(pixel_values_videos,
                                       grid_thw=grid_thw_list)

    # Split concatenated embeddings for each video item.
    merge_size = self.visual.spatial_merge_size
    # Using prod on grid_thw_list instead of grid_thw.prod avoids CUDA sync
    sizes = (torch.tensor(grid_thw_list, dtype=torch.long).prod(-1) //
             (merge_size * merge_size)).tolist()

    return video_embeds.split(sizes)

_validate_and_reshape_mm_tensor

_validate_and_reshape_mm_tensor(
    mm_input: object, name: str
) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _validate_and_reshape_mm_tensor(self, mm_input: object,
                                    name: str) -> torch.Tensor:
    if not isinstance(mm_input, (torch.Tensor, list)):
        raise ValueError(f"Incorrect type of {name}. "
                         f"Got type: {type(mm_input)}")
    if isinstance(mm_input, torch.Tensor):
        if mm_input.ndim == 2:
            return mm_input
        if mm_input.ndim != 3:
            raise ValueError(f"{name} should be 2D or batched 3D tensor. "
                             f"Got ndim: {mm_input.ndim} "
                             f"(shape={mm_input.shape})")
        return mm_input.reshape(-1, mm_input.shape[-1])
    else:
        return torch.concat(mm_input)
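
The two accepted layouts can be summarized with a small sketch (hypothetical shapes): a batched 3D tensor is flattened to 2D, while a list of per-item 2D tensors is concatenated.

import torch

batched = torch.randn(2, 100, 1176)                    # (batch, patches, channels)
print(batched.reshape(-1, batched.shape[-1]).shape)    # torch.Size([200, 1176])

items = [torch.randn(100, 1176), torch.randn(64, 1176)]
print(torch.concat(items).shape)                       # torch.Size([164, 1176])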

compute_logits

compute_logits(hidden_states: Tensor) -> Optional[Tensor]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
) -> Optional[torch.Tensor]:
    return self.language_model.compute_logits(hidden_states)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs: object,
) -> Union[Tensor, IntermediateTensors]

Run forward pass for Qwen2.5-VL.

Parameters:

  • input_ids (Tensor, required): Flattened (concatenated) input_ids corresponding to a batch.
  • positions (Tensor, required): Flattened (concatenated) position ids corresponding to a batch. NOTE: If mrope is enabled (the default for the open-source Qwen2.5-VL models), the shape will be (3, seq_len); otherwise it will be (seq_len,).
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    **kwargs: object,
) -> Union[torch.Tensor, IntermediateTensors]:
    """Run forward pass for Qwen2.5-VL.

    Args:
        input_ids: Flattened (concatenated) input_ids corresponding to a
            batch.
        positions: Flattened (concatenated) position ids corresponding to a
            batch. **NOTE**: If mrope is enabled (default setting for
            Qwen2.5-VL opensource models), the shape will be `(3, seq_len)`,
            otherwise it will be `(seq_len,)`.
    """

    if intermediate_tensors is not None:
        inputs_embeds = None

    hidden_states = self.language_model.model(
        input_ids=input_ids,
        positions=positions,
        intermediate_tensors=intermediate_tensors,
        inputs_embeds=inputs_embeds,
    )
    return hidden_states

get_eagle3_aux_hidden_state_layers

get_eagle3_aux_hidden_state_layers() -> tuple[int, ...]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def get_eagle3_aux_hidden_state_layers(self) -> tuple[int, ...]:
    num_layers = len(self.language_model.model.layers)
    return (2, num_layers // 2, num_layers - 3)
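
For example, with a hypothetical 36-layer language backbone this selects layers 2, 18, and 33:

num_layers = 36
print((2, num_layers // 2, num_layers - 3))  # (2, 18, 33)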

get_language_model

get_language_model() -> Module
Source code in vllm/model_executor/models/qwen2_5_vl.py
def get_language_model(self) -> torch.nn.Module:
    return self.language_model

get_mm_mapping

get_mm_mapping() -> MultiModelKeys

Get the module prefix in multimodal models

Source code in vllm/model_executor/models/qwen2_5_vl.py
def get_mm_mapping(self) -> MultiModelKeys:
    """
    Get the module prefix in multimodal models
    """
    return MultiModelKeys.from_string_field(
        language_model="language_model",
        connector="visual.merger.",
        tower_model="visual.",
    )

get_multimodal_embeddings

get_multimodal_embeddings(
    **kwargs: object,
) -> MultiModalEmbeddings
Source code in vllm/model_executor/models/qwen2_5_vl.py
def get_multimodal_embeddings(self,
                              **kwargs: object) -> MultiModalEmbeddings:

    mm_input_by_modality = self._parse_and_validate_multimodal_inputs(
        **kwargs)
    if not mm_input_by_modality:
        return []

    # The resulting multimodal_embeddings is a tuple of tensors, with each
    # tensor corresponding to a multimodal data item (image or video).
    multimodal_embeddings: tuple[torch.Tensor, ...] = ()

    # NOTE: It is important to iterate over the keys in this dictionary
    # to preserve the order of the modalities.
    for modality in mm_input_by_modality:
        multimodal_input = mm_input_by_modality[modality]
        if modality == "image":
            vision_embeddings = self._process_image_input(multimodal_input)
            if self.is_multimodal_pruning_enabled:
                vision_embeddings = self._postprocess_image_embeds_evs(
                    vision_embeddings, multimodal_input
                )
            multimodal_embeddings += vision_embeddings
        if modality == "video":
            video_embeddings = self._process_video_input(multimodal_input)
            if self.is_multimodal_pruning_enabled:
                video_embeddings = self._postprocess_video_embeds_evs(
                    video_embeddings, multimodal_input
                )
            multimodal_embeddings += video_embeddings
    return multimodal_embeddings

get_placeholder_str classmethod

get_placeholder_str(modality: str, i: int) -> Optional[str]
Source code in vllm/model_executor/models/qwen2_5_vl.py
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
    if modality.startswith("image"):
        return "<|vision_start|><|image_pad|><|vision_end|>"
    if modality.startswith("video"):
        return "<|vision_start|><|video_pad|><|vision_end|>"

    raise ValueError("Only image or video modality is supported")
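
Usage sketch: the returned placeholder must appear once in the prompt for each multimodal item (the index argument is unused by this model).

assert Qwen2_5_VLForConditionalGeneration.get_placeholder_str("image", 0) == \
    "<|vision_start|><|image_pad|><|vision_end|>"
assert Qwen2_5_VLForConditionalGeneration.get_placeholder_str("video", 0) == \
    "<|vision_start|><|video_pad|><|vision_end|>"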

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:

    skip_prefixes = []
    if self.visual is None:
        skip_prefixes.extend(["visual."])
    loader = AutoWeightsLoader(self, skip_prefixes=skip_prefixes)
    return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

recompute_mrope_positions

recompute_mrope_positions(
    input_ids: list[int],
    multimodal_embeddings: tuple[Tensor, ...],
    mrope_positions: LongTensor,
    num_computed_tokens: int,
) -> tuple[tuple[Tensor, ...], Tensor, int]

Update part of the input mrope positions (starting at index num_computed_tokens). The original mrope_positions are computed for the unpruned sequence and become incorrect once pruning occurs, so after pruning media tokens we must reflect this in mrope_positions before feeding them to the LLM.

Parameters:

  • input_ids (list[int], required): (N,) All input tokens of the prompt (containing the entire sequence).
  • multimodal_embeddings (tuple[Tensor, ...], required): Tuple of multimodal embeddings.
  • mrope_positions (LongTensor, required): Existing mrope positions (3, N) for the entire sequence.
  • num_computed_tokens (int, required): The number of tokens computed so far.

Returns:

  • tuple[tuple[Tensor, ...], Tensor, int]: Tuple of (multimodal_embeddings, mrope_positions, mrope_position_delta).

Source code in vllm/model_executor/models/qwen2_5_vl.py
def recompute_mrope_positions(
    self,
    input_ids: list[int],
    multimodal_embeddings: tuple[torch.Tensor, ...],
    mrope_positions: torch.LongTensor,
    num_computed_tokens: int,
) -> tuple[tuple[torch.Tensor, ...], torch.Tensor, int]:
    """
    Update part of input mrope positions (starting with
    num_computed_tokens index). Original mrope_positions are computed
    for unpruned sequence and becomes incorrect once pruning occurs,
    so once we prune media tokens we should reflect this in the
    mrope_positions before we feed it to LLM.

    Args:
        input_ids: (N,) All input tokens of the prompt (Containing
            entire sequence).
        multimodal_embeddings: Tuple of multimodal embeddings.
        mrope_positions: Existing mrope positions (3, N) for entire
            sequence
        num_computed_tokens: A number of computed tokens so far.

    Returns:
        Tuple of (multimodal_embeddings, mrope_positions,
            mrope_position_delta).
    """
    image_token_id = self.config.image_token_id
    video_token_id = self.config.video_token_id
    vision_start_token_id = self.config.vision_start_token_id

    # Device
    device = (multimodal_embeddings[0].device
              if len(multimodal_embeddings) else mrope_positions.device)

    # Tensors
    input_ids_t = torch.as_tensor(input_ids,
                                  device=device,
                                  dtype=torch.long)

    # fmt: off
    mm_embeddings_out = [mm[:, :-4] for mm in
                         multimodal_embeddings]
    mm_embeddings_pos = [mm[:, -4:].permute(1, 0).long() for mm in
                         multimodal_embeddings]
    # fmt: on

    positions, mrope_positions_delta = recompute_mrope_positions(
        input_ids_t,
        mm_embeddings_pos,
        mrope_positions,
        num_computed_tokens,
        vision_start_token_id,
        image_token_id,
        video_token_id,
    )

    return tuple(mm_embeddings_out), positions, mrope_positions_delta
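
Illustrative sketch (hypothetical sizes) of the embedding layout this method expects: when EVS pruning is enabled, each multimodal embedding carries 4 extra trailing channels holding precomputed mrope positions, which are stripped off and turned back into a (4, num_tokens) position tensor.

import torch

hidden_size, num_tokens = 3584, 120              # hypothetical sizes
emb_with_pos = torch.randn(num_tokens, hidden_size + 4)

content = emb_with_pos[:, :-4]                   # passed on to the LLM
positions = emb_with_pos[:, -4:].permute(1, 0).long()
print(content.shape, positions.shape)            # (120, 3584) (4, 120)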

set_aux_hidden_state_layers

set_aux_hidden_state_layers(
    layers: tuple[int, ...],
) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def set_aux_hidden_state_layers(self, layers: tuple[int, ...]) -> None:
    self.language_model.model.aux_hidden_state_layers = layers

Qwen2_5_VLImageEmbeddingInputs

Bases: TensorSchema

Dimensions
  • nf: Number of image features
  • hs: Hidden size
  • ni: Number of images
Historical context
  • image_embeds shape: (num_image_features, hidden_size)
  • num_image_features varies based on the number and resolution of the images.
  • hidden_size must match the hidden size of language model backbone.
  • image_grid_thw shape: (num_images, 3) in (grid_t, grid_h, grid_w) format
Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VLImageEmbeddingInputs(TensorSchema):
    """
    Dimensions:
        - nf: Number of image features
        - hs: Hidden size
        - ni: Number of images

    Historical context:
        - image_embeds shape: (num_image_features, hidden_size)
        - num_image_features varies based on the number and resolution of the
          images.
        - hidden_size must match the hidden size of language model backbone.
        - image_grid_thw shape: (num_images, 3) in (grid_t, grid_h, grid_w)
          format
    """
    type: Literal["image_embeds"]

    image_embeds: Annotated[
        torch.Tensor,
        TensorShape("nf", "hs"),
    ]

    image_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("ni", 3),
    ]

image_embeds instance-attribute

image_embeds: Annotated[Tensor, TensorShape(nf, hs)]

image_grid_thw instance-attribute

image_grid_thw: Annotated[Tensor, TensorShape(ni, 3)]

type instance-attribute

type: Literal['image_embeds']

Qwen2_5_VLImagePixelInputs

Bases: TensorSchema

Dimensions
  • np: Number of patches
  • ni: Number of images
  • cps: Number of channels * patch_size * patch_size
Historical context
  • pixel_values shape: (num_patches, num_channels * patch_size * patch_size)
  • image_grid_thw shape: (num_images, 3) in (grid_t, grid_h, grid_w) format
Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VLImagePixelInputs(TensorSchema):
    """
    Dimensions:
        - np: Number of patches
        - ni: Number of images
        - cps: Number of channels * patch_size * patch_size

    Historical context:
        - pixel_values shape: (num_patches, num_channels * patch_size *
          patch_size)
        - image_grid_thw shape: (num_images, 3) in (grid_t, grid_h, grid_w)
          format
    """
    type: Literal["pixel_values"]

    pixel_values: Annotated[
        torch.Tensor,
        TensorShape("np", "cps"),
    ]

    image_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("ni", 3),
    ]

image_grid_thw instance-attribute

image_grid_thw: Annotated[Tensor, TensorShape(ni, 3)]

pixel_values instance-attribute

pixel_values: Annotated[Tensor, TensorShape(np, cps)]

type instance-attribute

type: Literal['pixel_values']
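
Shape sketch (hypothetical values) relating the two tensors: the number of rows in pixel_values equals the total number of patches, i.e. the sum of t * h * w over image_grid_thw.

import torch

image_grid_thw = torch.tensor([[1, 32, 32], [1, 16, 24]])  # (ni, 3)
num_patches = int(image_grid_thw.prod(-1).sum())           # 1024 + 384 = 1408
cps = 1176  # assumed channel dimension; depends on the image processor
pixel_values = torch.randn(num_patches, cps)               # (np, cps)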

Qwen2_5_VLMultiModalProcessor

Bases: Qwen2VLMultiModalProcessor

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VLMultiModalProcessor(Qwen2VLMultiModalProcessor):

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return dict(
            **super()._get_mm_fields_config(hf_inputs, hf_processor_mm_kwargs),
            second_per_grid_ts=MultiModalFieldConfig.batched("video"),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargs,
    ) -> Sequence[PromptUpdate]:
        hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        image_processor = self.info.get_image_processor(
            **hf_processor_mm_kwargs)
        tokenizer = self.info.get_tokenizer()
        vocab = tokenizer.get_vocab()

        placeholder = {
            "image": vocab[hf_processor.image_token],
            "video": vocab[hf_processor.video_token],
        }

        merge_length = image_processor.merge_size**2

        def get_replacement_qwen2vl(item_idx: int, modality: str):
            out_item = out_mm_kwargs[modality][item_idx]
            grid_thw = out_item[f"{modality}_grid_thw"].data
            assert isinstance(grid_thw, torch.Tensor)

            num_tokens = int(grid_thw.prod()) // merge_length

            # EVS-specific code
            video_pruning_rate = self.info.ctx.get_mm_config(
            ).video_pruning_rate
            if (modality == "video" and video_pruning_rate is not None
                    and video_pruning_rate > 0.0):
                num_tokens = compute_retained_tokens_count(
                    grid_thw,
                    image_processor.merge_size,
                    video_pruning_rate,
                )
            # End of EVS-specific code

            return [placeholder[modality]] * num_tokens

        return [
            PromptReplacement(
                modality=modality,
                target=[placeholder[modality]],
                replacement=partial(get_replacement_qwen2vl,
                                    modality=modality),
            ) for modality in ("image", "video")
        ]

_get_mm_fields_config

_get_mm_fields_config(
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _get_mm_fields_config(
    self,
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
    return dict(
        **super()._get_mm_fields_config(hf_inputs, hf_processor_mm_kwargs),
        second_per_grid_ts=MultiModalFieldConfig.batched("video"),
    )

_get_prompt_updates

_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def _get_prompt_updates(
    self,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]:
    hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
    image_processor = self.info.get_image_processor(
        **hf_processor_mm_kwargs)
    tokenizer = self.info.get_tokenizer()
    vocab = tokenizer.get_vocab()

    placeholder = {
        "image": vocab[hf_processor.image_token],
        "video": vocab[hf_processor.video_token],
    }

    merge_length = image_processor.merge_size**2

    def get_replacement_qwen2vl(item_idx: int, modality: str):
        out_item = out_mm_kwargs[modality][item_idx]
        grid_thw = out_item[f"{modality}_grid_thw"].data
        assert isinstance(grid_thw, torch.Tensor)

        num_tokens = int(grid_thw.prod()) // merge_length

        # EVS-specific code
        video_pruning_rate = self.info.ctx.get_mm_config(
        ).video_pruning_rate
        if (modality == "video" and video_pruning_rate is not None
                and video_pruning_rate > 0.0):
            num_tokens = compute_retained_tokens_count(
                grid_thw,
                image_processor.merge_size,
                video_pruning_rate,
            )
        # End of EVS-specific code

        return [placeholder[modality]] * num_tokens

    return [
        PromptReplacement(
            modality=modality,
            target=[placeholder[modality]],
            replacement=partial(get_replacement_qwen2vl,
                                modality=modality),
        ) for modality in ("image", "video")
    ]
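
Worked example of the placeholder expansion (hypothetical grid): an image item whose grid_thw is (1, 32, 32) with merge_size = 2 is replaced by 1 * 32 * 32 // 4 = 256 image placeholder tokens; for videos, EVS (when enabled) further reduces this count via compute_retained_tokens_count.

import torch

grid_thw = torch.tensor([1, 32, 32])
merge_length = 2 ** 2
num_tokens = int(grid_thw.prod()) // merge_length
print(num_tokens)  # 256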

Qwen2_5_VLProcessingInfo

Bases: Qwen2VLProcessingInfo

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VLProcessingInfo(Qwen2VLProcessingInfo):

    def get_hf_config(self):
        return self.ctx.get_hf_config(Qwen2_5_VLConfig)

    def get_hf_processor(self, **kwargs: object) -> Qwen2_5_VLProcessor:
        return self.ctx.get_hf_processor(
            Qwen2_5_VLProcessor,
            use_fast=kwargs.pop("use_fast", True),
            **kwargs,
        )

get_hf_config

get_hf_config()
Source code in vllm/model_executor/models/qwen2_5_vl.py
def get_hf_config(self):
    return self.ctx.get_hf_config(Qwen2_5_VLConfig)

get_hf_processor

get_hf_processor(**kwargs: object) -> Qwen2_5_VLProcessor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def get_hf_processor(self, **kwargs: object) -> Qwen2_5_VLProcessor:
    return self.ctx.get_hf_processor(
        Qwen2_5_VLProcessor,
        use_fast=kwargs.pop("use_fast", True),
        **kwargs,
    )

Qwen2_5_VLVideoEmbeddingInputs

Bases: TensorSchema

Dimensions
  • nf: Number of video features
  • hs: Hidden size
  • nv: Number of videos
Historical context
  • video_embeds shape: (num_video_features, hidden_size)
  • num_video_features varies based on the number and resolution of the videos.
  • hidden_size must match the hidden size of language model backbone.
  • video_grid_thw shape: (num_videos, 3) in (grid_t, grid_h, grid_w) format
Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VLVideoEmbeddingInputs(TensorSchema):
    """
    Dimensions:
        - nf: Number of video features
        - hs: Hidden size
        - nv: Number of videos

    Historical context:
        - video_embeds shape: (num_video_features, hidden_size)
        - num_video_features varies based on the number and resolution of the
          videos.
        - hidden_size must match the hidden size of language model backbone.
        - video_grid_thw shape: (num_videos, 3) in (grid_t, grid_h, grid_w)
          format
    """
    type: Literal["video_embeds"]

    video_embeds: Annotated[
        torch.Tensor,
        TensorShape("nf", "hs"),
    ]

    video_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("nv", 3),
    ]

type instance-attribute

type: Literal['video_embeds']

video_embeds instance-attribute

video_embeds: Annotated[Tensor, TensorShape(nf, hs)]

video_grid_thw instance-attribute

video_grid_thw: Annotated[Tensor, TensorShape(nv, 3)]

Qwen2_5_VLVideoPixelInputs

Bases: TensorSchema

Dimensions
  • np: Number of patches
  • nv: Number of videos
  • ctps: Number of channels * temporal_patch_size * patch_size * patch_size
Historical context
  • pixel_values_videos shape: (num_patches, num_channels * temporal_patch_size * patch_size * patch_size)
  • video_grid_thw shape: (num_videos, 3) in (grid_t, grid_h, grid_w) format
  • second_per_grid_ts: The video time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. Returned when videos is not None.
Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VLVideoPixelInputs(TensorSchema):
    """
    Dimensions:
        - np: Number of patches
        - nv: Number of videos
        - ctps: Number of channels * temporal_patch_size * patch_size *
          patch_size

    Historical context:
        - pixel_values_videos shape: (num_patches, num_channels *
          temporal_patch_size * patch_size * patch_size)
        - video_grid_thw shape: (num_videos, 3) in (grid_t, grid_h, grid_w)
          format
        - second_per_grid_ts: The video time interval (in seconds) for each
          grid along the temporal dimension in the 3D position IDs. Returned
          when `videos` is not `None`.
    """
    type: Literal["pixel_values_videos"]

    pixel_values_videos: Annotated[
        torch.Tensor,
        TensorShape("np", "ctps"),
    ]

    video_grid_thw: Annotated[
        torch.Tensor,
        TensorShape("nv", 3),
    ]

    second_per_grid_ts: Annotated[
        Optional[torch.Tensor],
        TensorShape("nv"),
    ]

pixel_values_videos instance-attribute

pixel_values_videos: Annotated[
    Tensor, TensorShape(np, ctps)
]

second_per_grid_ts instance-attribute

second_per_grid_ts: Annotated[
    Optional[Tensor], TensorShape(nv)
]

type instance-attribute

type: Literal['pixel_values_videos']

video_grid_thw instance-attribute

video_grid_thw: Annotated[Tensor, TensorShape(nv, 3)]
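
A minimal sketch of how these dimensions relate, using the default patch sizes from Qwen2_5_VisionPatchEmbed below (patch_size=14, temporal_patch_size=2, 3 channels); the grid values are illustrative only.

import torch

in_channels, temporal_patch_size, patch_size = 3, 2, 14
ctps = in_channels * temporal_patch_size * patch_size * patch_size   # 1176

video_grid_thw = torch.tensor([[2, 12, 16]])            # one video, ("nv", 3)
num_patches = int(video_grid_thw.prod(dim=-1).sum())    # 2 * 12 * 16 = 384

pixel_values_videos = torch.randn(num_patches, ctps)    # ("np", "ctps")
second_per_grid_ts = torch.tensor([1.0])                # one value per video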

Qwen2_5_VisionAttention

Bases: Module

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VisionAttention(nn.Module):

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        projection_size: int,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        use_data_parallel: bool = False,
        attn_backend: _Backend = _Backend.TORCH_SDPA,
        use_upstream_fa: bool = False,
    ) -> None:
        super().__init__()
        # Per attention head and per partition values.
        self.tp_size = (1 if use_data_parallel else
                        parallel_state.get_tensor_model_parallel_world_size())
        self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
        self.hidden_size_per_attention_head = dist_utils.divide(
            projection_size, num_heads)
        self.num_attention_heads_per_partition = dist_utils.divide(
            num_heads, self.tp_size)

        self.qkv = QKVParallelLinear(
            hidden_size=embed_dim,
            head_size=self.hidden_size_per_attention_head,
            total_num_heads=num_heads,
            total_num_kv_heads=num_heads,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv",
            disable_tp=use_data_parallel)

        self.proj = RowParallelLinear(input_size=projection_size,
                                      output_size=embed_dim,
                                      quant_config=quant_config,
                                      prefix=f"{prefix}.proj",
                                      disable_tp=use_data_parallel)
        self.attn_backend = attn_backend
        self.use_upstream_fa = use_upstream_fa
        self.is_flash_attn_backend = self.attn_backend in {
            _Backend.FLASH_ATTN, _Backend.ROCM_AITER_FA
        }

    def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
        # [s, b, 3 * head * head_dim]
        seq_len, bs, _ = qkv.shape
        if self.tp_size > 1:
            qkv = all_gather_interleave(qkv, self.qkv.hidden_size,
                                        self.tp_size)

        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim]
        q, k, v = qkv.chunk(3, dim=2)

        # 3 * [s, b, head * head_dim]
        if self.tp_size > 1:
            splitter = partial(dist_utils.split_tensor_along_last_dim,
                               num_partitions=self.tp_size)
            q = splitter(q)[self.tp_rank]
            k = splitter(k)[self.tp_rank]
            v = splitter(v)[self.tp_rank]

        # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim]
        new_shape = (seq_len, bs, self.num_attention_heads_per_partition,
                     self.hidden_size_per_attention_head)
        q, k, v = (x.view(*new_shape) for x in (q, k, v))
        return q, k, v

    def forward(
            self,
            x: torch.Tensor,
            cu_seqlens: torch.Tensor,
            rotary_pos_emb: torch.Tensor,
            max_seqlen: Optional[int] = None,  # Only used for Flash Attention
            seqlens: Optional[list[int]] = None,  # Only used for xFormers
    ) -> torch.Tensor:
        # [s, b, c] --> [s, b, head * 3 * head_dim]
        x, _ = self.qkv(x)

        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim]
        q, k, v = self.split_qkv(x)
        batch_size = q.shape[1]

        q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous()
                   for x in (q, k, v))
        if rotary_pos_emb is not None:
            # [2 * b, s, heads, head_dim]
            qk_concat = torch.cat([q, k], dim=0)
            qk_rotated = apply_rotary_pos_emb_vision(qk_concat, rotary_pos_emb)
            q, k = torch.chunk(qk_rotated, 2, dim=0)

        if self.is_flash_attn_backend:
            if self.attn_backend == _Backend.ROCM_AITER_FA:
                from aiter import flash_attn_varlen_func
            else:
                if self.use_upstream_fa:
                    from flash_attn import flash_attn_varlen_func
                else:
                    from vllm.vllm_flash_attn import flash_attn_varlen_func

            q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v])

            output = flash_attn_varlen_func(q,
                                            k,
                                            v,
                                            cu_seqlens_q=cu_seqlens,
                                            cu_seqlens_k=cu_seqlens,
                                            max_seqlen_q=max_seqlen,
                                            max_seqlen_k=max_seqlen,
                                            dropout_p=0.0,
                                            causal=False)

            context_layer = rearrange(output,
                                      "(b s) h d -> s b (h d)",
                                      b=batch_size).contiguous()
        elif self.attn_backend == _Backend.TORCH_SDPA:
            # Execute attention entry by entry for speed & less VRAM.
            outputs = []
            for i in range(1, len(cu_seqlens)):
                start_idx = cu_seqlens[i - 1]
                end_idx = cu_seqlens[i]
                q_i = q[:, start_idx:end_idx]
                k_i = k[:, start_idx:end_idx]
                v_i = v[:, start_idx:end_idx]
                q_i, k_i, v_i = (rearrange(x, "b s h d -> b h s d")
                                 for x in [q_i, k_i, v_i])
                output_i = F.scaled_dot_product_attention(q_i,
                                                          k_i,
                                                          v_i,
                                                          dropout_p=0.0)
                output_i = rearrange(output_i, "b h s d -> b s h d ")
                outputs.append(output_i)
            context_layer = torch.cat(outputs, dim=1)
            context_layer = rearrange(context_layer,
                                      "b s h d -> s b (h d)").contiguous()
        elif self.attn_backend == _Backend.XFORMERS:
            from xformers import ops as xops
            from xformers.ops.fmha.attn_bias import BlockDiagonalMask

            attn_bias = BlockDiagonalMask.from_seqlens(q_seqlen=seqlens,
                                                       kv_seqlen=None,
                                                       device=q.device)

            context_layer = xops.memory_efficient_attention_forward(
                q, k, v, attn_bias=attn_bias, p=0, scale=None)
            context_layer = rearrange(context_layer,
                                      "b s h d -> s b (h d)").contiguous()

        output, _ = self.proj(context_layer)
        return output

attn_backend instance-attribute

attn_backend = attn_backend

hidden_size_per_attention_head instance-attribute

hidden_size_per_attention_head = divide(
    projection_size, num_heads
)

is_flash_attn_backend instance-attribute

is_flash_attn_backend = attn_backend in {
    FLASH_ATTN,
    ROCM_AITER_FA,
}

num_attention_heads_per_partition instance-attribute

num_attention_heads_per_partition = divide(
    num_heads, tp_size
)

proj instance-attribute

proj = RowParallelLinear(
    input_size=projection_size,
    output_size=embed_dim,
    quant_config=quant_config,
    prefix=f"{prefix}.proj",
    disable_tp=use_data_parallel,
)

qkv instance-attribute

qkv = QKVParallelLinear(
    hidden_size=embed_dim,
    head_size=hidden_size_per_attention_head,
    total_num_heads=num_heads,
    total_num_kv_heads=num_heads,
    bias=True,
    quant_config=quant_config,
    prefix=f"{prefix}.qkv",
    disable_tp=use_data_parallel,
)

tp_rank instance-attribute

tp_size instance-attribute

tp_size = (
    1
    if use_data_parallel
    else get_tensor_model_parallel_world_size()
)

use_upstream_fa instance-attribute

use_upstream_fa = use_upstream_fa

__init__

__init__(
    embed_dim: int,
    num_heads: int,
    projection_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
    attn_backend: _Backend = TORCH_SDPA,
    use_upstream_fa: bool = False,
) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(
    self,
    embed_dim: int,
    num_heads: int,
    projection_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
    attn_backend: _Backend = _Backend.TORCH_SDPA,
    use_upstream_fa: bool = False,
) -> None:
    super().__init__()
    # Per attention head and per partition values.
    self.tp_size = (1 if use_data_parallel else
                    parallel_state.get_tensor_model_parallel_world_size())
    self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
    self.hidden_size_per_attention_head = dist_utils.divide(
        projection_size, num_heads)
    self.num_attention_heads_per_partition = dist_utils.divide(
        num_heads, self.tp_size)

    self.qkv = QKVParallelLinear(
        hidden_size=embed_dim,
        head_size=self.hidden_size_per_attention_head,
        total_num_heads=num_heads,
        total_num_kv_heads=num_heads,
        bias=True,
        quant_config=quant_config,
        prefix=f"{prefix}.qkv",
        disable_tp=use_data_parallel)

    self.proj = RowParallelLinear(input_size=projection_size,
                                  output_size=embed_dim,
                                  quant_config=quant_config,
                                  prefix=f"{prefix}.proj",
                                  disable_tp=use_data_parallel)
    self.attn_backend = attn_backend
    self.use_upstream_fa = use_upstream_fa
    self.is_flash_attn_backend = self.attn_backend in {
        _Backend.FLASH_ATTN, _Backend.ROCM_AITER_FA
    }

forward

forward(
    x: Tensor,
    cu_seqlens: Tensor,
    rotary_pos_emb: Tensor,
    max_seqlen: Optional[int] = None,
    seqlens: Optional[list[int]] = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: torch.Tensor,
        max_seqlen: Optional[int] = None,  # Only used for Flash Attention
        seqlens: Optional[list[int]] = None,  # Only used for xFormers
) -> torch.Tensor:
    # [s, b, c] --> [s, b, head * 3 * head_dim]
    x, _ = self.qkv(x)

    # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim]
    q, k, v = self.split_qkv(x)
    batch_size = q.shape[1]

    q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous()
               for x in (q, k, v))
    if rotary_pos_emb is not None:
        # [2 * b, s, heads, head_dim]
        qk_concat = torch.cat([q, k], dim=0)
        qk_rotated = apply_rotary_pos_emb_vision(qk_concat, rotary_pos_emb)
        q, k = torch.chunk(qk_rotated, 2, dim=0)

    if self.is_flash_attn_backend:
        if self.attn_backend == _Backend.ROCM_AITER_FA:
            from aiter import flash_attn_varlen_func
        else:
            if self.use_upstream_fa:
                from flash_attn import flash_attn_varlen_func
            else:
                from vllm.vllm_flash_attn import flash_attn_varlen_func

        q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v])

        output = flash_attn_varlen_func(q,
                                        k,
                                        v,
                                        cu_seqlens_q=cu_seqlens,
                                        cu_seqlens_k=cu_seqlens,
                                        max_seqlen_q=max_seqlen,
                                        max_seqlen_k=max_seqlen,
                                        dropout_p=0.0,
                                        causal=False)

        context_layer = rearrange(output,
                                  "(b s) h d -> s b (h d)",
                                  b=batch_size).contiguous()
    elif self.attn_backend == _Backend.TORCH_SDPA:
        # Execute attention entry by entry for speed & less VRAM.
        outputs = []
        for i in range(1, len(cu_seqlens)):
            start_idx = cu_seqlens[i - 1]
            end_idx = cu_seqlens[i]
            q_i = q[:, start_idx:end_idx]
            k_i = k[:, start_idx:end_idx]
            v_i = v[:, start_idx:end_idx]
            q_i, k_i, v_i = (rearrange(x, "b s h d -> b h s d")
                             for x in [q_i, k_i, v_i])
            output_i = F.scaled_dot_product_attention(q_i,
                                                      k_i,
                                                      v_i,
                                                      dropout_p=0.0)
            output_i = rearrange(output_i, "b h s d -> b s h d ")
            outputs.append(output_i)
        context_layer = torch.cat(outputs, dim=1)
        context_layer = rearrange(context_layer,
                                  "b s h d -> s b (h d)").contiguous()
    elif self.attn_backend == _Backend.XFORMERS:
        from xformers import ops as xops
        from xformers.ops.fmha.attn_bias import BlockDiagonalMask

        attn_bias = BlockDiagonalMask.from_seqlens(q_seqlen=seqlens,
                                                   kv_seqlen=None,
                                                   device=q.device)

        context_layer = xops.memory_efficient_attention_forward(
            q, k, v, attn_bias=attn_bias, p=0, scale=None)
        context_layer = rearrange(context_layer,
                                  "b s h d -> s b (h d)").contiguous()

    output, _ = self.proj(context_layer)
    return output

split_qkv

split_qkv(qkv: Tensor) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
    # [s, b, 3 * head * head_dim]
    seq_len, bs, _ = qkv.shape
    if self.tp_size > 1:
        qkv = all_gather_interleave(qkv, self.qkv.hidden_size,
                                    self.tp_size)

    # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim]
    q, k, v = qkv.chunk(3, dim=2)

    # 3 * [s, b, head * head_dim]
    if self.tp_size > 1:
        splitter = partial(dist_utils.split_tensor_along_last_dim,
                           num_partitions=self.tp_size)
        q = splitter(q)[self.tp_rank]
        k = splitter(k)[self.tp_rank]
        v = splitter(v)[self.tp_rank]

    # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim]
    new_shape = (seq_len, bs, self.num_attention_heads_per_partition,
                 self.hidden_size_per_attention_head)
    q, k, v = (x.view(*new_shape) for x in (q, k, v))
    return q, k, v
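
The forward pass above packs every image/video in the batch into one flat token sequence and uses cu_seqlens to keep attention block-diagonal, so tokens from different items never attend to each other. A hedged, standalone sketch of that convention, mirroring the per-entry TORCH_SDPA loop with toy shapes:

import torch
import torch.nn.functional as F

# Two visual items packed into one sequence: lengths 4 and 6.
cu_seqlens = torch.tensor([0, 4, 10])
num_heads, head_dim = 2, 8
q = k = v = torch.randn(1, 10, num_heads, head_dim)     # [b, s, h, d], b == 1

outputs = []
for i in range(1, len(cu_seqlens)):
    s, e = cu_seqlens[i - 1], cu_seqlens[i]
    # Attend only within [s, e): one item's tokens never see the other item.
    q_i, k_i, v_i = (x[:, s:e].transpose(1, 2) for x in (q, k, v))
    outputs.append(
        F.scaled_dot_product_attention(q_i, k_i, v_i).transpose(1, 2))

context = torch.cat(outputs, dim=1)                     # back to [b, s, h, d]
assert context.shape == (1, 10, num_heads, head_dim)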

Qwen2_5_VisionBlock

Bases: Module

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VisionBlock(nn.Module):

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_hidden_dim: int,
        act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu,
        norm_layer: Optional[Callable[[int], nn.Module]] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        use_data_parallel: bool = False,
        attn_backend: _Backend = _Backend.TORCH_SDPA,
        use_upstream_fa: bool = False,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)
        self.attn = Qwen2_5_VisionAttention(
            embed_dim=dim,
            num_heads=num_heads,
            projection_size=dim,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
            use_data_parallel=use_data_parallel,
            attn_backend=attn_backend,
            use_upstream_fa=use_upstream_fa)
        self.mlp = Qwen2_5_VisionMLP(dim,
                                     mlp_hidden_dim,
                                     act_fn=act_fn,
                                     bias=True,
                                     quant_config=quant_config,
                                     prefix=f"{prefix}.mlp",
                                     use_data_parallel=use_data_parallel)

    def forward(
            self,
            x: torch.Tensor,
            cu_seqlens: torch.Tensor,
            rotary_pos_emb: torch.Tensor,
            max_seqlen: Optional[int] = None,  # Only used for Flash Attention
            seqlens: Optional[list[int]] = None,  # Only used for xFormers
    ) -> torch.Tensor:
        x_attn = self.attn(self.norm1(x),
                           cu_seqlens=cu_seqlens,
                           rotary_pos_emb=rotary_pos_emb,
                           max_seqlen=max_seqlen,
                           seqlens=seqlens)
        x_fused_norm, residual = self.norm2(x, residual=x_attn)
        x = residual + self.mlp(x_fused_norm)
        return x

attn instance-attribute

attn = Qwen2_5_VisionAttention(
    embed_dim=dim,
    num_heads=num_heads,
    projection_size=dim,
    quant_config=quant_config,
    prefix=f"{prefix}.attn",
    use_data_parallel=use_data_parallel,
    attn_backend=attn_backend,
    use_upstream_fa=use_upstream_fa,
)

mlp instance-attribute

mlp = Qwen2_5_VisionMLP(
    dim,
    mlp_hidden_dim,
    act_fn=act_fn,
    bias=True,
    quant_config=quant_config,
    prefix=f"{prefix}.mlp",
    use_data_parallel=use_data_parallel,
)

norm1 instance-attribute

norm1 = norm_layer(dim)

norm2 instance-attribute

norm2 = norm_layer(dim)

__init__

__init__(
    dim: int,
    num_heads: int,
    mlp_hidden_dim: int,
    act_fn: Callable[[Tensor], Tensor] = silu,
    norm_layer: Optional[Callable[[int], Module]] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
    attn_backend: _Backend = TORCH_SDPA,
    use_upstream_fa: bool = False,
) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(
    self,
    dim: int,
    num_heads: int,
    mlp_hidden_dim: int,
    act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu,
    norm_layer: Optional[Callable[[int], nn.Module]] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
    attn_backend: _Backend = _Backend.TORCH_SDPA,
    use_upstream_fa: bool = False,
) -> None:
    super().__init__()
    if norm_layer is None:
        norm_layer = partial(nn.LayerNorm, eps=1e-6)
    self.norm1 = norm_layer(dim)
    self.norm2 = norm_layer(dim)
    self.attn = Qwen2_5_VisionAttention(
        embed_dim=dim,
        num_heads=num_heads,
        projection_size=dim,
        quant_config=quant_config,
        prefix=f"{prefix}.attn",
        use_data_parallel=use_data_parallel,
        attn_backend=attn_backend,
        use_upstream_fa=use_upstream_fa)
    self.mlp = Qwen2_5_VisionMLP(dim,
                                 mlp_hidden_dim,
                                 act_fn=act_fn,
                                 bias=True,
                                 quant_config=quant_config,
                                 prefix=f"{prefix}.mlp",
                                 use_data_parallel=use_data_parallel)

forward

forward(
    x: Tensor,
    cu_seqlens: Tensor,
    rotary_pos_emb: Tensor,
    max_seqlen: Optional[int] = None,
    seqlens: Optional[list[int]] = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: torch.Tensor,
        max_seqlen: Optional[int] = None,  # Only used for Flash Attention
        seqlens: Optional[list[int]] = None,  # Only used for xFormers
) -> torch.Tensor:
    x_attn = self.attn(self.norm1(x),
                       cu_seqlens=cu_seqlens,
                       rotary_pos_emb=rotary_pos_emb,
                       max_seqlen=max_seqlen,
                       seqlens=seqlens)
    x_fused_norm, residual = self.norm2(x, residual=x_attn)
    x = residual + self.mlp(x_fused_norm)
    return x
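
norm2 here is vLLM's RMSNorm called with a residual argument, which fuses the residual add into the normalization. Read unfused, the block is the usual pre-norm pattern; the sketch below uses plain LayerNorm and placeholder sub-modules purely for illustration.

import torch
import torch.nn as nn

dim = 32
norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
attn = nn.Identity()                                 # stand-in for self-attention
mlp = nn.Sequential(nn.Linear(dim, 4 * dim), nn.SiLU(), nn.Linear(4 * dim, dim))

x = torch.randn(5, 1, dim)                           # [s, b, c], as in the tower

h = x + attn(norm1(x))                               # attention sub-layer + residual
out = h + mlp(norm2(h))                              # MLP sub-layer + residual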

Qwen2_5_VisionMLP

Bases: Module

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VisionMLP(nn.Module):

    def __init__(self,
                 in_features: int,
                 hidden_features: int,
                 bias: bool = False,
                 act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu,
                 quant_config: Optional[QuantizationConfig] = None,
                 prefix: str = "",
                 use_data_parallel: bool = False):
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            input_size=in_features,
            output_sizes=[hidden_features] * 2,  # [gate_proj, up_proj]
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
            disable_tp=use_data_parallel)

        self.down_proj = RowParallelLinear(hidden_features,
                                           in_features,
                                           bias=bias,
                                           quant_config=quant_config,
                                           prefix=f"{prefix}.down_proj",
                                           disable_tp=use_data_parallel)
        self.act_fn = act_fn

    def forward(self, x: torch.Tensor):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x_down, _ = self.down_proj(x)
        return x_down

act_fn instance-attribute

act_fn = act_fn

down_proj instance-attribute

down_proj = RowParallelLinear(
    hidden_features,
    in_features,
    bias=bias,
    quant_config=quant_config,
    prefix=f"{prefix}.down_proj",
    disable_tp=use_data_parallel,
)

gate_up_proj instance-attribute

gate_up_proj = MergedColumnParallelLinear(
    input_size=in_features,
    output_sizes=[hidden_features] * 2,
    bias=bias,
    quant_config=quant_config,
    prefix=f"{prefix}.gate_up_proj",
    disable_tp=use_data_parallel,
)

__init__

__init__(
    in_features: int,
    hidden_features: int,
    bias: bool = False,
    act_fn: Callable[[Tensor], Tensor] = silu,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
)
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(self,
             in_features: int,
             hidden_features: int,
             bias: bool = False,
             act_fn: Callable[[torch.Tensor], torch.Tensor] = F.silu,
             quant_config: Optional[QuantizationConfig] = None,
             prefix: str = "",
             use_data_parallel: bool = False):
    super().__init__()
    self.gate_up_proj = MergedColumnParallelLinear(
        input_size=in_features,
        output_sizes=[hidden_features] * 2,  # [gate_proj, up_proj]
        bias=bias,
        quant_config=quant_config,
        prefix=f"{prefix}.gate_up_proj",
        disable_tp=use_data_parallel)

    self.down_proj = RowParallelLinear(hidden_features,
                                       in_features,
                                       bias=bias,
                                       quant_config=quant_config,
                                       prefix=f"{prefix}.down_proj",
                                       disable_tp=use_data_parallel)
    self.act_fn = act_fn

forward

forward(x: Tensor)
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(self, x: torch.Tensor):
    gate_up, _ = self.gate_up_proj(x)
    x = self.act_fn(gate_up)
    x_down, _ = self.down_proj(x)
    return x_down

Qwen2_5_VisionPatchEmbed

Bases: Module

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VisionPatchEmbed(nn.Module):

    def __init__(
        self,
        patch_size: int = 14,
        temporal_patch_size: int = 2,
        in_channels: int = 3,
        hidden_size: int = 1152,
    ) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.hidden_size = hidden_size

        kernel_size = (temporal_patch_size, patch_size, patch_size)
        self.proj = nn.Conv3d(in_channels,
                              hidden_size,
                              kernel_size=kernel_size,
                              stride=kernel_size,
                              bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        L, C = x.shape
        x = x.view(L, -1, self.temporal_patch_size, self.patch_size,
                   self.patch_size)
        x = self.proj(x).view(L, self.hidden_size)
        return x

hidden_size instance-attribute

hidden_size = hidden_size

patch_size instance-attribute

patch_size = patch_size

proj instance-attribute

proj = Conv3d(
    in_channels,
    hidden_size,
    kernel_size=kernel_size,
    stride=kernel_size,
    bias=False,
)

temporal_patch_size instance-attribute

temporal_patch_size = temporal_patch_size

__init__

__init__(
    patch_size: int = 14,
    temporal_patch_size: int = 2,
    in_channels: int = 3,
    hidden_size: int = 1152,
) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(
    self,
    patch_size: int = 14,
    temporal_patch_size: int = 2,
    in_channels: int = 3,
    hidden_size: int = 1152,
) -> None:
    super().__init__()
    self.patch_size = patch_size
    self.temporal_patch_size = temporal_patch_size
    self.hidden_size = hidden_size

    kernel_size = (temporal_patch_size, patch_size, patch_size)
    self.proj = nn.Conv3d(in_channels,
                          hidden_size,
                          kernel_size=kernel_size,
                          stride=kernel_size,
                          bias=False)

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    L, C = x.shape
    x = x.view(L, -1, self.temporal_patch_size, self.patch_size,
               self.patch_size)
    x = self.proj(x).view(L, self.hidden_size)
    return x
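
The input here is a flat matrix of patches, one row per patch, with C = in_channels * temporal_patch_size * patch_size**2 values; each row is reshaped back into a small 3D volume and projected by a Conv3d whose stride equals its kernel. A standalone sketch with the default sizes:

import torch
import torch.nn as nn

patch_size, temporal_patch_size, in_channels, hidden_size = 14, 2, 3, 1152
kernel_size = (temporal_patch_size, patch_size, patch_size)
proj = nn.Conv3d(in_channels, hidden_size,
                 kernel_size=kernel_size, stride=kernel_size, bias=False)

L = 384                                                           # number of patches
C = in_channels * temporal_patch_size * patch_size * patch_size   # 1176
x = torch.randn(L, C)

x = x.view(L, -1, temporal_patch_size, patch_size, patch_size)    # [L, 3, 2, 14, 14]
out = proj(x).view(L, hidden_size)
assert out.shape == (L, hidden_size)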

Qwen2_5_VisionPatchMerger

Bases: Module

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VisionPatchMerger(nn.Module):

    def __init__(
        self,
        d_model: int,
        context_dim: int,
        norm_layer: Optional[Callable[[int], nn.Module]] = None,
        spatial_merge_size: int = 2,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        use_data_parallel: bool = False,
    ) -> None:
        super().__init__()
        self.hidden_size = context_dim * (spatial_merge_size**2)
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        self.ln_q = norm_layer(context_dim)

        self.mlp = nn.Sequential(
            ColumnParallelLinear(
                self.hidden_size,
                self.hidden_size,
                bias=True,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp.0",
                return_bias=False,
                disable_tp=use_data_parallel,
            ),
            nn.GELU(),
            RowParallelLinear(
                self.hidden_size,
                d_model,
                bias=True,
                quant_config=quant_config,
                prefix=f"{prefix}.mlp.2",
                return_bias=False,
                disable_tp=use_data_parallel,
            ),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.ln_q(x)
        x = x.view(-1, self.hidden_size)
        out = self.mlp(x)
        return out

hidden_size instance-attribute

hidden_size = context_dim * spatial_merge_size ** 2

ln_q instance-attribute

ln_q = norm_layer(context_dim)

mlp instance-attribute

mlp = Sequential(
    ColumnParallelLinear(
        hidden_size,
        hidden_size,
        bias=True,
        quant_config=quant_config,
        prefix=f"{prefix}.mlp.0",
        return_bias=False,
        disable_tp=use_data_parallel,
    ),
    GELU(),
    RowParallelLinear(
        hidden_size,
        d_model,
        bias=True,
        quant_config=quant_config,
        prefix=f"{prefix}.mlp.2",
        return_bias=False,
        disable_tp=use_data_parallel,
    ),
)

__init__

__init__(
    d_model: int,
    context_dim: int,
    norm_layer: Optional[Callable[[int], Module]] = None,
    spatial_merge_size: int = 2,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(
    self,
    d_model: int,
    context_dim: int,
    norm_layer: Optional[Callable[[int], nn.Module]] = None,
    spatial_merge_size: int = 2,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
) -> None:
    super().__init__()
    self.hidden_size = context_dim * (spatial_merge_size**2)
    if norm_layer is None:
        norm_layer = partial(nn.LayerNorm, eps=1e-6)
    self.ln_q = norm_layer(context_dim)

    self.mlp = nn.Sequential(
        ColumnParallelLinear(
            self.hidden_size,
            self.hidden_size,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp.0",
            return_bias=False,
            disable_tp=use_data_parallel,
        ),
        nn.GELU(),
        RowParallelLinear(
            self.hidden_size,
            d_model,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp.2",
            return_bias=False,
            disable_tp=use_data_parallel,
        ),
    )

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    x = self.ln_q(x)
    x = x.view(-1, self.hidden_size)
    out = self.mlp(x)
    return out
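
The merger folds every spatial_merge_size**2 neighbouring vision tokens into one wider vector before projecting to the language model width, which is why each image/video occupies t * h * w / merge_size**2 placeholder tokens. A shape-only sketch with illustrative sizes:

import torch

context_dim, d_model, spatial_merge_size = 1280, 3584, 2
merged_dim = context_dim * spatial_merge_size**2           # 5120

seq_len = 384                                              # tokens before merging
x = torch.randn(seq_len, context_dim)

# ln_q is elided; the view packs merge_size**2 consecutive tokens into one row.
x = x.view(-1, merged_dim)
assert x.shape == (seq_len // spatial_merge_size**2, merged_dim)
# The two-layer MLP then maps merged_dim -> merged_dim -> d_model.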

Qwen2_5_VisionRotaryEmbedding

Bases: Module

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VisionRotaryEmbedding(nn.Module):

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        self.dim = dim
        self.theta = theta
        inv_freq = 1.0 / (theta**(
            torch.arange(0, dim, 2, dtype=torch.float, device='cpu') / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self._seq_len_cached = 0
        self._freqs_cached = None

    def update_freqs_cache(self, seqlen: int) -> None:
        if seqlen > self._seq_len_cached:
            seqlen *= 2
            self._seq_len_cached = seqlen
            self.inv_freq = 1.0 / (self.theta**(torch.arange(
                0, self.dim, 2, dtype=torch.float, device=self.inv_freq.device)
                                                / self.dim))
            seq = torch.arange(seqlen,
                               device=self.inv_freq.device,
                               dtype=self.inv_freq.dtype)
            freqs = torch.outer(seq, self.inv_freq)
            self._freqs_cached = freqs

    def forward(self, seqlen: int) -> torch.Tensor:
        self.update_freqs_cache(seqlen)
        return self._freqs_cached[:seqlen]

_freqs_cached instance-attribute

_freqs_cached = None

_seq_len_cached instance-attribute

_seq_len_cached = 0

dim instance-attribute

dim = dim

theta instance-attribute

theta = theta

__init__

__init__(dim: int, theta: float = 10000.0) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(self, dim: int, theta: float = 10000.0) -> None:
    super().__init__()
    self.dim = dim
    self.theta = theta
    inv_freq = 1.0 / (theta**(
        torch.arange(0, dim, 2, dtype=torch.float, device='cpu') / dim))
    self.register_buffer("inv_freq", inv_freq, persistent=False)
    self._seq_len_cached = 0
    self._freqs_cached = None

forward

forward(seqlen: int) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(self, seqlen: int) -> torch.Tensor:
    self.update_freqs_cache(seqlen)
    return self._freqs_cached[:seqlen]

update_freqs_cache

update_freqs_cache(seqlen: int) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def update_freqs_cache(self, seqlen: int) -> None:
    if seqlen > self._seq_len_cached:
        seqlen *= 2
        self._seq_len_cached = seqlen
        self.inv_freq = 1.0 / (self.theta**(torch.arange(
            0, self.dim, 2, dtype=torch.float, device=self.inv_freq.device)
                                            / self.dim))
        seq = torch.arange(seqlen,
                           device=self.inv_freq.device,
                           dtype=self.inv_freq.dtype)
        freqs = torch.outer(seq, self.inv_freq)
        self._freqs_cached = freqs
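
The cache grows geometrically: whenever a longer sequence is requested, the table is rebuilt at twice that length, so subsequent calls with smaller or equal seqlen are free. The table itself is just an outer product of positions and inverse frequencies, as in this sketch:

import torch

dim, theta = 40, 10000.0          # dim is head_dim // 2 in the vision tower
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))

seqlen = 64
seq = torch.arange(2 * seqlen, dtype=inv_freq.dtype)   # cache 2x, like the module
freqs = torch.outer(seq, inv_freq)                     # [2 * seqlen, dim // 2]

# forward(seqlen) returns freqs[:seqlen]; a later forward(100) still hits the
# cache because 100 <= 2 * 64.
assert freqs[:seqlen].shape == (seqlen, dim // 2)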

Qwen2_5_VisionTransformer

Bases: Module

Source code in vllm/model_executor/models/qwen2_5_vl.py
class Qwen2_5_VisionTransformer(nn.Module):

    def __init__(
        self,
        vision_config: Qwen2_5_VLVisionConfig,
        norm_eps: float = 1e-6,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        use_data_parallel: bool = False,
    ) -> None:
        super().__init__()

        patch_size = vision_config.patch_size
        temporal_patch_size = vision_config.temporal_patch_size
        in_channels = vision_config.in_channels
        depth = vision_config.depth
        self.hidden_size = vision_config.hidden_size
        self.num_heads = vision_config.num_heads
        self.use_data_parallel = use_data_parallel
        self.out_hidden_size = vision_config.out_hidden_size

        # args for get_window_index_thw
        self.window_size = vision_config.window_size
        self.patch_size = vision_config.patch_size
        self.spatial_merge_size = vision_config.spatial_merge_size
        self.fullatt_block_indexes = vision_config.fullatt_block_indexes
        self.spatial_merge_unit = self.spatial_merge_size**2

        self.patch_embed = Qwen2_5_VisionPatchEmbed(
            patch_size=patch_size,
            temporal_patch_size=temporal_patch_size,
            in_channels=in_channels,
            hidden_size=self.hidden_size,
        )

        norm_layer = partial(RMSNorm, eps=norm_eps)
        head_dim = self.hidden_size // self.num_heads
        self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2)

        use_upstream_fa = False
        self.attn_backend = get_vit_attn_backend(
            head_size=head_dim, dtype=torch.get_default_dtype())
        if self.attn_backend != _Backend.FLASH_ATTN and \
            check_upstream_fa_availability(
                torch.get_default_dtype()):
            self.attn_backend = _Backend.FLASH_ATTN
            use_upstream_fa = True

        if self.attn_backend not in {
                _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS,
                _Backend.ROCM_AITER_FA
        }:
            raise RuntimeError(
                f"Qwen2.5-VL does not support {self.attn_backend} backend now."
            )

        self.blocks = nn.ModuleList([
            Qwen2_5_VisionBlock(
                dim=self.hidden_size,
                num_heads=self.num_heads,
                mlp_hidden_dim=vision_config.intermediate_size,
                act_fn=get_act_and_mul_fn(vision_config.hidden_act),
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.blocks.{layer_idx}",
                use_data_parallel=use_data_parallel,
                attn_backend=self.attn_backend,
                use_upstream_fa=use_upstream_fa) for layer_idx in range(depth)
        ])
        self.merger = Qwen2_5_VisionPatchMerger(
            d_model=vision_config.out_hidden_size,
            context_dim=self.hidden_size,
            norm_layer=norm_layer,
            spatial_merge_size=self.spatial_merge_size,
            quant_config=quant_config,
            prefix=f"{prefix}.merger",
            use_data_parallel=use_data_parallel,
        )

    @property
    def dtype(self) -> torch.dtype:
        return self.patch_embed.proj.weight.dtype

    @property
    def device(self) -> torch.device:
        return self.patch_embed.proj.weight.device

    def rotary_pos_emb_thw(self, t, h, w):
        hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
        wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
        hpos_ids = hpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten()
        wpos_ids = wpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten()
        pos_ids = torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)
        max_size = max(h, w)
        rotary_pos_emb_full = self.rotary_pos_emb(max_size)
        rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
        rotary_pos_emb = rotary_pos_emb.reshape(
            rotary_pos_emb.shape[0] // self.spatial_merge_unit,
            self.spatial_merge_unit, -1)

        return rotary_pos_emb

    def get_window_index_thw(self, grid_t, grid_h, grid_w):
        vit_merger_window_size = (self.window_size //
                                  self.spatial_merge_size // self.patch_size)

        llm_grid_h = grid_h // self.spatial_merge_size
        llm_grid_w = grid_w // self.spatial_merge_size
        index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(
            grid_t, llm_grid_h, llm_grid_w)
        pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
        pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
        num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
        num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
        index_padded = F.pad(index, (0, pad_w, 0, pad_h), 'constant', -100)
        index_padded = index_padded.reshape(grid_t, num_windows_h,
                                            vit_merger_window_size,
                                            num_windows_w,
                                            vit_merger_window_size)
        index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
            grid_t, num_windows_h * num_windows_w, vit_merger_window_size,
            vit_merger_window_size)
        seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
        index_padded = index_padded.reshape(-1)
        index_new = index_padded[index_padded != -100]
        cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit
        cu_seqlens_tmp = cu_seqlens_tmp.to(dtype=torch.int32)
        cu_seqlens_tmp = torch.unique_consecutive(cu_seqlens_tmp)

        return index_new, cu_seqlens_tmp

    @lru_cache(maxsize=1024)  # noqa: B019
    def get_rope_by_thw(self, t, h, w):
        window_index_thw, cu_seqlens_window_thw = self.get_window_index_thw(
            t, h, w)
        rotary_pos_emb_thw = self.rotary_pos_emb_thw(t, h, w)
        rotary_pos_emb_thw = rotary_pos_emb_thw[window_index_thw, :, :]
        rotary_pos_emb_thw = rotary_pos_emb_thw.flatten(start_dim=0, end_dim=1)
        cu_seqlens_thw = torch.repeat_interleave(
            torch.tensor([h * w], dtype=torch.int32), t)
        return (rotary_pos_emb_thw, window_index_thw, cu_seqlens_window_thw,
                cu_seqlens_thw)

    def compute_attn_mask_seqlen(
        self,
        cu_seqlens: torch.Tensor,
    ) -> tuple[Optional[int], Optional[list[int]]]:
        max_seqlen, seqlens = None, None
        if (self.attn_backend == _Backend.FLASH_ATTN
                or self.attn_backend == _Backend.ROCM_AITER_FA):
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
        elif self.attn_backend == _Backend.XFORMERS:
            seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
        return max_seqlen, seqlens

    @staticmethod
    def invert_permutation(perm: torch.Tensor) -> torch.Tensor:
        # building the inverse permutation in O(n) time
        inv = torch.empty_like(perm, pin_memory=is_pin_memory_available())
        inv[perm] = torch.arange(perm.numel(),
                                 device=perm.device,
                                 dtype=perm.dtype)
        return inv

    def forward(
        self,
        x: torch.Tensor,
        grid_thw: list[list[int]],
    ) -> torch.Tensor:
        # patchify
        seq_len, _ = x.size()
        rotary_pos_emb = []
        window_index: list = []
        cu_window_seqlens: list = [torch.tensor([0], dtype=torch.int32)]
        cu_seqlens: list = []

        hidden_states = x.to(device=self.device, dtype=self.dtype)
        hidden_states = self.patch_embed(hidden_states)

        window_index_id = 0
        cu_window_seqlens_last = 0
        for t, h, w in grid_thw:
            t, h, w = int(t), int(h), int(w)
            llm_h = h // self.spatial_merge_size
            llm_w = w // self.spatial_merge_size

            (
                rotary_pos_emb_thw,
                window_index_thw,
                cu_seqlens_window_thw,
                cu_seqlens_thw,
            ) = self.get_rope_by_thw(t, h, w)

            window_index.append(window_index_thw + window_index_id)
            window_index_id += (t * llm_h * llm_w)

            cu_seqlens_window_thw = (cu_seqlens_window_thw +
                                     cu_window_seqlens_last)
            cu_window_seqlens_last = cu_seqlens_window_thw[-1]
            cu_window_seqlens.append(cu_seqlens_window_thw)

            rotary_pos_emb.append(rotary_pos_emb_thw)

            cu_seqlens.append(cu_seqlens_thw)

        rotary_pos_emb = torch.cat(rotary_pos_emb)
        window_index = torch.cat(window_index)
        # compute reverse indices
        reverse_indices = self.invert_permutation(window_index)
        cu_window_seqlens = torch.cat(cu_window_seqlens)
        cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
        cu_seqlens = torch.cat(cu_seqlens)
        cu_seqlens = torch.cumsum(cu_seqlens, dim=0, dtype=torch.int32)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)

        # transformers
        # pre-compute seqlens for window/full attn to reduce cuMemcpy operations
        max_seqlen_full, seqlens_full = self.compute_attn_mask_seqlen(
            cu_seqlens)
        max_seqlen_window, seqlens_window = self.compute_attn_mask_seqlen(
            cu_window_seqlens)

        cu_seqlens = cu_seqlens.to(device=self.device, non_blocking=True)
        cu_window_seqlens = cu_window_seqlens.to(device=self.device,
                                                 non_blocking=True)
        rotary_pos_emb = rotary_pos_emb.to(device=self.device,
                                           non_blocking=True)
        window_index = window_index.to(device=hidden_states.device,
                                       non_blocking=True)
        reverse_indices = reverse_indices.to(device=hidden_states.device,
                                             non_blocking=True)

        hidden_states = hidden_states.reshape(
            seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
        hidden_states = hidden_states[window_index, :, :]
        hidden_states = hidden_states.reshape(seq_len, -1)

        hidden_states = hidden_states.unsqueeze(1)

        for layer_num, blk in enumerate(self.blocks):
            if layer_num in self.fullatt_block_indexes:
                cu_seqlens_now = cu_seqlens
                max_seqlen_now = max_seqlen_full
                seqlens_now = seqlens_full
            else:
                cu_seqlens_now = cu_window_seqlens
                max_seqlen_now = max_seqlen_window
                seqlens_now = seqlens_window

            hidden_states = blk(
                hidden_states,
                cu_seqlens=cu_seqlens_now,
                rotary_pos_emb=rotary_pos_emb,
                max_seqlen=max_seqlen_now,
                seqlens=seqlens_now,
            )

        # For Qwen2.5-VL-3B, float16 will overflow at the last block
        # for long visual token sequences.
        if hidden_states.dtype == torch.float16:
            hidden_states = cast_overflow_tensors(hidden_states)

        # adapter
        hidden_states = self.merger(hidden_states)
        hidden_states = hidden_states[reverse_indices, :]
        return hidden_states

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("attn.qkv.", "attn.q.", "q"),
            ("attn.qkv.", "attn.k.", "k"),
            ("attn.qkv.", "attn.v.", "v"),
            ("mlp.gate_up_proj.", "mlp.gate_proj.", 0),
            ("mlp.gate_up_proj.", "mlp.up_proj.", 1),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()

        for name, loaded_weight in weights:
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

attn_backend instance-attribute

attn_backend = get_vit_attn_backend(
    head_size=head_dim, dtype=get_default_dtype()
)

blocks instance-attribute

blocks = ModuleList(
    [
        (
            Qwen2_5_VisionBlock(
                dim=hidden_size,
                num_heads=num_heads,
                mlp_hidden_dim=intermediate_size,
                act_fn=get_act_and_mul_fn(hidden_act),
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.blocks.{layer_idx}",
                use_data_parallel=use_data_parallel,
                attn_backend=attn_backend,
                use_upstream_fa=use_upstream_fa,
            )
        )
        for layer_idx in (range(depth))
    ]
)

device property

device: device

dtype property

dtype: dtype

fullatt_block_indexes instance-attribute

fullatt_block_indexes = fullatt_block_indexes

hidden_size instance-attribute

hidden_size = hidden_size

merger instance-attribute

merger = Qwen2_5_VisionPatchMerger(
    d_model=out_hidden_size,
    context_dim=hidden_size,
    norm_layer=norm_layer,
    spatial_merge_size=spatial_merge_size,
    quant_config=quant_config,
    prefix=f"{prefix}.merger",
    use_data_parallel=use_data_parallel,
)

num_heads instance-attribute

num_heads = num_heads

out_hidden_size instance-attribute

out_hidden_size = out_hidden_size

patch_embed instance-attribute

patch_embed = Qwen2_5_VisionPatchEmbed(
    patch_size=patch_size,
    temporal_patch_size=temporal_patch_size,
    in_channels=in_channels,
    hidden_size=hidden_size,
)

patch_size instance-attribute

patch_size = patch_size

rotary_pos_emb instance-attribute

rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(
    head_dim // 2
)

spatial_merge_size instance-attribute

spatial_merge_size = spatial_merge_size

spatial_merge_unit instance-attribute

spatial_merge_unit = spatial_merge_size ** 2

use_data_parallel instance-attribute

use_data_parallel = use_data_parallel

window_size instance-attribute

window_size = window_size

__init__

__init__(
    vision_config: Qwen2_5_VLVisionConfig,
    norm_eps: float = 1e-06,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
) -> None
Source code in vllm/model_executor/models/qwen2_5_vl.py
def __init__(
    self,
    vision_config: Qwen2_5_VLVisionConfig,
    norm_eps: float = 1e-6,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    use_data_parallel: bool = False,
) -> None:
    super().__init__()

    patch_size = vision_config.patch_size
    temporal_patch_size = vision_config.temporal_patch_size
    in_channels = vision_config.in_channels
    depth = vision_config.depth
    self.hidden_size = vision_config.hidden_size
    self.num_heads = vision_config.num_heads
    self.use_data_parallel = use_data_parallel
    self.out_hidden_size = vision_config.out_hidden_size

    # args for get_window_index_thw
    self.window_size = vision_config.window_size
    self.patch_size = vision_config.patch_size
    self.spatial_merge_size = vision_config.spatial_merge_size
    self.fullatt_block_indexes = vision_config.fullatt_block_indexes
    self.spatial_merge_unit = self.spatial_merge_size**2

    self.patch_embed = Qwen2_5_VisionPatchEmbed(
        patch_size=patch_size,
        temporal_patch_size=temporal_patch_size,
        in_channels=in_channels,
        hidden_size=self.hidden_size,
    )

    norm_layer = partial(RMSNorm, eps=norm_eps)
    head_dim = self.hidden_size // self.num_heads
    self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2)

    use_upstream_fa = False
    self.attn_backend = get_vit_attn_backend(
        head_size=head_dim, dtype=torch.get_default_dtype())
    if self.attn_backend != _Backend.FLASH_ATTN and \
        check_upstream_fa_availability(
            torch.get_default_dtype()):
        self.attn_backend = _Backend.FLASH_ATTN
        use_upstream_fa = True

    if self.attn_backend not in {
            _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS,
            _Backend.ROCM_AITER_FA
    }:
        raise RuntimeError(
            f"Qwen2.5-VL does not support {self.attn_backend} backend now."
        )

    self.blocks = nn.ModuleList([
        Qwen2_5_VisionBlock(
            dim=self.hidden_size,
            num_heads=self.num_heads,
            mlp_hidden_dim=vision_config.intermediate_size,
            act_fn=get_act_and_mul_fn(vision_config.hidden_act),
            norm_layer=norm_layer,
            quant_config=quant_config,
            prefix=f"{prefix}.blocks.{layer_idx}",
            use_data_parallel=use_data_parallel,
            attn_backend=self.attn_backend,
            use_upstream_fa=use_upstream_fa) for layer_idx in range(depth)
    ])
    self.merger = Qwen2_5_VisionPatchMerger(
        d_model=vision_config.out_hidden_size,
        context_dim=self.hidden_size,
        norm_layer=norm_layer,
        spatial_merge_size=self.spatial_merge_size,
        quant_config=quant_config,
        prefix=f"{prefix}.merger",
        use_data_parallel=use_data_parallel,
    )

compute_attn_mask_seqlen

compute_attn_mask_seqlen(
    cu_seqlens: Tensor,
) -> tuple[Optional[int], Optional[list[int]]]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def compute_attn_mask_seqlen(
    self,
    cu_seqlens: torch.Tensor,
) -> tuple[Optional[int], Optional[list[int]]]:
    max_seqlen, seqlens = None, None
    if (self.attn_backend == _Backend.FLASH_ATTN
            or self.attn_backend == _Backend.ROCM_AITER_FA):
        max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
    elif self.attn_backend == _Backend.XFORMERS:
        seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
    return max_seqlen, seqlens
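
A small worked example of what this helper produces for each backend family, given a cu_seqlens tensor of the kind built in forward:

import torch

cu_seqlens = torch.tensor([0, 4, 10, 16], dtype=torch.int32)
lengths = cu_seqlens[1:] - cu_seqlens[:-1]      # tensor([4, 6, 6])

# FLASH_ATTN / ROCM_AITER_FA: only the longest sequence length is needed.
max_seqlen = lengths.max().item()               # 6

# XFORMERS: the full list of lengths is needed to build the block-diagonal mask.
seqlens = lengths.tolist()                      # [4, 6, 6]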

forward

forward(x: Tensor, grid_thw: list[list[int]]) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
def forward(
    self,
    x: torch.Tensor,
    grid_thw: list[list[int]],
) -> torch.Tensor:
    # patchify
    seq_len, _ = x.size()
    rotary_pos_emb = []
    window_index: list = []
    cu_window_seqlens: list = [torch.tensor([0], dtype=torch.int32)]
    cu_seqlens: list = []

    hidden_states = x.to(device=self.device, dtype=self.dtype)
    hidden_states = self.patch_embed(hidden_states)

    window_index_id = 0
    cu_window_seqlens_last = 0
    for t, h, w in grid_thw:
        t, h, w = int(t), int(h), int(w)
        llm_h = h // self.spatial_merge_size
        llm_w = w // self.spatial_merge_size

        (
            rotary_pos_emb_thw,
            window_index_thw,
            cu_seqlens_window_thw,
            cu_seqlens_thw,
        ) = self.get_rope_by_thw(t, h, w)

        window_index.append(window_index_thw + window_index_id)
        window_index_id += (t * llm_h * llm_w)

        cu_seqlens_window_thw = (cu_seqlens_window_thw +
                                 cu_window_seqlens_last)
        cu_window_seqlens_last = cu_seqlens_window_thw[-1]
        cu_window_seqlens.append(cu_seqlens_window_thw)

        rotary_pos_emb.append(rotary_pos_emb_thw)

        cu_seqlens.append(cu_seqlens_thw)

    rotary_pos_emb = torch.cat(rotary_pos_emb)
    window_index = torch.cat(window_index)
    # compute reverse indices
    reverse_indices = self.invert_permutation(window_index)
    cu_window_seqlens = torch.cat(cu_window_seqlens)
    cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
    cu_seqlens = torch.cat(cu_seqlens)
    cu_seqlens = torch.cumsum(cu_seqlens, dim=0, dtype=torch.int32)
    cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)

    # transformers
    # pre-compute seqlens for window/full attn to reduce cuMemcpy operations
    max_seqlen_full, seqlens_full = self.compute_attn_mask_seqlen(
        cu_seqlens)
    max_seqlen_window, seqlens_window = self.compute_attn_mask_seqlen(
        cu_window_seqlens)

    cu_seqlens = cu_seqlens.to(device=self.device, non_blocking=True)
    cu_window_seqlens = cu_window_seqlens.to(device=self.device,
                                             non_blocking=True)
    rotary_pos_emb = rotary_pos_emb.to(device=self.device,
                                       non_blocking=True)
    window_index = window_index.to(device=hidden_states.device,
                                   non_blocking=True)
    reverse_indices = reverse_indices.to(device=hidden_states.device,
                                         non_blocking=True)

    hidden_states = hidden_states.reshape(
        seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
    hidden_states = hidden_states[window_index, :, :]
    hidden_states = hidden_states.reshape(seq_len, -1)

    hidden_states = hidden_states.unsqueeze(1)

    for layer_num, blk in enumerate(self.blocks):
        if layer_num in self.fullatt_block_indexes:
            cu_seqlens_now = cu_seqlens
            max_seqlen_now = max_seqlen_full
            seqlens_now = seqlens_full
        else:
            cu_seqlens_now = cu_window_seqlens
            max_seqlen_now = max_seqlen_window
            seqlens_now = seqlens_window

        hidden_states = blk(
            hidden_states,
            cu_seqlens=cu_seqlens_now,
            rotary_pos_emb=rotary_pos_emb,
            max_seqlen=max_seqlen_now,
            seqlens=seqlens_now,
        )

    # For Qwen2.5-VL-3B, float16 will overflow at the last block
    # for long visual token sequences.
    if hidden_states.dtype == torch.float16:
        hidden_states = cast_overflow_tensors(hidden_states)

    # adapter
    hidden_states = self.merger(hidden_states)
    hidden_states = hidden_states[reverse_indices, :]
    return hidden_states
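
A minimal sketch of how the full-attention boundaries in the loop above are assembled from grid_thw (the grid sizes below are hypothetical; only the cu_seqlens bookkeeping is reproduced):

import torch
import torch.nn.functional as F

# Hypothetical inputs: one image with a 1x4x6 patch grid and one video with
# a 2x4x4 patch grid (t, h, w in patch units).
grid_thw = [[1, 4, 6], [2, 4, 4]]
cu_seqlens = []
for t, h, w in grid_thw:
    # one entry of h * w patches per temporal frame
    cu_seqlens.append(torch.repeat_interleave(
        torch.tensor([h * w], dtype=torch.int32), t))
cu_seqlens = torch.cat(cu_seqlens)                     # [24, 16, 16]
cu_seqlens = torch.cumsum(cu_seqlens, dim=0, dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)  # [0, 24, 40, 56]
# Full-attention blocks therefore see three independent sequences of 24, 16
# and 16 patches; window-attention blocks use cu_window_seqlens instead.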

get_rope_by_thw cached

get_rope_by_thw(t, h, w)
Source code in vllm/model_executor/models/qwen2_5_vl.py
@lru_cache(maxsize=1024)  # noqa: B019
def get_rope_by_thw(self, t, h, w):
    window_index_thw, cu_seqlens_window_thw = self.get_window_index_thw(
        t, h, w)
    rotary_pos_emb_thw = self.rotary_pos_emb_thw(t, h, w)
    rotary_pos_emb_thw = rotary_pos_emb_thw[window_index_thw, :, :]
    rotary_pos_emb_thw = rotary_pos_emb_thw.flatten(start_dim=0, end_dim=1)
    cu_seqlens_thw = torch.repeat_interleave(
        torch.tensor([h * w], dtype=torch.int32), t)
    return (rotary_pos_emb_thw, window_index_thw, cu_seqlens_window_thw,
            cu_seqlens_thw)
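
Because the output depends only on the grid shape, the lru_cache lets repeated media with identical (t, h, w) grids reuse the precomputed rotary embeddings and window indices. A toy, self-contained sketch of that caching behaviour (TinyViT is a hypothetical stand-in; note that self is part of the cache key and the cache keeps the instance alive, which is why the decorator carries # noqa: B019):

from functools import lru_cache

class TinyViT:
    """Hypothetical stand-in used only to illustrate the caching."""

    @lru_cache(maxsize=1024)  # noqa: B019
    def get_rope_by_thw(self, t, h, w):
        print(f"computing for grid ({t}, {h}, {w})")
        return (t, h, w)

vit = TinyViT()
vit.get_rope_by_thw(1, 32, 32)   # computed and cached
vit.get_rope_by_thw(1, 32, 32)   # cache hit: nothing recomputed
vit.get_rope_by_thw(1, 48, 32)   # different grid: computed again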

get_window_index_thw

get_window_index_thw(grid_t, grid_h, grid_w)
Source code in vllm/model_executor/models/qwen2_5_vl.py
def get_window_index_thw(self, grid_t, grid_h, grid_w):
    vit_merger_window_size = (self.window_size //
                              self.spatial_merge_size // self.patch_size)

    llm_grid_h = grid_h // self.spatial_merge_size
    llm_grid_w = grid_w // self.spatial_merge_size
    index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(
        grid_t, llm_grid_h, llm_grid_w)
    pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
    pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
    num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
    num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
    index_padded = F.pad(index, (0, pad_w, 0, pad_h), 'constant', -100)
    index_padded = index_padded.reshape(grid_t, num_windows_h,
                                        vit_merger_window_size,
                                        num_windows_w,
                                        vit_merger_window_size)
    index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
        grid_t, num_windows_h * num_windows_w, vit_merger_window_size,
        vit_merger_window_size)
    seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
    index_padded = index_padded.reshape(-1)
    index_new = index_padded[index_padded != -100]
    cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit
    cu_seqlens_tmp = cu_seqlens_tmp.to(dtype=torch.int32)
    cu_seqlens_tmp = torch.unique_consecutive(cu_seqlens_tmp)

    return index_new, cu_seqlens_tmp
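
A worked example with a hypothetical 1x10x10 patch grid, assuming the usual Qwen2.5-VL vision settings (window_size=112, spatial_merge_size=2, patch_size=14, so vit_merger_window_size = 112 // 2 // 14 = 4); it reproduces just the padding and per-window counting steps with concrete numbers:

import torch
import torch.nn.functional as F

# Hypothetical grid: 10x10 patches -> llm_grid_h = llm_grid_w = 5.
grid_t, llm_grid_h, llm_grid_w, win = 1, 5, 5, 4
index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(
    grid_t, llm_grid_h, llm_grid_w)
pad_h = win - llm_grid_h % win                          # 3
pad_w = win - llm_grid_w % win                          # 3
num_win_h = (llm_grid_h + pad_h) // win                 # 2
num_win_w = (llm_grid_w + pad_w) // win                 # 2
index_padded = F.pad(index, (0, pad_w, 0, pad_h), 'constant', -100)
index_padded = index_padded.reshape(grid_t, num_win_h, win,
                                    num_win_w, win).permute(0, 1, 3, 2, 4)
seqlens = (index_padded != -100).sum([3, 4]).reshape(-1)
print(seqlens.tolist())                                 # [16, 4, 4, 1]
print((seqlens.cumsum(0) * 4).tolist())                 # [64, 80, 96, 100]
# Four 4x4 windows cover the padded 8x8 grid; only the top-left one is full,
# and cu_seqlens_tmp scales the cumulative counts by spatial_merge_unit (= 4).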

invert_permutation staticmethod

invert_permutation(perm: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_5_vl.py
@staticmethod
def invert_permutation(perm: torch.Tensor) -> torch.Tensor:
    # building the inverse permutation in O(n) time
    inv = torch.empty_like(perm, pin_memory=is_pin_memory_available())
    inv[perm] = torch.arange(perm.numel(),
                             device=perm.device,
                             dtype=perm.dtype)
    return inv
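
A self-contained illustration of the same construction: indexing with the inverse permutation undoes indexing with the original permutation, which is how reverse_indices restores the window-permuted hidden states at the end of forward:

import torch

perm = torch.tensor([2, 0, 3, 1])
inv = torch.empty_like(perm)
inv[perm] = torch.arange(perm.numel())   # inv[perm[i]] == i for every i
x = torch.tensor([10, 20, 30, 40])
assert torch.equal(x[perm][inv], x)      # permute, then invert: original order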

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/qwen2_5_vl.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        ("attn.qkv.", "attn.q.", "q"),
        ("attn.qkv.", "attn.k.", "k"),
        ("attn.qkv.", "attn.v.", "v"),
        ("mlp.gate_up_proj.", "mlp.gate_proj.", 0),
        ("mlp.gate_up_proj.", "mlp.up_proj.", 1),
    ]
    params_dict = dict(self.named_parameters(remove_duplicate=False))
    loaded_params: set[str] = set()

    for name, loaded_weight in weights:
        for (param_name, weight_name, shard_id) in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params
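
The stacked-parameter mapping folds separate q/k/v (and gate/up) checkpoint tensors into the fused vLLM parameters. A minimal sketch of the renaming step for one hypothetical checkpoint entry:

stacked_params_mapping = [
    ("attn.qkv.", "attn.q.", "q"),
    ("attn.qkv.", "attn.k.", "k"),
    ("attn.qkv.", "attn.v.", "v"),
    ("mlp.gate_up_proj.", "mlp.gate_proj.", 0),
    ("mlp.gate_up_proj.", "mlp.up_proj.", 1),
]

name = "blocks.0.attn.q.weight"          # hypothetical checkpoint name
for param_name, weight_name, shard_id in stacked_params_mapping:
    if weight_name in name:
        fused = name.replace(weight_name, param_name)
        print(fused, shard_id)           # blocks.0.attn.qkv.weight q
        break
else:
    print(name, "loaded as-is")          # e.g. norm weights fall through here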

rotary_pos_emb_thw

rotary_pos_emb_thw(t, h, w)
Source code in vllm/model_executor/models/qwen2_5_vl.py
def rotary_pos_emb_thw(self, t, h, w):
    hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
    wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
    hpos_ids = hpos_ids.reshape(
        h // self.spatial_merge_size,
        self.spatial_merge_size,
        w // self.spatial_merge_size,
        self.spatial_merge_size,
    ).permute(0, 2, 1, 3).flatten()
    wpos_ids = wpos_ids.reshape(
        h // self.spatial_merge_size,
        self.spatial_merge_size,
        w // self.spatial_merge_size,
        self.spatial_merge_size,
    ).permute(0, 2, 1, 3).flatten()
    pos_ids = torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)
    max_size = max(h, w)
    rotary_pos_emb_full = self.rotary_pos_emb(max_size)
    rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
    rotary_pos_emb = rotary_pos_emb.reshape(
        rotary_pos_emb.shape[0] // self.spatial_merge_unit,
        self.spatial_merge_unit, -1)

    return rotary_pos_emb
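
A small, self-contained example of the ordering this produces, assuming a hypothetical 4x4 patch grid and spatial_merge_size = 2: positions are emitted merge-block by merge-block rather than row by row, so each group of spatial_merge_unit rows in the result belongs to one 2x2 merge block:

import torch

h, w, merge = 4, 4, 2
hpos = torch.arange(h).unsqueeze(1).expand(-1, w)
wpos = torch.arange(w).unsqueeze(0).expand(h, -1)
hpos = hpos.reshape(h // merge, merge, w // merge,
                    merge).permute(0, 2, 1, 3).flatten()
wpos = wpos.reshape(h // merge, merge, w // merge,
                    merge).permute(0, 2, 1, 3).flatten()
print(list(zip(hpos.tolist(), wpos.tolist())))
# [(0, 0), (0, 1), (1, 0), (1, 1),    <- first 2x2 merge block
#  (0, 2), (0, 3), (1, 2), (1, 3),    <- block to its right
#  (2, 0), (2, 1), (3, 0), (3, 1),
#  (2, 2), (2, 3), (3, 2), (3, 3)]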

all_gather_interleave

all_gather_interleave(
    local_tensor, hidden_size: int, tp_size: int
)

All-gather the input tensor in an interleaved fashion across the tensor model parallel group.

Source code in vllm/model_executor/models/qwen2_5_vl.py
def all_gather_interleave(local_tensor, hidden_size: int, tp_size: int):
    """All-gather the input tensor interleavely across model parallel group."""
    import torch.distributed as dist
    gathered_tensors = [torch.zeros_like(local_tensor) for _ in range(tp_size)]
    dist.all_gather(gathered_tensors,
                    local_tensor,
                    group=parallel_state.get_tp_group().device_group)

    gathered_tensors_split = [
        torch.split(tensor, hidden_size // tp_size, -1)
        for tensor in gathered_tensors
    ]
    ordered_tensors = [
        tensor for pair in zip(*gathered_tensors_split) for tensor in pair
    ]
    result_tensor = torch.cat(ordered_tensors, dim=-1)
    return result_tensor
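
A toy, single-process illustration of the split/zip/cat re-ordering (the collective itself is omitted, and the rank tensors below are hypothetical stand-ins for the gathered outputs): chunks that were sharded across TP ranks are put back next to each other instead of being concatenated rank by rank:

import torch

hidden_size, tp_size = 4, 2
rank0 = torch.tensor([[0., 1., 2., 3.]])   # pretend output gathered from rank 0
rank1 = torch.tensor([[4., 5., 6., 7.]])   # pretend output gathered from rank 1
splits = [torch.split(t, hidden_size // tp_size, -1) for t in (rank0, rank1)]
ordered = [t for pair in zip(*splits) for t in pair]
print(torch.cat(ordered, dim=-1))          # [[0., 1., 4., 5., 2., 3., 6., 7.]]
# The first chunk of every rank comes first, then the second chunk of every
# rank, and so on, so each column-sharded sub-tensor ends up contiguous again.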