vllm.v1.spec_decode.eagle

PADDING_SLOT_ID module-attribute

PADDING_SLOT_ID = -1
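
Sentinel slot index used by propose to mask out slot mappings for draft positions past max_model_len, so the KV cache is not updated with padding tokens. A minimal sketch of that masking pattern on standalone tensors (the tensors below are illustrative, not the proposer's real buffers):

import torch

PADDING_SLOT_ID = -1
max_model_len = 8

positions = torch.tensor([6, 7, 8, 9])             # per-request draft positions (hypothetical)
slot_mapping = torch.tensor([102, 103, 104, 105])  # hypothetical KV slots
exceeds_max_model_len = positions >= max_model_len
slot_mapping = slot_mapping.masked_fill(exceeds_max_model_len, PADDING_SLOT_ID)
# slot_mapping -> tensor([102, 103,  -1,  -1])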

logger module-attribute

logger = init_logger(__name__)

EagleProposer

Source code in vllm/v1/spec_decode/eagle.py
class EagleProposer:

    def __init__(
        self,
        vllm_config: VllmConfig,
        device: torch.device,
        runner=None,
    ):
        self.vllm_config = vllm_config
        self.speculative_config = vllm_config.speculative_config
        self.draft_model_config = self.speculative_config.draft_model_config
        self.method = self.speculative_config.method

        self.runner = runner
        self.device = device
        self.dtype = vllm_config.model_config.dtype
        self.max_model_len = vllm_config.model_config.max_model_len
        self.block_size = vllm_config.cache_config.block_size
        self.num_speculative_tokens = (
            self.speculative_config.num_speculative_tokens)
        self.max_num_tokens = (
            vllm_config.scheduler_config.max_num_batched_tokens)
        self.token_arange_np = np.arange(self.max_num_tokens)
        # We need to get the hidden size from the draft model config because
        # the draft model's hidden size can be different from the target model's
        # hidden size (e.g., Llama 3.3 70B).
        self.hidden_size = self.draft_model_config.get_hidden_size()

        # Multi-modal data support
        self.mm_registry = MULTIMODAL_REGISTRY
        self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs(
            vllm_config.model_config)

        self.attn_metadata_builder: Optional[AttentionMetadataBuilder] = None

        self.use_cuda_graph = (not current_platform.is_xpu()
                               and self.vllm_config.compilation_config.level
                               == CompilationLevel.PIECEWISE and
                               not self.vllm_config.model_config.enforce_eager)
        self.cudagraph_batch_sizes = list(
            reversed(self.vllm_config.compilation_config.
                     cudagraph_capture_sizes)) if self.use_cuda_graph else []

        # persistent buffers for cuda graph
        self.input_ids = torch.zeros(self.max_num_tokens,
                                     dtype=torch.int32,
                                     device=device)
        self.uses_mrope = self.vllm_config.model_config.uses_mrope
        if self.uses_mrope:
            # M-RoPE needs (3, max_num_tokens)
            self.mrope_positions = torch.zeros((3, self.max_num_tokens),
                                               dtype=torch.int64,
                                               device=device)
        else:
            # RoPE needs (max_num_tokens,)
            self.positions = torch.zeros(self.max_num_tokens,
                                         dtype=torch.int64,
                                         device=device)
        self.hidden_states = torch.zeros(
            (self.max_num_tokens, self.hidden_size),
            dtype=self.dtype,
            device=device)

        # We need +1 here because the arange is used to set query_start_loc,
        # which has one more element than batch_size.
        max_batch_size = vllm_config.scheduler_config.max_num_seqs
        max_num_slots_for_arange = max(max_batch_size + 1, self.max_num_tokens)
        self.arange = torch.arange(max_num_slots_for_arange,
                                   device=device,
                                   dtype=torch.int32)

        self.inputs_embeds = torch.zeros(
            (self.max_num_tokens, self.hidden_size),
            dtype=self.dtype,
            device=device)

        self.backup_next_token_ids = CpuGpuBuffer(
            max_batch_size,
            dtype=torch.int32,
            pin_memory=is_pin_memory_available(),
            device=device,
            with_numpy=True)

        # Determine allowed attention backends once during initialization.
        self.allowed_attn_types: Optional[tuple] = None
        if current_platform.is_rocm():
            rocm_types = [TritonAttentionMetadata, FlashAttentionMetadata]
            # vllm.v1.attention.backends.rocm_aiter_fa is an optional backend
            if find_spec("vllm.v1.attention.backends.rocm_aiter_fa"):
                from vllm.v1.attention.backends.rocm_aiter_fa import (
                    AiterFlashAttentionMetadata)
                rocm_types.append(AiterFlashAttentionMetadata)
            self.allowed_attn_types = tuple(rocm_types)

        # Parse the speculative token tree.
        spec_token_tree = self.speculative_config.speculative_token_tree
        self.tree_choices: list[tuple[int,
                                      ...]] = ast.literal_eval(spec_token_tree)
        tree_depth = len(self.tree_choices[-1])
        # Precompute per-level properties of the tree.
        num_drafts_per_level = [0] * tree_depth
        for node in self.tree_choices:
            num_drafts_per_level[len(node) - 1] += 1
        self.cu_drafts_per_level = [num_drafts_per_level[0]]
        self.child_drafts_per_level = [num_drafts_per_level[0]]
        for level in range(1, tree_depth):
            self.cu_drafts_per_level.append(self.cu_drafts_per_level[-1] +
                                            num_drafts_per_level[level])
            self.child_drafts_per_level.append(num_drafts_per_level[level] //
                                               num_drafts_per_level[level - 1])
        # Precompute draft position offsets in flattened tree.
        self.tree_draft_pos_offsets = torch.arange(
            1,
            len(self.tree_choices) + 1,
            device=device,
            dtype=torch.int32,
        ).repeat(max_batch_size, 1)

    def _get_positions(self, num_tokens: int):
        if self.uses_mrope:
            return self.mrope_positions[:, :num_tokens]
        return self.positions[:num_tokens]

    def _set_positions(self, num_tokens: int, positions: torch.Tensor):
        if self.uses_mrope:
            self.mrope_positions[:, :num_tokens] = positions
        else:
            self.positions[:num_tokens] = positions

    def propose(
        self,
        # [num_tokens]
        target_token_ids: torch.Tensor,
        # [num_tokens] or [3, num_tokens] when M-RoPE is enabled
        target_positions: torch.Tensor,
        # [num_tokens, hidden_size]
        target_hidden_states: torch.Tensor,
        # [batch_size]
        next_token_ids: torch.Tensor,
        last_token_indices: Optional[torch.Tensor],
        common_attn_metadata: CommonAttentionMetadata,
        sampling_metadata: SamplingMetadata,
        mm_embed_inputs: Optional[tuple[list[torch.Tensor],
                                        torch.Tensor]] = None,
    ) -> torch.Tensor:
        num_tokens = target_token_ids.shape[0]
        batch_size = next_token_ids.shape[0]

        if last_token_indices is None:
            last_token_indices = common_attn_metadata.query_start_loc[1:] - 1

        if self.method == "eagle3":
            assert isinstance(self.model, Eagle3LlamaForCausalLM)
            target_hidden_states = self.model.combine_hidden_states(
                target_hidden_states)
            assert target_hidden_states.shape[-1] == self.hidden_size
        # Shift the input ids by one token.
        # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3]
        self.input_ids[:num_tokens - 1] = target_token_ids[1:]
        # Replace the last token with the next token.
        # E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4]
        self.input_ids[last_token_indices] = next_token_ids

        assert self.runner is not None

        # FIXME: need to consider multiple kv_cache_groups
        ubatch_id = dbo_current_ubatch_id()
        attn_metadata_builder = \
            self.runner.attn_groups[0][0].metadata_builders[ubatch_id]
        attn_metadata = attn_metadata_builder.build_for_drafting(
            common_attn_metadata=common_attn_metadata, draft_index=0)
        # FIXME: support hybrid kv for draft model (remove separate indexer)
        if self.draft_indexer_metadata_builder:
            draft_indexer_metadata = (
                self.draft_indexer_metadata_builder.build_for_drafting(
                    common_attn_metadata=common_attn_metadata,
                    draft_index=0,
                ))
        else:
            draft_indexer_metadata = None
        # At this moment, we assume all eagle layers belong to the same KV
        # cache group, thus using the same attention metadata.
        per_layer_attn_metadata = {}
        for layer_name in self.attn_layer_names:
            per_layer_attn_metadata[layer_name] = attn_metadata
        for layer_name in self.indexer_layer_names:
            assert draft_indexer_metadata is not None
            per_layer_attn_metadata[layer_name] = draft_indexer_metadata

        if self.use_cuda_graph and \
                num_tokens <= self.cudagraph_batch_sizes[-1]:
            num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens)
        else:
            num_input_tokens = num_tokens
        # copy inputs to buffer for cudagraph
        self._set_positions(num_tokens, target_positions)
        self.hidden_states[:num_tokens] = target_hidden_states

        if self.supports_mm_inputs:
            mm_embeds, is_mm_embed = mm_embed_inputs or (None, None)

            self.inputs_embeds[:num_tokens] = self.model.get_input_embeddings(
                self.input_ids[:num_tokens],
                multimodal_embeddings=mm_embeds,
                is_multimodal=is_mm_embed,
            )

            input_ids = None
            inputs_embeds = self.inputs_embeds[:num_input_tokens]
        else:
            input_ids = self.input_ids[:num_input_tokens]
            inputs_embeds = None

        with set_forward_context(per_layer_attn_metadata,
                                 self.vllm_config,
                                 num_tokens=num_input_tokens):
            ret_hidden_states = self.model(
                input_ids=input_ids,
                positions=self._get_positions(num_input_tokens),
                hidden_states=self.hidden_states[:num_input_tokens],
                inputs_embeds=inputs_embeds,
            )
            if self.method == "mtp":
                last_hidden_states = ret_hidden_states
                hidden_states = last_hidden_states
            else:
                last_hidden_states, hidden_states = ret_hidden_states
        sample_hidden_states = last_hidden_states[last_token_indices]
        logits = self.model.compute_logits(sample_hidden_states)

        # Early exit if there is only one draft token to be generated.
        if self.num_speculative_tokens == 1:
            draft_token_ids = logits.argmax(dim=-1)
            return draft_token_ids.view(-1, 1)

        if self.uses_mrope:
            positions = target_positions[:, last_token_indices]
        else:
            positions = target_positions[last_token_indices]
        if self.method in ("deepseek_mtp", "ernie_mtp", "longcat_flash_mtp"):
            hidden_states = self.hidden_states[last_token_indices]
        else:
            hidden_states = hidden_states[last_token_indices]

        if isinstance(attn_metadata, TreeAttentionMetadata):
            # Draft using tree attention.
            draft_token_ids_list = self.propose_tree(
                batch_size=batch_size,
                logits=logits,
                positions=positions,
                hidden_states=hidden_states,
                common_attn_metadata=common_attn_metadata,
            )
            # [batch_size, num_tree_tokens]
            return torch.cat(draft_token_ids_list, dim=1)

        draft_token_ids = logits.argmax(dim=-1)

        if self.allowed_attn_types is not None and \
            not isinstance(attn_metadata, self.allowed_attn_types):
            raise ValueError(
                f"Unsupported attention metadata type for speculative "
                "decoding with num_speculative_tokens > 1: "
                f"{type(attn_metadata)}. Supported types are: "
                f"{self.allowed_attn_types}")

        # Generate the remaining draft tokens.
        draft_token_ids_list = [draft_token_ids]

        if self.use_cuda_graph and \
                batch_size <= self.cudagraph_batch_sizes[-1]:
            input_batch_size = self.vllm_config.pad_for_cudagraph(batch_size)
        else:
            input_batch_size = batch_size

        common_attn_metadata.num_actual_tokens = batch_size
        common_attn_metadata.max_query_len = 1
        common_attn_metadata.query_start_loc = self.arange[:batch_size + 1]
        common_attn_metadata.query_start_loc_cpu = torch.from_numpy(
            self.token_arange_np[:batch_size + 1]).clone()
        for token_index in range(self.num_speculative_tokens - 1):
            # Update the inputs.
            # cast to int32 is crucial when eagle model is compiled.
            # tensor.argmax() returns int64 by default.
            input_ids = draft_token_ids_list[-1].int()
            if self.uses_mrope:
                positions += 1
                # NOTE(woosuk): We should handle the case where the draft model
                # generates tokens beyond the max model length.
                # Since it is complex to remove such requests from the batch,
                # we keep them in the batch but adjust the position ids
                # and slot mappings to avoid the
                # out-of-range access during the model execution.
                # The draft tokens generated with this adjustment
                # should be ignored.
                exceeds_max_model_len = positions[0] >= self.max_model_len
                # Mask out the position ids that exceed the max model length.
                # Otherwise, we may get out-of-range error in RoPE.
                clamped_positions = torch.where\
                    (exceeds_max_model_len.unsqueeze(0), \
                     torch.zeros_like(positions), positions)
            else:
                positions += 1
                exceeds_max_model_len = positions >= self.max_model_len
                clamped_positions = torch.where(exceeds_max_model_len, 0,
                                                positions)

            # Increment the sequence lengths.
            common_attn_metadata.seq_lens += 1
            common_attn_metadata.seq_lens_cpu += 1
            # For the requests that exceed the max model length, we set the
            # sequence length to 1 to minimize their overheads in attention.

            common_attn_metadata.seq_lens.masked_fill_(exceeds_max_model_len,
                                                       1)

            common_attn_metadata.num_computed_tokens_cpu = \
                common_attn_metadata.seq_lens_cpu - 1

            # Compute the slot mapping.
            if self.uses_mrope:
                # all dimensions of positions are the same
                block_numbers = clamped_positions[0] // self.block_size
            else:
                block_numbers = clamped_positions // self.block_size
            block_ids = common_attn_metadata.block_table_tensor.gather(
                dim=1, index=block_numbers.view(-1, 1))
            block_ids = block_ids.view(-1)
            if self.uses_mrope:
                common_attn_metadata.slot_mapping = (
                    block_ids * self.block_size +
                    clamped_positions[0] % self.block_size)
            else:
                common_attn_metadata.slot_mapping = (
                    block_ids * self.block_size +
                    clamped_positions % self.block_size)
            # Mask out the slot mappings that exceed the max model length.
            # Otherwise, the KV cache will be inadvertently updated with the
            # padding tokens.
            common_attn_metadata.slot_mapping.masked_fill_(
                exceeds_max_model_len, PADDING_SLOT_ID)

            # Rebuild attention metadata
            attn_metadata = attn_metadata_builder.build_for_drafting(  # type: ignore
                common_attn_metadata=common_attn_metadata,
                draft_index=token_index + 1)
            for layer_name in self.attn_layer_names:
                per_layer_attn_metadata[layer_name] = attn_metadata

            # copy inputs to buffer for cudagraph
            self.input_ids[:batch_size] = input_ids
            self._set_positions(batch_size, clamped_positions)
            self.hidden_states[:batch_size] = hidden_states
            if self.supports_mm_inputs:
                self.inputs_embeds[:batch_size] = \
                    self.model.get_input_embeddings(input_ids)

                input_ids = None
                inputs_embeds = self.inputs_embeds[:input_batch_size]
            else:
                input_ids = self.input_ids[:input_batch_size]
                inputs_embeds = None

            # Run the model.
            with set_forward_context(per_layer_attn_metadata,
                                     self.vllm_config,
                                     num_tokens=input_batch_size):
                ret_hidden_states = self.model(
                    input_ids=input_ids,
                    positions=self._get_positions(input_batch_size),
                    hidden_states=self.hidden_states[:input_batch_size],
                    inputs_embeds=inputs_embeds,
                )
                if self.method == "mtp":
                    last_hidden_states = ret_hidden_states
                    hidden_states = ret_hidden_states
                else:
                    last_hidden_states, hidden_states = ret_hidden_states
            hidden_states = hidden_states[:batch_size]
            logits = self.model.compute_logits(last_hidden_states[:batch_size])
            draft_token_ids = logits.argmax(dim=-1)
            draft_token_ids_list.append(draft_token_ids)

        # [batch_size, num_speculative_tokens]
        draft_token_ids = torch.stack(draft_token_ids_list, dim=1)
        return draft_token_ids

    def prepare_next_token_ids_cpu(
            self, sampled_token_ids: list[list[int]],
            requests: dict[str,
                           CachedRequestState], gpu_input_batch: InputBatch,
            num_scheduled_tokens: dict[str, int]) -> torch.Tensor:
        """
        This function is used to prepare the inputs for speculative decoding.
        It calculates the next token ids for each request based on the sampled
        token ids from the CPU. If a request has no sampled token ids (e.g.,
        during the initial decoding steps), it falls back to using the request
        state to get the next token id.
        """
        req_ids = gpu_input_batch.req_ids
        next_token_ids: list[int] = []
        for i, token_ids in enumerate(sampled_token_ids):
            if token_ids:
                # Common case.
                next_token_id = token_ids[-1]
            else:
                # Partial prefill (rare case).
                # Get the next token id from the request state.
                req_id = req_ids[i]
                req_state = requests[req_id]
                seq_len = (req_state.num_computed_tokens +
                           num_scheduled_tokens[req_id])
                next_token_id = req_state.get_token_id(seq_len)
            next_token_ids.append(next_token_id)
        next_token_ids = torch.tensor(next_token_ids,
                                      dtype=torch.int32,
                                      device=self.input_ids.device)
        return next_token_ids

    def prepare_next_token_ids_padded(self,
                               common_attn_metadata: CommonAttentionMetadata,
                               sampled_token_ids: torch.Tensor,
                               requests: dict[str, CachedRequestState],
                               gpu_input_batch: InputBatch,
                               discard_request_indices: torch.Tensor,
                               num_discarded_requests: int) -> \
                                tuple[torch.Tensor, torch.Tensor]:
        """
        This function is used to prepare the inputs for speculative decoding.
        It calculates the next token ids and the number of valid sampled tokens
        for each request, considering the "discarded" requests whose next token
        is not sampled and comes from `request.get_token_id()` instead.
        It also accounts for the rejected tokens in `sampled_token_ids`.
        This function must use device functions to operate on the inputs, and
        should not introduce any blocking CPU-GPU synchronization.
        """
        # TODO(Ben): Combine this into a custom fused kernel

        # Precompute get_token_id for when there is no valid next token
        num_reqs = gpu_input_batch.num_reqs
        self.backup_next_token_ids.np[:num_reqs] = np.array([
            requests[gpu_input_batch.req_ids[i]].get_token_id(
                common_attn_metadata.seq_lens_cpu[i].item())
            for i in range(num_reqs)
        ])
        self.backup_next_token_ids.copy_to_gpu(num_reqs)

        # Mask out the sampled tokens indices that should not be sampled.
        discard_sampled_tokens_req_indices = \
            discard_request_indices[:num_discarded_requests]

        valid_sampled_token_ids_gpu = sampled_token_ids.clone()
        valid_sampled_token_ids_gpu.index_fill_(
            0, discard_sampled_tokens_req_indices, -1)

        # Generate a mask for all valid tokens within those requests
        max_gen_len = sampled_token_ids.shape[-1]
        if max_gen_len == 1:
            valid_mask = torch.ones_like(valid_sampled_token_ids_gpu,
                                         dtype=torch.bool)
        else:
            valid_mask = (
                (valid_sampled_token_ids_gpu != -1) &
                (valid_sampled_token_ids_gpu < gpu_input_batch.vocab_size))

        # Count the number of valid tokens in each request
        valid_sampled_tokens_count = valid_mask.sum(dim=1)

        # Get the rightmost valid index per row
        last_valid_indices = valid_sampled_tokens_count - 1
        last_valid_indices_safe = torch.clamp(last_valid_indices, min=0)

        # Get last valid token from each row
        # (assume undefined state where there is no valid token)
        selected_tokens = torch.gather(
            valid_sampled_token_ids_gpu, 1,
            last_valid_indices_safe.unsqueeze(1)).squeeze(1)

        # Use last token if valid, pre-computed backup if not
        batch_size = valid_sampled_token_ids_gpu.shape[0]
        next_token_ids = torch.where(
            last_valid_indices != -1, selected_tokens,
            self.backup_next_token_ids.gpu[:batch_size])

        return next_token_ids, valid_sampled_tokens_count

    def prepare_inputs_padded(self,
                                common_attn_metadata: CommonAttentionMetadata,
                                spec_decode_metadata: SpecDecodeMetadata,
                                valid_sampled_tokens_count: torch.Tensor) -> \
                    tuple[CommonAttentionMetadata, torch.Tensor, torch.Tensor]:
        """
        This function is used to prepare the inputs for speculative decoding
        It updates the common_attn_metadata for speculative decoding,
        but does not consider the rejected tokens. Instead, all tokens
        are included as inputs to the speculator, with the rejected tokens
        used as padding and filtered out later by `token_indices_to_sample`.
        No blocking CPU operations should be introduced in this function.
        """
        num_draft_tokens_gpu = torch.cat([
            spec_decode_metadata.cu_num_draft_tokens[0:1],
            spec_decode_metadata.cu_num_draft_tokens[1:] -
            spec_decode_metadata.cu_num_draft_tokens[:-1]
        ])

        num_rejected_tokens_gpu = torch.where(
            num_draft_tokens_gpu > 0,
            num_draft_tokens_gpu + 1 - valid_sampled_tokens_count,
            torch.zeros_like(num_draft_tokens_gpu))
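        # Illustration with hypothetical values:
        #   cu_num_draft_tokens = [2, 5, 5] -> num_draft_tokens_gpu = [2, 3, 0]
        #   valid_sampled_tokens_count = [1, 4, 1]
        #   -> num_rejected_tokens_gpu = [2, 0, 0]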

        query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu

        new_query_len_per_req = (query_start_loc_cpu[1:] -
                                 query_start_loc_cpu[:-1])

        total_num_tokens = query_start_loc_cpu[-1].item()
        token_indices = self.arange[:total_num_tokens]

        spec_common_attn_metadata = CommonAttentionMetadata(
            query_start_loc=common_attn_metadata.query_start_loc,
            seq_lens=common_attn_metadata.seq_lens,
            query_start_loc_cpu=query_start_loc_cpu,
            seq_lens_cpu=common_attn_metadata.seq_lens_cpu,
            num_computed_tokens_cpu=common_attn_metadata.
            num_computed_tokens_cpu,
            num_reqs=common_attn_metadata.num_reqs,
            num_actual_tokens=total_num_tokens,
            max_query_len=new_query_len_per_req.max().item(),
            max_seq_len=common_attn_metadata.seq_lens_cpu.max().item(),
            block_table_tensor=common_attn_metadata.block_table_tensor,
            slot_mapping=common_attn_metadata.slot_mapping[token_indices],
            causal=True,
        )

        token_indices_to_sample = common_attn_metadata.query_start_loc[1:] - 1 \
            - num_rejected_tokens_gpu

        return spec_common_attn_metadata, token_indices, token_indices_to_sample

    def propose_tree(
        self,
        batch_size: int,
        # [num_tokens, vocab_size]
        logits: torch.Tensor,
        # [num_tokens]
        positions: torch.Tensor,
        # [num_tokens, hidden_size]
        hidden_states: torch.Tensor,
        common_attn_metadata: CommonAttentionMetadata,
    ) -> list[torch.Tensor]:
        tree_attn_metadata_builder = \
            self.runner.attn_groups[0][0].get_metadata_builder()
        assert isinstance(tree_attn_metadata_builder,
                          TreeAttentionMetadataBuilder)

        total_num_drafts = self.cu_drafts_per_level[0]
        level_num_drafts = total_num_drafts
        # Sample a draft token for each child at the tree root level.
        num_children = self.child_drafts_per_level[0]
        if num_children == 1:
            draft_token_ids = logits.argmax(dim=-1).view(batch_size, -1)
        else:
            draft_token_ids = torch.topk(logits, num_children,
                                         dim=-1).indices.view(batch_size, -1)
        draft_token_ids_list = [draft_token_ids]
        draft_hidden_states = hidden_states.view(batch_size, 1, -1)

        # Initialize empty tensors for concatenation with the level outputs.
        tree_input_ids = torch.empty(0,
                                     device=self.input_ids.device,
                                     dtype=self.input_ids.dtype)
        tree_positions = torch.empty(0,
                                     device=self.positions.device,
                                     dtype=self.positions.dtype)
        tree_hidden_states = torch.empty(0,
                                         device=self.hidden_states.device,
                                         dtype=self.hidden_states.dtype)
        # Precompute the draft token positions.
        flattened_draft_positions = (
            positions.view(batch_size, -1) +
            self.tree_draft_pos_offsets[:batch_size, :])
        tree_depth = len(self.cu_drafts_per_level)
        for level in range(tree_depth - 1):
            # Get draft positions for RoPE.
            draft_positions = positions + (level + 1)
            exceeds_max_model_len = (positions +
                                     total_num_drafts) >= self.max_model_len
            # Mask out the position ids that exceed the max model length.
            # Otherwise, we may get out-of-range error in RoPE.
            draft_positions = torch.where(
                exceeds_max_model_len,
                0,
                draft_positions,
            ).view(batch_size, -1)

            if level_num_drafts > 1:
                # Repeat the positions for each draft at this level.
                draft_positions = draft_positions.repeat_interleave(
                    level_num_drafts, dim=1)

            if num_children > 1:
                # Repeat draft hidden states for each child.
                draft_hidden_states = draft_hidden_states.repeat_interleave(
                    num_children, dim=1)

            # Concatenate the draft tokens, positions, and hidden states.
            tree_input_ids = torch.cat([tree_input_ids, draft_token_ids],
                                       dim=1)
            tree_positions = torch.cat([tree_positions, draft_positions],
                                       dim=1)
            tree_hidden_states = torch.cat(
                [tree_hidden_states, draft_hidden_states], dim=1)

            # Build new attention metadata for the next level of drafts.
            # This is necessary to support tree attention.
            query_len = total_num_drafts
            common_attn_metadata = replace(
                common_attn_metadata,
                query_start_loc=query_len * self.arange[:batch_size + 1],
                seq_lens=common_attn_metadata.seq_lens + level_num_drafts,
                num_actual_tokens=batch_size * query_len,
                max_query_len=query_len,
            )
            attn_metadata = tree_attn_metadata_builder.build_for_drafting(
                common_attn_metadata=common_attn_metadata,
                draft_index=level + 1,
            )

            # Apply new attention metadata to all layers.
            per_layer_attn_metadata = {}
            for layer_name in self.attn_layer_names:
                per_layer_attn_metadata[layer_name] = attn_metadata

            # Consider max model length.
            attn_metadata.max_seq_len = min(attn_metadata.max_seq_len,
                                            self.max_model_len)
            # For the requests that exceed the max model length, we set the
            # sequence length to 1 to minimize their overheads in attention.
            attn_metadata.seq_lens.masked_fill_(exceeds_max_model_len, 1)

            # Compute the slot mapping.
            query_positions = flattened_draft_positions[:, level:level +
                                                        query_len]
            block_numbers = query_positions // self.block_size
            block_ids = attn_metadata.block_table.gather(dim=1,
                                                         index=block_numbers)
            slot_mapping = (block_ids * self.block_size +
                            query_positions % self.block_size)
            # Mask out the slot mappings that exceed the max model length.
            # Otherwise, the KV cache will be inadvertently updated with the
            # padding tokens.
            slot_mapping[exceeds_max_model_len] = PADDING_SLOT_ID
            attn_metadata.slot_mapping = slot_mapping.view(-1)

            # Copy inputs to buffer for cudagraph.
            num_tokens = attn_metadata.num_actual_tokens
            input_ids = tree_input_ids.view(-1)
            self.input_ids[:num_tokens] = input_ids
            self.positions[:num_tokens] = tree_positions.view(-1)
            self.hidden_states[:num_tokens] = tree_hidden_states.view(
                num_tokens, -1)

            if self.use_cuda_graph and \
                    num_tokens <= self.cudagraph_batch_sizes[-1]:
                num_input_tokens = self.vllm_config.pad_for_cudagraph(
                    num_tokens)
            else:
                num_input_tokens = num_tokens
            # Run the model.
            with set_forward_context(per_layer_attn_metadata,
                                     self.vllm_config,
                                     num_tokens=num_input_tokens):
                last_hidden_states, hidden_states = self.model(
                    input_ids=self.input_ids[:num_input_tokens],
                    positions=self.positions[:num_input_tokens],
                    hidden_states=self.hidden_states[:num_input_tokens],
                    inputs_embeds=None,
                )

            # Get the output hidden states for the draft tokens.
            draft_hidden_states = hidden_states[:num_tokens].view(
                batch_size, query_len, -1)[:, -level_num_drafts:]
            draft_last_hidden_states = last_hidden_states[:num_tokens].view(
                batch_size, query_len, -1)[:, -level_num_drafts:]

            # Get the output logits for the draft tokens.
            logits = self.model.compute_logits(
                draft_last_hidden_states.reshape(batch_size * level_num_drafts,
                                                 -1))

            # Sample a draft token for each child at the next tree level.
            num_children = self.child_drafts_per_level[level + 1]
            if num_children == 1:
                draft_token_ids = logits.argmax(dim=-1).view(batch_size, -1)
            else:
                draft_token_ids = torch.topk(logits, num_children,
                                             dim=-1).indices.view(
                                                 batch_size, -1)
            draft_token_ids_list.append(draft_token_ids)

            # Update the # drafts counters for the next tree level.
            level_num_drafts = self.cu_drafts_per_level[level +
                                                        1] - total_num_drafts
            total_num_drafts = self.cu_drafts_per_level[level + 1]
        return draft_token_ids_list

    def prepare_inputs(
        self,
        common_attn_metadata: CommonAttentionMetadata,
        sampled_token_ids: list[list[int]],
        num_draft_tokens: list[int],
    ) -> tuple[CommonAttentionMetadata, torch.Tensor]:
        """
        This function is used to prepare the inputs for speculative decoding.
        It updates to the common_attn_metadata to account for the rejected
        tokens (and newly sampled tokens). It also returns the token indices
        of the tokens that should be fed to the speculator.
        """
        # E.g.
        #  common_attn_metadata.query_start_loc{_cpu}:
        #       [0, q1, q1 + q2, q1 + q2 + q3]
        #  common_attn_metadata.seq_lens{_cpu}: [s1, s2, s3]
        #  num_rejected_tokens: [n1, n2, n3]
        # This function computes the intermediate values:
        #  num_tokens_per_req: [q1 - n1, q2 - n2, q3 - n3]
        # And returns:
        #  common_attn_metadata.query_start_loc{_cpu}:
        #       [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
        #  common_attn_metadata.seq_lens{_cpu}:
        #       [s1 - n1 + 1, s2 - n2 + 1, s3 - n3 + 1]
        #  token_indices: [0, 1, ..., q1 - n1 - 1,
        #                 q1, q1 + 1, ..., q1 + q2 - n2 - 1,
        #                 q1 + q2, q1 + q2 + 1, ..., q1 + q2 + q3 - n3 - 1]
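        # Concrete instance with hypothetical numbers:
        #   query_start_loc_cpu: [0, 3, 7, 12]  (q1=3, q2=4, q3=5)
        #   num_rejected_tokens: [1, 0, 2]
        #   -> new query_start_loc: [0, 2, 6, 9]
        #   -> token_indices: [0, 1, 3, 4, 5, 6, 7, 8, 9]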

        num_rejected_tokens = [
            n + 1 - len(sampled_token_ids[i]) if n > 0 else 0
            for i, n in enumerate(num_draft_tokens)
        ]
        num_rejected_tokens = torch.tensor(num_rejected_tokens,
                                           dtype=torch.int32)

        device = common_attn_metadata.query_start_loc.device
        query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
        new_seq_lens_cpu = common_attn_metadata.seq_lens_cpu \
            - num_rejected_tokens

        # [0, q1, q1 + q2, q1 + q2 + q3] -> [q1, q2, q3]
        new_query_len_per_req = (query_start_loc_cpu[1:] -
                                 query_start_loc_cpu[:-1])
        # [q1, q2, q3] -> [q1 - n1, q2 - n2, q3 - n3]
        new_num_tokens_per_req = new_query_len_per_req - num_rejected_tokens
        new_num_tokens_per_req_np = new_num_tokens_per_req.numpy()

        # [q1 - n1, q2 - n2, q3 - n3] ->
        # [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
        new_query_start_loc_cpu = torch.zeros(
            query_start_loc_cpu.shape,
            dtype=torch.int32,
            pin_memory=is_pin_memory_available())
        new_query_start_loc_np = new_query_start_loc_cpu.numpy()
        np.cumsum(new_num_tokens_per_req_np, out=new_query_start_loc_np[1:])

        total_num_tokens = new_query_start_loc_np[-1]
        # Example assuming num_tokens_per_req_np = [2, 4, 3]
        # this implies that `new_query_start_locs` is:
        # [0, 2, 6, 9] ->
        # [0, 0, 2, 2, 2, 2, 6, 6, 6]
        #  _r1_  ____r2____  ___r3__
        new_query_start_locs_expanded = np.repeat(new_query_start_loc_np[:-1],
                                                  new_num_tokens_per_req_np)
        # [0, 1, 2, 3, 4, 5, 6, 7, 8] ->
        # [0, 1, 0, 1, 2, 3, 0, 1, 2]
        #  _r1_  ____r2____  ___r3__
        token_offsets = self.token_arange_np[:total_num_tokens] \
            - new_query_start_locs_expanded

        # Expand starting positions to match token pattern
        # [0, q1, q1 + q2] ->
        # [0, 0, q1, q1, q1, q1, q1 + q2, q1 + q2, q1 + q2]
        #  _r1_  _____r2_______  ___________r3____________
        old_query_start_locs_expanded = np.repeat(
            query_start_loc_cpu[:-1].numpy(), new_num_tokens_per_req_np)
        # Final token indices are:
        # [0, 1,                                // req 1
        #  q1 + 0, q1 + 1, q1 + 2, q1 + 3,       // req 2
        #  q1 + q2 + 0, q1 + q2 + 1, q1 + q2 + 2] // req 3
        token_indices_np = token_offsets + old_query_start_locs_expanded
        token_indices = torch.from_numpy(token_indices_np).to(
            device, non_blocking=True)

        spec_common_attn_metadata = CommonAttentionMetadata(
            query_start_loc=new_query_start_loc_cpu.to(device,
                                                       non_blocking=True),
            seq_lens=new_seq_lens_cpu.to(device, non_blocking=True),
            query_start_loc_cpu=new_query_start_loc_cpu,
            seq_lens_cpu=new_seq_lens_cpu,
            num_computed_tokens_cpu=common_attn_metadata.
            num_computed_tokens_cpu,
            num_reqs=common_attn_metadata.num_reqs,
            num_actual_tokens=total_num_tokens,
            max_query_len=new_query_len_per_req.max().item(),
            max_seq_len=new_seq_lens_cpu.max().item(),
            block_table_tensor=common_attn_metadata.block_table_tensor,
            slot_mapping=common_attn_metadata.slot_mapping[token_indices],
            causal=True,
        )

        return spec_common_attn_metadata, token_indices

    def get_model_name(self, model: nn.Module) -> str:
        if hasattr(model, 'module'):  # multi-GPU
            model = model.module
        return model.__class__.__name__

    def load_model(self, target_model: nn.Module) -> None:
        draft_model_config = \
            self.vllm_config.speculative_config.draft_model_config
        target_attn_layer_names = set(
            get_layers_from_vllm_config(self.vllm_config, Attention).keys())
        # FIXME: support hybrid kv for draft model
        target_indexer_layer_names = set(
            get_layers_from_vllm_config(self.vllm_config,
                                        DeepseekV32IndexerCache).keys())

        from vllm.compilation.backends import set_model_tag
        with set_model_tag("eagle_head"):
            self.model = get_model(vllm_config=self.vllm_config,
                                   model_config=draft_model_config)

        draft_attn_layer_names = (
            get_layers_from_vllm_config(self.vllm_config, Attention).keys() -
            target_attn_layer_names)
        indexer_layers = get_layers_from_vllm_config(self.vllm_config,
                                                     DeepseekV32IndexerCache)
        draft_indexer_layer_names = (indexer_layers.keys() -
                                     target_indexer_layer_names)
        self.attn_layer_names = list(draft_attn_layer_names)
        self.indexer_layer_names = list(draft_indexer_layer_names)

        if self.indexer_layer_names:
            first_layer = self.indexer_layer_names[0]
            self.draft_indexer_metadata_builder = (
                indexer_layers[first_layer].get_attn_backend().get_builder_cls(
                )(
                    indexer_layers[first_layer].get_kv_cache_spec(),
                    self.indexer_layer_names,
                    self.vllm_config,
                    self.device,
                ))
        else:
            self.draft_indexer_metadata_builder = None

        if self.supports_mm_inputs:
            # Even if the target model is multimodal, we can also use
            # text-only draft models
            try:
                dummy_input_ids = torch.tensor([[1]],
                                               device=self.input_ids.device)
                self.model.get_input_embeddings(dummy_input_ids,
                                                multimodal_embeddings=None)
            except (NotImplementedError, AttributeError, TypeError):
                logger.warning(
                    "Draft model does not support multimodal inputs, "
                    "falling back to text-only mode")
                self.supports_mm_inputs = False

        if supports_multimodal(target_model):
            # handle multimodality
            if (self.get_model_name(target_model) ==
                    "Qwen2_5_VLForConditionalGeneration"):
                self.model.config.image_token_index = (
                    target_model.config.image_token_id)
            else:
                self.model.config.image_token_index = (
                    target_model.config.image_token_index)
            target_language_model = target_model.get_language_model()
        else:
            target_language_model = target_model
        # share embed_tokens with the target model if needed
        if get_pp_group().world_size == 1:
            if hasattr(target_language_model.model, 'embed_tokens'):
                target_embed_tokens = target_language_model.model.embed_tokens
            elif hasattr(target_language_model.model, 'embedding'):
                target_embed_tokens = target_language_model.model.embedding
            else:
                raise AttributeError(
                    "Target model does not have 'embed_tokens' or 'embedding' "
                    "attribute")

            # Check if shapes match and we found the embedding
            eagle_shape = self.model.model.embed_tokens.weight.shape
            target_shape = target_embed_tokens.weight.shape
            if eagle_shape == target_shape:
                logger.info(
                    "Assuming the EAGLE head shares the same vocab embedding"
                    " with the target model.")
                del self.model.model.embed_tokens
                self.model.model.embed_tokens = target_embed_tokens
            else:
                logger.info(
                    "The EAGLE head's vocab embedding will be loaded separately"
                    " from the target model.")
        else:
            logger.info(
                "The EAGLE head's vocab embedding will be loaded separately"
                " from the target model.")

        # share lm_head with the target model if needed
        # some model definitions do not define lm_head explicitly
        # and reuse embed_tokens for lm_head, e.g., CohereForCausalLM
        if self.vllm_config.speculative_config.method != "eagle3":
            if hasattr(target_language_model, "lm_head"):
                logger.info(
                    "Loading EAGLE LM head weights from the target model.")
                self.model.lm_head = target_language_model.lm_head
        else:
            if (hasattr(self.model, "lm_head")
                    and hasattr(target_language_model, "lm_head")
                    and self.model.lm_head.weight.shape
                    == target_language_model.lm_head.weight.shape):
                logger.info("Assuming the EAGLE head shares the same lm_head"
                            " with the target model.")
                del self.model.lm_head
                self.model.lm_head = target_language_model.lm_head
            else:
                logger.info(
                    "The EAGLE head's lm_head will be loaded separately"
                    " from the target model.")

    @torch.inference_mode()
    def dummy_run(
        self,
        num_tokens: int,
    ) -> None:
        with set_forward_context(None, self.vllm_config,
                                 num_tokens=num_tokens):
            if self.supports_mm_inputs:
                input_ids = None
                inputs_embeds = self.inputs_embeds[:num_tokens]
            else:
                input_ids = self.input_ids[:num_tokens]
                inputs_embeds = None

            self.model(
                input_ids=input_ids,
                positions=self._get_positions(num_tokens),
                hidden_states=self.hidden_states[:num_tokens],
                inputs_embeds=inputs_embeds,
            )

    def _get_attention_metadata_builder(
            self) -> list[AttentionMetadataBuilder]:
        """Find and return the attention metadata builders for EAGLE layers.

        Returns:
            The metadata builders for EAGLE layers.

        Raises:
            AssertionError: If no metadata builders are found for EAGLE layers.
        """
        builder = None
        chosen_layer = self.attn_layer_names[0]

        for kv_cache_group in self.runner.attn_groups:
            for attn_group in kv_cache_group:
                if chosen_layer in attn_group.layer_names:
                    builder = attn_group.get_metadata_builder()
                    break
            if builder is not None:
                break

        assert builder is not None, (
            "Failed to find attention metadata builder for EAGLE layers.")
        return builder

    def validate_same_kv_cache_group(self,
                                     kv_cache_config: KVCacheConfig) -> None:
        """
        Validate that all eagle layers belong to the same KVCacheGroup.
        Need this assumption to ensure all eagle layers can use the
        same AttentionMetadata.
        May extend to multiple AttentionMetadata in the future.
        """
        kv_cache_groups: dict[str, int] = {}
        for id, kv_cache_group in enumerate(kv_cache_config.kv_cache_groups):
            for layer_name in kv_cache_group.layer_names:
                kv_cache_groups[layer_name] = id
        assert len(
            set([
                kv_cache_groups[layer_name]
                for layer_name in self.attn_layer_names
            ])
        ) == 1, "All eagle layers should belong to the same kv cache group"
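
The proposer is driven by the model runner: construct it with the engine's VllmConfig, attach the draft weights with load_model, optionally run a dummy forward pass, then call propose each step. A rough sketch of that call order, with every input (config, target model, attention/sampling metadata, target tensors) assumed to be supplied by the runner rather than built here:

proposer = EagleProposer(vllm_config, device, runner=model_runner)
proposer.load_model(target_model)   # shares embed_tokens / lm_head where shapes match
proposer.dummy_run(num_tokens=16)   # dummy forward pass (e.g., for warm-up/capture)

draft_token_ids = proposer.propose(
    target_token_ids=target_token_ids,          # [num_tokens]
    target_positions=target_positions,          # [num_tokens] or [3, num_tokens] with M-RoPE
    target_hidden_states=target_hidden_states,  # [num_tokens, hidden_size]
    next_token_ids=next_token_ids,              # [batch_size]
    last_token_indices=None,                    # derived from query_start_loc when None
    common_attn_metadata=common_attn_metadata,
    sampling_metadata=sampling_metadata,
)
# draft_token_ids: [batch_size, num_speculative_tokens]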

allowed_attn_types instance-attribute

allowed_attn_types: Optional[tuple] = None

arange instance-attribute

arange = arange(
    max_num_slots_for_arange, device=device, dtype=int32
)

attn_metadata_builder instance-attribute

attn_metadata_builder: Optional[
    AttentionMetadataBuilder
] = None

backup_next_token_ids instance-attribute

backup_next_token_ids = CpuGpuBuffer(
    max_batch_size,
    dtype=int32,
    pin_memory=is_pin_memory_available(),
    device=device,
    with_numpy=True,
)

block_size instance-attribute

block_size = block_size

child_drafts_per_level instance-attribute

child_drafts_per_level = [num_drafts_per_level[0]]

cu_drafts_per_level instance-attribute

cu_drafts_per_level = [num_drafts_per_level[0]]

cudagraph_batch_sizes instance-attribute

cudagraph_batch_sizes = (
    list(reversed(cudagraph_capture_sizes))
    if use_cuda_graph
    else []
)

device instance-attribute

device = device

draft_model_config instance-attribute

draft_model_config = draft_model_config

dtype instance-attribute

dtype = dtype

hidden_size instance-attribute

hidden_size = get_hidden_size()

hidden_states instance-attribute

hidden_states = zeros(
    (max_num_tokens, hidden_size),
    dtype=dtype,
    device=device,
)

input_ids instance-attribute

input_ids = zeros(
    max_num_tokens, dtype=int32, device=device
)

inputs_embeds instance-attribute

inputs_embeds = zeros(
    (max_num_tokens, hidden_size),
    dtype=dtype,
    device=device,
)

max_model_len instance-attribute

max_model_len = max_model_len

max_num_tokens instance-attribute

max_num_tokens = max_num_batched_tokens

method instance-attribute

method = method

mm_registry instance-attribute

mm_registry = MULTIMODAL_REGISTRY

mrope_positions instance-attribute

mrope_positions = zeros(
    (3, max_num_tokens), dtype=int64, device=device
)

num_speculative_tokens instance-attribute

num_speculative_tokens = num_speculative_tokens

positions instance-attribute

positions = zeros(
    max_num_tokens, dtype=int64, device=device
)

runner instance-attribute

runner = runner

speculative_config instance-attribute

speculative_config = speculative_config

supports_mm_inputs instance-attribute

supports_mm_inputs = supports_multimodal_inputs(
    model_config
)

token_arange_np instance-attribute

token_arange_np = arange(max_num_tokens)

tree_choices instance-attribute

tree_choices: list[tuple[int, ...]] = literal_eval(
    spec_token_tree
)
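
The tree is supplied as a string literal of child-index paths and parsed with ast.literal_eval; the per-level counters are then derived from the path lengths. A small runnable sketch with a hypothetical two-level binary tree (chosen for illustration, not a default configuration):

import ast

spec_token_tree = "[(0,), (1,), (0, 0), (0, 1), (1, 0), (1, 1)]"  # hypothetical tree
tree_choices = ast.literal_eval(spec_token_tree)
tree_depth = len(tree_choices[-1])          # 2
num_drafts_per_level = [0] * tree_depth
for node in tree_choices:
    num_drafts_per_level[len(node) - 1] += 1
# num_drafts_per_level   -> [2, 4]
# cu_drafts_per_level    -> [2, 6]
# child_drafts_per_level -> [2, 2]  (each level-0 draft has two children)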

tree_draft_pos_offsets instance-attribute

tree_draft_pos_offsets = repeat(max_batch_size, 1)

use_cuda_graph instance-attribute

use_cuda_graph = (
    not is_xpu()
    and level == PIECEWISE
    and not enforce_eager
)

uses_mrope instance-attribute

uses_mrope = uses_mrope

vllm_config instance-attribute

vllm_config = vllm_config

__init__

__init__(
    vllm_config: VllmConfig, device: device, runner=None
)
Source code in vllm/v1/spec_decode/eagle.py
def __init__(
    self,
    vllm_config: VllmConfig,
    device: torch.device,
    runner=None,
):
    self.vllm_config = vllm_config
    self.speculative_config = vllm_config.speculative_config
    self.draft_model_config = self.speculative_config.draft_model_config
    self.method = self.speculative_config.method

    self.runner = runner
    self.device = device
    self.dtype = vllm_config.model_config.dtype
    self.max_model_len = vllm_config.model_config.max_model_len
    self.block_size = vllm_config.cache_config.block_size
    self.num_speculative_tokens = (
        self.speculative_config.num_speculative_tokens)
    self.max_num_tokens = (
        vllm_config.scheduler_config.max_num_batched_tokens)
    self.token_arange_np = np.arange(self.max_num_tokens)
    # We need to get the hidden size from the draft model config because
    # the draft model's hidden size can be different from the target model's
    # hidden size (e.g., Llama 3.3 70B).
    self.hidden_size = self.draft_model_config.get_hidden_size()

    # Multi-modal data support
    self.mm_registry = MULTIMODAL_REGISTRY
    self.supports_mm_inputs = self.mm_registry.supports_multimodal_inputs(
        vllm_config.model_config)

    self.attn_metadata_builder: Optional[AttentionMetadataBuilder] = None

    self.use_cuda_graph = (not current_platform.is_xpu()
                           and self.vllm_config.compilation_config.level
                           == CompilationLevel.PIECEWISE and
                           not self.vllm_config.model_config.enforce_eager)
    self.cudagraph_batch_sizes = list(
        reversed(self.vllm_config.compilation_config.
                 cudagraph_capture_sizes)) if self.use_cuda_graph else []

    # persistent buffers for cuda graph
    self.input_ids = torch.zeros(self.max_num_tokens,
                                 dtype=torch.int32,
                                 device=device)
    self.uses_mrope = self.vllm_config.model_config.uses_mrope
    if self.uses_mrope:
        # M-RoPE needs (3, max_num_tokens)
        self.mrope_positions = torch.zeros((3, self.max_num_tokens),
                                           dtype=torch.int64,
                                           device=device)
    else:
        # RoPE needs (max_num_tokens,)
        self.positions = torch.zeros(self.max_num_tokens,
                                     dtype=torch.int64,
                                     device=device)
    self.hidden_states = torch.zeros(
        (self.max_num_tokens, self.hidden_size),
        dtype=self.dtype,
        device=device)

    # We need +1 here because the arange is used to set query_start_loc,
    # which has one more element than batch_size.
    max_batch_size = vllm_config.scheduler_config.max_num_seqs
    max_num_slots_for_arange = max(max_batch_size + 1, self.max_num_tokens)
    self.arange = torch.arange(max_num_slots_for_arange,
                               device=device,
                               dtype=torch.int32)

    self.inputs_embeds = torch.zeros(
        (self.max_num_tokens, self.hidden_size),
        dtype=self.dtype,
        device=device)

    self.backup_next_token_ids = CpuGpuBuffer(
        max_batch_size,
        dtype=torch.int32,
        pin_memory=is_pin_memory_available(),
        device=device,
        with_numpy=True)

    # Determine allowed attention backends once during initialization.
    self.allowed_attn_types: Optional[tuple] = None
    if current_platform.is_rocm():
        rocm_types = [TritonAttentionMetadata, FlashAttentionMetadata]
        # vllm.v1.attention.backends.rocm_aiter_fa is an optional backend
        if find_spec("vllm.v1.attention.backends.rocm_aiter_fa"):
            from vllm.v1.attention.backends.rocm_aiter_fa import (
                AiterFlashAttentionMetadata)
            rocm_types.append(AiterFlashAttentionMetadata)
        self.allowed_attn_types = tuple(rocm_types)

    # Parse the speculative token tree.
    spec_token_tree = self.speculative_config.speculative_token_tree
    self.tree_choices: list[tuple[int,
                                  ...]] = ast.literal_eval(spec_token_tree)
    tree_depth = len(self.tree_choices[-1])
    # Precompute per-level properties of the tree.
    num_drafts_per_level = [0] * tree_depth
    for node in self.tree_choices:
        num_drafts_per_level[len(node) - 1] += 1
    self.cu_drafts_per_level = [num_drafts_per_level[0]]
    self.child_drafts_per_level = [num_drafts_per_level[0]]
    for level in range(1, tree_depth):
        self.cu_drafts_per_level.append(self.cu_drafts_per_level[-1] +
                                        num_drafts_per_level[level])
        self.child_drafts_per_level.append(num_drafts_per_level[level] //
                                           num_drafts_per_level[level - 1])
    # Precompute draft position offsets in flattened tree.
    self.tree_draft_pos_offsets = torch.arange(
        1,
        len(self.tree_choices) + 1,
        device=device,
        dtype=torch.int32,
    ).repeat(max_batch_size, 1)
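
The per-level bookkeeping at the end of __init__ is easier to see with concrete numbers. Below is a minimal, standalone sketch using a hypothetical two-level speculative token tree (the tree string is illustrative, not a vLLM default); it reproduces the cu_drafts_per_level and child_drafts_per_level values that propose_tree later relies on.

import ast

# Hypothetical tree: two drafts at the root level, each expanded by one child.
spec_token_tree = "[(0,), (1,), (0, 0), (1, 0)]"
tree_choices = ast.literal_eval(spec_token_tree)

tree_depth = len(tree_choices[-1])
num_drafts_per_level = [0] * tree_depth
for node in tree_choices:
    num_drafts_per_level[len(node) - 1] += 1

cu_drafts_per_level = [num_drafts_per_level[0]]
child_drafts_per_level = [num_drafts_per_level[0]]
for level in range(1, tree_depth):
    cu_drafts_per_level.append(
        cu_drafts_per_level[-1] + num_drafts_per_level[level])
    child_drafts_per_level.append(
        num_drafts_per_level[level] // num_drafts_per_level[level - 1])

print(num_drafts_per_level)    # [2, 2]: drafts at each tree level
print(cu_drafts_per_level)     # [2, 4]: cumulative drafts up to each level
print(child_drafts_per_level)  # [2, 1]: children sampled per parent draft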

_get_attention_metadata_builder

_get_attention_metadata_builder() -> AttentionMetadataBuilder

Find and return the attention metadata builder for the EAGLE layers.

Returns:

AttentionMetadataBuilder: The metadata builder for the EAGLE layers.

Raises:

AssertionError: If no metadata builder is found for the EAGLE layers.

Source code in vllm/v1/spec_decode/eagle.py
def _get_attention_metadata_builder(
        self) -> AttentionMetadataBuilder:
    """Find and return the attention metadata builder for the EAGLE layers.

    Returns:
        The metadata builder for the EAGLE layers.

    Raises:
        AssertionError: If no metadata builder is found for the EAGLE layers.
    """
    builder = None
    chosen_layer = self.attn_layer_names[0]

    for kv_cache_group in self.runner.attn_groups:
        for attn_group in kv_cache_group:
            if chosen_layer in attn_group.layer_names:
                builder = attn_group.get_metadata_builder()
                break
        if builder is not None:
            break

    assert builder is not None, (
        "Failed to find attention metadata builder for EAGLE layers.")
    return builder

_get_positions

_get_positions(num_tokens: int)
Source code in vllm/v1/spec_decode/eagle.py
def _get_positions(self, num_tokens: int):
    if self.uses_mrope:
        return self.mrope_positions[:, :num_tokens]
    return self.positions[:num_tokens]

_set_positions

_set_positions(num_tokens: int, positions: Tensor)
Source code in vllm/v1/spec_decode/eagle.py
def _set_positions(self, num_tokens: int, positions: torch.Tensor):
    if self.uses_mrope:
        self.mrope_positions[:, :num_tokens] = positions
    else:
        self.positions[:num_tokens] = positions

dummy_run

dummy_run(num_tokens: int) -> None
Source code in vllm/v1/spec_decode/eagle.py
@torch.inference_mode()
def dummy_run(
    self,
    num_tokens: int,
) -> None:
    with set_forward_context(None, self.vllm_config,
                             num_tokens=num_tokens):
        if self.supports_mm_inputs:
            input_ids = None
            inputs_embeds = self.inputs_embeds[:num_tokens]
        else:
            input_ids = self.input_ids[:num_tokens]
            inputs_embeds = None

        self.model(
            input_ids=input_ids,
            positions=self._get_positions(num_tokens),
            hidden_states=self.hidden_states[:num_tokens],
            inputs_embeds=inputs_embeds,
        )

get_model_name

get_model_name(model: Module) -> str
Source code in vllm/v1/spec_decode/eagle.py
def get_model_name(self, model: nn.Module) -> str:
    if hasattr(model, 'module'):  # multi-GPU
        model = model.module
    return model.__class__.__name__

load_model

load_model(target_model: Module) -> None
Source code in vllm/v1/spec_decode/eagle.py
def load_model(self, target_model: nn.Module) -> None:
    draft_model_config = \
        self.vllm_config.speculative_config.draft_model_config
    target_attn_layer_names = set(
        get_layers_from_vllm_config(self.vllm_config, Attention).keys())
    # FIXME: support hybrid kv for draft model
    target_indexer_layer_names = set(
        get_layers_from_vllm_config(self.vllm_config,
                                    DeepseekV32IndexerCache).keys())

    from vllm.compilation.backends import set_model_tag
    with set_model_tag("eagle_head"):
        self.model = get_model(vllm_config=self.vllm_config,
                               model_config=draft_model_config)

    draft_attn_layer_names = (
        get_layers_from_vllm_config(self.vllm_config, Attention).keys() -
        target_attn_layer_names)
    indexer_layers = get_layers_from_vllm_config(self.vllm_config,
                                                 DeepseekV32IndexerCache)
    draft_indexer_layer_names = (indexer_layers.keys() -
                                 target_indexer_layer_names)
    self.attn_layer_names = list(draft_attn_layer_names)
    self.indexer_layer_names = list(draft_indexer_layer_names)

    if self.indexer_layer_names:
        first_layer = self.indexer_layer_names[0]
        self.draft_indexer_metadata_builder = (
            indexer_layers[first_layer].get_attn_backend().get_builder_cls(
            )(
                indexer_layers[first_layer].get_kv_cache_spec(),
                self.indexer_layer_names,
                self.vllm_config,
                self.device,
            ))
    else:
        self.draft_indexer_metadata_builder = None

    if self.supports_mm_inputs:
        # Even if the target model is multimodal, a text-only draft
        # model can still be used.
        try:
            dummy_input_ids = torch.tensor([[1]],
                                           device=self.input_ids.device)
            self.model.get_input_embeddings(dummy_input_ids,
                                            multimodal_embeddings=None)
        except (NotImplementedError, AttributeError, TypeError):
            logger.warning(
                "Draft model does not support multimodal inputs, "
                "falling back to text-only mode")
            self.supports_mm_inputs = False

    if supports_multimodal(target_model):
        # handle multimodality
        if (self.get_model_name(target_model) ==
                "Qwen2_5_VLForConditionalGeneration"):
            self.model.config.image_token_index = (
                target_model.config.image_token_id)
        else:
            self.model.config.image_token_index = (
                target_model.config.image_token_index)
        target_language_model = target_model.get_language_model()
    else:
        target_language_model = target_model
    # share embed_tokens with the target model if needed
    if get_pp_group().world_size == 1:
        if hasattr(target_language_model.model, 'embed_tokens'):
            target_embed_tokens = target_language_model.model.embed_tokens
        elif hasattr(target_language_model.model, 'embedding'):
            target_embed_tokens = target_language_model.model.embedding
        else:
            raise AttributeError(
                "Target model does not have 'embed_tokens' or 'embedding' "
                "attribute")

        # Check if shapes match and we found the embedding
        eagle_shape = self.model.model.embed_tokens.weight.shape
        target_shape = target_embed_tokens.weight.shape
        if eagle_shape == target_shape:
            logger.info(
                "Assuming the EAGLE head shares the same vocab embedding"
                " with the target model.")
            del self.model.model.embed_tokens
            self.model.model.embed_tokens = target_embed_tokens
        else:
            logger.info(
                "The EAGLE head's vocab embedding will be loaded separately"
                " from the target model.")
    else:
        logger.info(
            "The EAGLE head's vocab embedding will be loaded separately"
            " from the target model.")

    # share lm_head with the target model if needed
    # some model definitions do not define lm_head explicitly
    # and reuse embed_tokens for lm_head, e.g., CohereForCausalLM
    if self.vllm_config.speculative_config.method != "eagle3":
        if hasattr(target_language_model, "lm_head"):
            logger.info(
                "Loading EAGLE LM head weights from the target model.")
            self.model.lm_head = target_language_model.lm_head
    else:
        if (hasattr(self.model, "lm_head")
                and hasattr(target_language_model, "lm_head")
                and self.model.lm_head.weight.shape
                == target_language_model.lm_head.weight.shape):
            logger.info("Assuming the EAGLE head shares the same lm_head"
                        " with the target model.")
            del self.model.lm_head
            self.model.lm_head = target_language_model.lm_head
        else:
            logger.info(
                "The EAGLE head's lm_head will be loaded separately"
                " from the target model.")

prepare_inputs

prepare_inputs(
    common_attn_metadata: CommonAttentionMetadata,
    sampled_token_ids: list[list[int]],
    num_draft_tokens: list[int],
) -> tuple[CommonAttentionMetadata, Tensor]

This function is used to prepare the inputs for speculative decoding. It updates the common_attn_metadata to account for the rejected tokens (and newly sampled tokens). It also returns the token indices of the tokens that should be fed to the speculator.

Source code in vllm/v1/spec_decode/eagle.py
def prepare_inputs(
    self,
    common_attn_metadata: CommonAttentionMetadata,
    sampled_token_ids: list[list[int]],
    num_draft_tokens: list[int],
) -> tuple[CommonAttentionMetadata, torch.Tensor]:
    """
    This function is used to prepare the inputs for speculative decoding.
    It updates the common_attn_metadata to account for the rejected
    tokens (and newly sampled tokens). It also returns the token indices
    of the tokens that should be fed to the speculator.
    """
    # E.g.
    #  common_attn_metadata.query_start_loc{_cpu}:
    #       [0, q1, q1 + q2, q1 + q2 + q3]
    #  common_attn_metadata.seq_lens{_cpu}: [s1, s2, s3]
    #  num_rejected_tokens: [n1, n2, n3]
    # This function computes the intermediate values:
    #  num_tokens_per_req: [q1 - n1, q2 - n2, q3 - n3]
    # And returns:
    #  common_attn_metadata.query_start_loc{_cpu}:
    #       [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
    #  common_attn_metadata.seq_lens{_cpu}:
    #       [s1 - n1 + 1, s2 - n2 + 1, s3 - n3 + 1]
    #  token_indices: [0, 1, ..., q1 - n1 - 1,
    #                 q1, q1 + 1, ..., q1 + q2 - n2 - 1,
    #                 q1 + q2, q1 + q2 + 1, ..., q1 + q2 + q3 - n3 - 1]

    num_rejected_tokens = [
        n + 1 - len(sampled_token_ids[i]) if n > 0 else 0
        for i, n in enumerate(num_draft_tokens)
    ]
    num_rejected_tokens = torch.tensor(num_rejected_tokens,
                                       dtype=torch.int32)

    device = common_attn_metadata.query_start_loc.device
    query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu
    new_seq_lens_cpu = common_attn_metadata.seq_lens_cpu \
        - num_rejected_tokens

    # [0, q1, q1 + q2, q1 + q2 + q3] -> [q1, q2, q3]
    new_query_len_per_req = (query_start_loc_cpu[1:] -
                             query_start_loc_cpu[:-1])
    # [q1, q2, q3] -> [q1 - n1, q2 - n2, q3 - n3]
    new_num_tokens_per_req = new_query_len_per_req - num_rejected_tokens
    new_num_tokens_per_req_np = new_num_tokens_per_req.numpy()

    # [q1 - n1, q2 - n2, q3 - n3] ->
    # [0, q1 - n1, q1 + q2 - n1 - n2, q1 + q2 + q3 - n1 - n2 - n3]
    new_query_start_loc_cpu = torch.zeros(
        query_start_loc_cpu.shape,
        dtype=torch.int32,
        pin_memory=is_pin_memory_available())
    new_query_start_loc_np = new_query_start_loc_cpu.numpy()
    np.cumsum(new_num_tokens_per_req_np, out=new_query_start_loc_np[1:])

    total_num_tokens = new_query_start_loc_np[-1]
    # Example assuming new_num_tokens_per_req_np = [2, 4, 3],
    # which implies that new_query_start_loc_np is:
    # [0, 2, 6, 9] ->
    # [0, 0, 2, 2, 2, 2, 6, 6, 6]
    #  _r1_  ____r2____  ___r3__
    new_query_start_locs_expanded = np.repeat(new_query_start_loc_np[:-1],
                                              new_num_tokens_per_req_np)
    # [0, 1, 2, 3, 4, 5, 6, 7, 8] ->
    # [0, 1, 0, 1, 2, 3, 0, 1, 2]
    #  _r1_  ____r2____  ___r3__
    token_offsets = self.token_arange_np[:total_num_tokens] \
        - new_query_start_locs_expanded

    # Expand starting positions to match token pattern
    # [0, q1, q1 + q2] ->
    # [0, 0, q1, q1, q1, q1, q1 + q2, q1 + q2, q1 + q2]
    #  _r1_  _____r2_______  ___________r3____________
    old_query_start_locs_expanded = np.repeat(
        query_start_loc_cpu[:-1].numpy(), new_num_tokens_per_req_np)
    # Final token indices are:
    # [0, 1,                                // req 1
    #  q1 + 0, q1 + 1, q1 + 2, q1 + 3,       // req 2
    #  q1 + q2 + 0, q1 + q2 + 1, q1 + q2 + 2] // req 3
    token_indices_np = token_offsets + old_query_start_locs_expanded
    token_indices = torch.from_numpy(token_indices_np).to(
        device, non_blocking=True)

    spec_common_attn_metadata = CommonAttentionMetadata(
        query_start_loc=new_query_start_loc_cpu.to(device,
                                                   non_blocking=True),
        seq_lens=new_seq_lens_cpu.to(device, non_blocking=True),
        query_start_loc_cpu=new_query_start_loc_cpu,
        seq_lens_cpu=new_seq_lens_cpu,
        num_computed_tokens_cpu=common_attn_metadata.
        num_computed_tokens_cpu,
        num_reqs=common_attn_metadata.num_reqs,
        num_actual_tokens=total_num_tokens,
        max_query_len=new_query_len_per_req.max().item(),
        max_seq_len=new_seq_lens_cpu.max().item(),
        block_table_tensor=common_attn_metadata.block_table_tensor,
        slot_mapping=common_attn_metadata.slot_mapping[token_indices],
        causal=True,
    )

    return spec_common_attn_metadata, token_indices
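
To make the index arithmetic above concrete, here is a NumPy-only sketch with hypothetical per-request query lengths and rejection counts (mirroring the q_i / n_i example in the comments); it reproduces the token_indices that prepare_inputs would keep.

import numpy as np

query_start_loc = np.array([0, 2, 6, 9])   # [0, q1, q1+q2, q1+q2+q3]
num_rejected_tokens = np.array([0, 2, 1])  # [n1, n2, n3]

query_len_per_req = np.diff(query_start_loc)                       # [2, 4, 3]
new_num_tokens_per_req = query_len_per_req - num_rejected_tokens   # [2, 2, 2]

new_query_start_loc = np.zeros_like(query_start_loc)
np.cumsum(new_num_tokens_per_req, out=new_query_start_loc[1:])
total_num_tokens = new_query_start_loc[-1]                         # 6

# Per-token offsets inside each shrunken request, shifted back into the
# original token layout to obtain the indices of the tokens to keep.
new_starts_expanded = np.repeat(new_query_start_loc[:-1], new_num_tokens_per_req)
token_offsets = np.arange(total_num_tokens) - new_starts_expanded
old_starts_expanded = np.repeat(query_start_loc[:-1], new_num_tokens_per_req)
token_indices = token_offsets + old_starts_expanded
print(token_indices)  # [0 1 2 3 6 7]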

prepare_inputs_padded

prepare_inputs_padded(
    common_attn_metadata: CommonAttentionMetadata,
    spec_decode_metadata: SpecDecodeMetadata,
    valid_sampled_tokens_count: Tensor,
) -> tuple[CommonAttentionMetadata, Tensor, Tensor]

This function is used to prepare the inputs for speculative decoding. It updates the common_attn_metadata for speculative decoding, but does not consider the rejected tokens. Instead, all tokens are included as inputs to the speculator, with the rejected tokens used as padding and filtered out later by token_indices_to_sample. No blocking CPU operations should be introduced in this function.

Source code in vllm/v1/spec_decode/eagle.py
def prepare_inputs_padded(self,
                            common_attn_metadata: CommonAttentionMetadata,
                            spec_decode_metadata: SpecDecodeMetadata,
                            valid_sampled_tokens_count: torch.Tensor) -> \
                tuple[CommonAttentionMetadata, torch.Tensor, torch.Tensor]:
    """
    This function is used to prepare the inputs for speculative decoding.
    It updates the common_attn_metadata for speculative decoding,
    but does not consider the rejected tokens. Instead, all tokens
    are included as inputs to the speculator, with the rejected tokens
    used as padding and filtered out later by `token_indices_to_sample`.
    No blocking CPU operations should be introduced in this function.
    """
    num_draft_tokens_gpu = torch.cat([
        spec_decode_metadata.cu_num_draft_tokens[0:1],
        spec_decode_metadata.cu_num_draft_tokens[1:] -
        spec_decode_metadata.cu_num_draft_tokens[:-1]
    ])

    num_rejected_tokens_gpu = torch.where(
        num_draft_tokens_gpu > 0,
        num_draft_tokens_gpu + 1 - valid_sampled_tokens_count,
        torch.zeros_like(num_draft_tokens_gpu))

    query_start_loc_cpu = common_attn_metadata.query_start_loc_cpu

    new_query_len_per_req = (query_start_loc_cpu[1:] -
                             query_start_loc_cpu[:-1])

    total_num_tokens = query_start_loc_cpu[-1].item()
    token_indices = self.arange[:total_num_tokens]

    spec_common_attn_metadata = CommonAttentionMetadata(
        query_start_loc=common_attn_metadata.query_start_loc,
        seq_lens=common_attn_metadata.seq_lens,
        query_start_loc_cpu=query_start_loc_cpu,
        seq_lens_cpu=common_attn_metadata.seq_lens_cpu,
        num_computed_tokens_cpu=common_attn_metadata.
        num_computed_tokens_cpu,
        num_reqs=common_attn_metadata.num_reqs,
        num_actual_tokens=total_num_tokens,
        max_query_len=new_query_len_per_req.max().item(),
        max_seq_len=common_attn_metadata.seq_lens_cpu.max().item(),
        block_table_tensor=common_attn_metadata.block_table_tensor,
        slot_mapping=common_attn_metadata.slot_mapping[token_indices],
        causal=True,
    )

    token_indices_to_sample = common_attn_metadata.query_start_loc[1:] - 1 \
        - num_rejected_tokens_gpu

    return spec_common_attn_metadata, token_indices, token_indices_to_sample
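
The same accounting can be checked on its own with hypothetical numbers: three requests with 2, 3 and 0 scheduled draft tokens and 1, 4 and 1 accepted samples (tensors live on CPU here only to keep the sketch self-contained; the real code keeps everything on the GPU).

import torch

cu_num_draft_tokens = torch.tensor([2, 5, 5])         # cumulative drafts
valid_sampled_tokens_count = torch.tensor([1, 4, 1])  # accepted per request

# Recover per-request draft counts from the cumulative sums: [2, 3, 0].
num_draft_tokens = torch.cat([
    cu_num_draft_tokens[0:1],
    cu_num_draft_tokens[1:] - cu_num_draft_tokens[:-1],
])

# A request with d drafts yields d + 1 candidate tokens, so everything not
# accepted was rejected; requests without drafts reject nothing: [2, 0, 0].
num_rejected_tokens = torch.where(
    num_draft_tokens > 0,
    num_draft_tokens + 1 - valid_sampled_tokens_count,
    torch.zeros_like(num_draft_tokens),
)

# With query_start_loc = [0, 3, 7, 8] (query lengths 3, 4, 1), the last
# scheduled token of each request sits at query_start_loc[1:] - 1; stepping
# back over the rejected tokens gives the index whose logits are sampled.
query_start_loc = torch.tensor([0, 3, 7, 8])
token_indices_to_sample = query_start_loc[1:] - 1 - num_rejected_tokens
print(token_indices_to_sample)  # tensor([0, 6, 7])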

prepare_next_token_ids_cpu

prepare_next_token_ids_cpu(
    sampled_token_ids: list[list[int]],
    requests: dict[str, CachedRequestState],
    gpu_input_batch: InputBatch,
    num_scheduled_tokens: dict[str, int],
) -> Tensor

This function is used to prepare the inputs for speculative decoding. It calculates the next token ids for each request based on the sampled token ids from the CPU. If a request has no sampled token ids (e.g., during the initial decoding steps), it falls back to using the request state to get the next token id.

Source code in vllm/v1/spec_decode/eagle.py
def prepare_next_token_ids_cpu(
        self, sampled_token_ids: list[list[int]],
        requests: dict[str,
                       CachedRequestState], gpu_input_batch: InputBatch,
        num_scheduled_tokens: dict[str, int]) -> torch.Tensor:
    """
    This function is used to prepare the inputs for speculative decoding.
    It calculates the next token ids for each request based on the sampled
    token ids from the CPU. If a request has no sampled token ids (e.g.,
    during the initial decoding steps), it falls back to using the request
    state to get the next token id.
    """
    req_ids = gpu_input_batch.req_ids
    next_token_ids: list[int] = []
    for i, token_ids in enumerate(sampled_token_ids):
        if token_ids:
            # Common case.
            next_token_id = token_ids[-1]
        else:
            # Partial prefill (rare case).
            # Get the next token id from the request state.
            req_id = req_ids[i]
            req_state = requests[req_id]
            seq_len = (req_state.num_computed_tokens +
                       num_scheduled_tokens[req_id])
            next_token_id = req_state.get_token_id(seq_len)
        next_token_ids.append(next_token_id)
    next_token_ids = torch.tensor(next_token_ids,
                                  dtype=torch.int32,
                                  device=self.input_ids.device)
    return next_token_ids
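
A minimal pure-Python sketch of the fallback described above, with hypothetical request state: request 0 has accepted tokens, while request 1 is still in partial prefill, so its next input token comes from the prompt rather than from sampling.

sampled_token_ids = [[5, 6], []]         # per-request accepted token ids
request_1_tokens = [100, 101, 102, 103]  # hypothetical prompt of request 1
num_computed_tokens, num_scheduled_tokens = 1, 2

next_token_ids = []
for i, token_ids in enumerate(sampled_token_ids):
    if token_ids:
        # Common case: continue from the last accepted token.
        next_token_ids.append(token_ids[-1])
    else:
        # Partial prefill: the next token is simply the next prompt token.
        seq_len = num_computed_tokens + num_scheduled_tokens
        next_token_ids.append(request_1_tokens[seq_len])
print(next_token_ids)  # [6, 103]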

prepare_next_token_ids_padded

prepare_next_token_ids_padded(
    common_attn_metadata: CommonAttentionMetadata,
    sampled_token_ids: Tensor,
    requests: dict[str, CachedRequestState],
    gpu_input_batch: InputBatch,
    discard_request_indices: Tensor,
    num_discarded_requests: int,
) -> tuple[Tensor, Tensor]

This function is used to prepare the inputs for speculative decoding. It calculates the next token ids and the number of valid sampled tokens for each request, considering the "discarded" requests whose next token is not sampled and comes from request.get_token_id() instead. It also accounts for the rejected tokens in sampled_token_ids. This function must use device functions to operate on the inputs, and should not introduce any blocking CPU-GPU synchronization.

Source code in vllm/v1/spec_decode/eagle.py
def prepare_next_token_ids_padded(self,
                           common_attn_metadata: CommonAttentionMetadata,
                           sampled_token_ids: torch.Tensor,
                           requests: dict[str, CachedRequestState],
                           gpu_input_batch: InputBatch,
                           discard_request_indices: torch.Tensor,
                           num_discarded_requests: int) -> \
                            tuple[torch.Tensor, torch.Tensor]:
    """
    This function is used to prepare the inputs for speculative decoding.
    It calculates the next token ids and the number of valid sampled tokens
    for each request, considering the "discarded" requests whose next token
    is not sampled and comes from `request.get_token_id()` instead.
    It also accounts for the rejected tokens in `sampled_token_ids`.
    This function must use device functions to operate on the inputs, and
    should not introduce any blocking CPU-GPU synchronization.
    """
    # TODO(Ben): Combine this into a custom fused kernel

    # Precompute get_token_id for when there is no valid next token
    num_reqs = gpu_input_batch.num_reqs
    self.backup_next_token_ids.np[:num_reqs] = np.array([
        requests[gpu_input_batch.req_ids[i]].get_token_id(
            common_attn_metadata.seq_lens_cpu[i].item())
        for i in range(num_reqs)
    ])
    self.backup_next_token_ids.copy_to_gpu(num_reqs)

    # Mask out the sampled tokens indices that should not be sampled.
    discard_sampled_tokens_req_indices = \
        discard_request_indices[:num_discarded_requests]

    valid_sampled_token_ids_gpu = sampled_token_ids.clone()
    valid_sampled_token_ids_gpu.index_fill_(
        0, discard_sampled_tokens_req_indices, -1)

    # Generate a mask for all valid tokens within those requests
    max_gen_len = sampled_token_ids.shape[-1]
    if max_gen_len == 1:
        valid_mask = torch.ones_like(valid_sampled_token_ids_gpu,
                                     dtype=torch.bool)
    else:
        valid_mask = (
            (valid_sampled_token_ids_gpu != -1) &
            (valid_sampled_token_ids_gpu < gpu_input_batch.vocab_size))

    # Count the number of valid tokens in each request
    valid_sampled_tokens_count = valid_mask.sum(dim=1)

    # Get the rightmost valid index per row
    last_valid_indices = valid_sampled_tokens_count - 1
    last_valid_indices_safe = torch.clamp(last_valid_indices, min=0)

    # Get the last valid token from each row
    # (the gathered value is meaningless for rows with no valid token;
    #  those rows fall back to the backup ids below)
    selected_tokens = torch.gather(
        valid_sampled_token_ids_gpu, 1,
        last_valid_indices_safe.unsqueeze(1)).squeeze(1)

    # Use last token if valid, pre-computed backup if not
    batch_size = valid_sampled_token_ids_gpu.shape[0]
    next_token_ids = torch.where(
        last_valid_indices != -1, selected_tokens,
        self.backup_next_token_ids.gpu[:batch_size])

    return next_token_ids, valid_sampled_tokens_count
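
The "last valid token, else backup" selection can be exercised in isolation. In this sketch the sampled rows are hypothetical, rejected slots are already masked to -1 (so valid tokens form a prefix of each row, as produced by rejection sampling), and the backup ids stand in for backup_next_token_ids.

import torch

sampled = torch.tensor([
    [11, 12, -1],   # 2 valid tokens -> take 12
    [-1, -1, -1],   # no valid token -> fall back to the backup id
    [21, 22, 23],   # all valid      -> take 23
])
backup_next_token_ids = torch.tensor([101, 102, 103])

valid_mask = sampled != -1
valid_sampled_tokens_count = valid_mask.sum(dim=1)    # tensor([2, 0, 3])
last_valid_indices = valid_sampled_tokens_count - 1   # tensor([1, -1, 2])
last_valid_indices_safe = torch.clamp(last_valid_indices, min=0)

selected = torch.gather(
    sampled, 1, last_valid_indices_safe.unsqueeze(1)).squeeze(1)
next_token_ids = torch.where(
    last_valid_indices != -1, selected, backup_next_token_ids)
print(next_token_ids)  # tensor([12, 102, 23])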

propose

propose(
    target_token_ids: Tensor,
    target_positions: Tensor,
    target_hidden_states: Tensor,
    next_token_ids: Tensor,
    last_token_indices: Optional[Tensor],
    common_attn_metadata: CommonAttentionMetadata,
    sampling_metadata: SamplingMetadata,
    mm_embed_inputs: Optional[
        tuple[list[Tensor], Tensor]
    ] = None,
) -> Tensor
Source code in vllm/v1/spec_decode/eagle.py
def propose(
    self,
    # [num_tokens]
    target_token_ids: torch.Tensor,
    # [num_tokens] or [3, num_tokens] when M-RoPE is enabled
    target_positions: torch.Tensor,
    # [num_tokens, hidden_size]
    target_hidden_states: torch.Tensor,
    # [batch_size]
    next_token_ids: torch.Tensor,
    last_token_indices: Optional[torch.Tensor],
    common_attn_metadata: CommonAttentionMetadata,
    sampling_metadata: SamplingMetadata,
    mm_embed_inputs: Optional[tuple[list[torch.Tensor],
                                    torch.Tensor]] = None,
) -> torch.Tensor:
    num_tokens = target_token_ids.shape[0]
    batch_size = next_token_ids.shape[0]

    if last_token_indices is None:
        last_token_indices = common_attn_metadata.query_start_loc[1:] - 1

    if self.method == "eagle3":
        assert isinstance(self.model, Eagle3LlamaForCausalLM)
        target_hidden_states = self.model.combine_hidden_states(
            target_hidden_states)
        assert target_hidden_states.shape[-1] == self.hidden_size
    # Shift the input ids by one token.
    # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3]
    self.input_ids[:num_tokens - 1] = target_token_ids[1:]
    # Replace the last token with the next token.
    # E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4]
    self.input_ids[last_token_indices] = next_token_ids

    assert self.runner is not None

    # FIXME: need to consider multiple kv_cache_groups
    ubatch_id = dbo_current_ubatch_id()
    attn_metadata_builder = \
        self.runner.attn_groups[0][0].metadata_builders[ubatch_id]
    attn_metadata = attn_metadata_builder.build_for_drafting(
        common_attn_metadata=common_attn_metadata, draft_index=0)
    # FIXME: support hybrid kv for draft model (remove separate indexer)
    if self.draft_indexer_metadata_builder:
        draft_indexer_metadata = (
            self.draft_indexer_metadata_builder.build_for_drafting(
                common_attn_metadata=common_attn_metadata,
                draft_index=0,
            ))
    else:
        draft_indexer_metadata = None
    # At this moment, we assume all eagle layers belong to the same KV
    # cache group, thus using the same attention metadata.
    per_layer_attn_metadata = {}
    for layer_name in self.attn_layer_names:
        per_layer_attn_metadata[layer_name] = attn_metadata
    for layer_name in self.indexer_layer_names:
        assert draft_indexer_metadata is not None
        per_layer_attn_metadata[layer_name] = draft_indexer_metadata

    if self.use_cuda_graph and \
            num_tokens <= self.cudagraph_batch_sizes[-1]:
        num_input_tokens = self.vllm_config.pad_for_cudagraph(num_tokens)
    else:
        num_input_tokens = num_tokens
    # copy inputs to buffer for cudagraph
    self._set_positions(num_tokens, target_positions)
    self.hidden_states[:num_tokens] = target_hidden_states

    if self.supports_mm_inputs:
        mm_embeds, is_mm_embed = mm_embed_inputs or (None, None)

        self.inputs_embeds[:num_tokens] = self.model.get_input_embeddings(
            self.input_ids[:num_tokens],
            multimodal_embeddings=mm_embeds,
            is_multimodal=is_mm_embed,
        )

        input_ids = None
        inputs_embeds = self.inputs_embeds[:num_input_tokens]
    else:
        input_ids = self.input_ids[:num_input_tokens]
        inputs_embeds = None

    with set_forward_context(per_layer_attn_metadata,
                             self.vllm_config,
                             num_tokens=num_input_tokens):
        ret_hidden_states = self.model(
            input_ids=input_ids,
            positions=self._get_positions(num_input_tokens),
            hidden_states=self.hidden_states[:num_input_tokens],
            inputs_embeds=inputs_embeds,
        )
        if self.method == "mtp":
            last_hidden_states = ret_hidden_states
            hidden_states = last_hidden_states
        else:
            last_hidden_states, hidden_states = ret_hidden_states
    sample_hidden_states = last_hidden_states[last_token_indices]
    logits = self.model.compute_logits(sample_hidden_states)

    # Early exit if there is only one draft token to be generated.
    if self.num_speculative_tokens == 1:
        draft_token_ids = logits.argmax(dim=-1)
        return draft_token_ids.view(-1, 1)

    if self.uses_mrope:
        positions = target_positions[:, last_token_indices]
    else:
        positions = target_positions[last_token_indices]
    if self.method in ("deepseek_mtp", "ernie_mtp", "longcat_flash_mtp"):
        hidden_states = self.hidden_states[last_token_indices]
    else:
        hidden_states = hidden_states[last_token_indices]

    if isinstance(attn_metadata, TreeAttentionMetadata):
        # Draft using tree attention.
        draft_token_ids_list = self.propose_tree(
            batch_size=batch_size,
            logits=logits,
            positions=positions,
            hidden_states=hidden_states,
            common_attn_metadata=common_attn_metadata,
        )
        # [batch_size, num_tree_tokens]
        return torch.cat(draft_token_ids_list, dim=1)

    draft_token_ids = logits.argmax(dim=-1)

    if self.allowed_attn_types is not None and \
        not isinstance(attn_metadata, self.allowed_attn_types):
        raise ValueError(
            f"Unsupported attention metadata type for speculative "
            "decoding with num_speculative_tokens > 1: "
            f"{type(attn_metadata)}. Supported types are: "
            f"{self.allowed_attn_types}")

    # Generate the remaining draft tokens.
    draft_token_ids_list = [draft_token_ids]

    if self.use_cuda_graph and \
            batch_size <= self.cudagraph_batch_sizes[-1]:
        input_batch_size = self.vllm_config.pad_for_cudagraph(batch_size)
    else:
        input_batch_size = batch_size

    common_attn_metadata.num_actual_tokens = batch_size
    common_attn_metadata.max_query_len = 1
    common_attn_metadata.query_start_loc = self.arange[:batch_size + 1]
    common_attn_metadata.query_start_loc_cpu = torch.from_numpy(
        self.token_arange_np[:batch_size + 1]).clone()
    for token_index in range(self.num_speculative_tokens - 1):
        # Update the inputs.
        # cast to int32 is crucial when eagle model is compiled.
        # tensor.argmax() returns int64 by default.
        input_ids = draft_token_ids_list[-1].int()
        if self.uses_mrope:
            positions += 1
            # NOTE(woosuk): We should handle the case where the draft model
            # generates tokens beyond the max model length.
            # Since it is complex to remove such requests from the batch,
            # we keep them in the batch but adjust the position ids
            # and slot mappings to avoid the
            # out-of-range access during the model execution.
            # The draft tokens generated with this adjustment
            # should be ignored.
            exceeds_max_model_len = positions[0] >= self.max_model_len
            # Mask out the position ids that exceed the max model length.
            # Otherwise, we may get out-of-range error in RoPE.
            clamped_positions = torch.where(
                exceeds_max_model_len.unsqueeze(0),
                torch.zeros_like(positions), positions)
        else:
            positions += 1
            exceeds_max_model_len = positions >= self.max_model_len
            clamped_positions = torch.where(exceeds_max_model_len, 0,
                                            positions)

        # Increment the sequence lengths.
        common_attn_metadata.seq_lens += 1
        common_attn_metadata.seq_lens_cpu += 1
        # For the requests that exceed the max model length, we set the
        # sequence length to 1 to minimize their overheads in attention.

        common_attn_metadata.seq_lens.masked_fill_(exceeds_max_model_len,
                                                   1)

        common_attn_metadata.num_computed_tokens_cpu = \
            common_attn_metadata.seq_lens_cpu - 1

        # Compute the slot mapping.
        if self.uses_mrope:
            # all dimensions of positions are the same
            block_numbers = clamped_positions[0] // self.block_size
        else:
            block_numbers = clamped_positions // self.block_size
        block_ids = common_attn_metadata.block_table_tensor.gather(
            dim=1, index=block_numbers.view(-1, 1))
        block_ids = block_ids.view(-1)
        if self.uses_mrope:
            common_attn_metadata.slot_mapping = (
                block_ids * self.block_size +
                clamped_positions[0] % self.block_size)
        else:
            common_attn_metadata.slot_mapping = (
                block_ids * self.block_size +
                clamped_positions % self.block_size)
        # Mask out the slot mappings that exceed the max model length.
        # Otherwise, the KV cache will be inadvertently updated with the
        # padding tokens.
        common_attn_metadata.slot_mapping.masked_fill_(
            exceeds_max_model_len, PADDING_SLOT_ID)

        # Rebuild attention metadata
        attn_metadata = attn_metadata_builder.build_for_drafting(  # type: ignore
            common_attn_metadata=common_attn_metadata,
            draft_index=token_index + 1)
        for layer_name in self.attn_layer_names:
            per_layer_attn_metadata[layer_name] = attn_metadata

        # copy inputs to buffer for cudagraph
        self.input_ids[:batch_size] = input_ids
        self._set_positions(batch_size, clamped_positions)
        self.hidden_states[:batch_size] = hidden_states
        if self.supports_mm_inputs:
            self.inputs_embeds[:batch_size] = \
                self.model.get_input_embeddings(input_ids)

            input_ids = None
            inputs_embeds = self.inputs_embeds[:input_batch_size]
        else:
            input_ids = self.input_ids[:input_batch_size]
            inputs_embeds = None

        # Run the model.
        with set_forward_context(per_layer_attn_metadata,
                                 self.vllm_config,
                                 num_tokens=input_batch_size):
            ret_hidden_states = self.model(
                input_ids=input_ids,
                positions=self._get_positions(input_batch_size),
                hidden_states=self.hidden_states[:input_batch_size],
                inputs_embeds=inputs_embeds,
            )
            if self.method == "mtp":
                last_hidden_states = ret_hidden_states
                hidden_states = ret_hidden_states
            else:
                last_hidden_states, hidden_states = ret_hidden_states
        hidden_states = hidden_states[:batch_size]
        logits = self.model.compute_logits(last_hidden_states[:batch_size])
        draft_token_ids = logits.argmax(dim=-1)
        draft_token_ids_list.append(draft_token_ids)

    # [batch_size, num_speculative_tokens]
    draft_token_ids = torch.stack(draft_token_ids_list, dim=1)
    return draft_token_ids
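
The shift-by-one input construction at the top of propose() is easiest to follow on a tiny flattened batch. The example below uses two hypothetical requests, [a1] and [b1, b2, b3], with sampled next tokens a2 and b4 (matching the comments in the source).

import torch

target_token_ids = torch.tensor([10, 20, 21, 22])  # [a1, b1, b2, b3]
next_token_ids = torch.tensor([11, 23])            # [a2, b4]
query_start_loc = torch.tensor([0, 1, 4])
last_token_indices = query_start_loc[1:] - 1       # tensor([0, 3])

input_ids = target_token_ids.clone()
# Shift left by one token; the final slot briefly holds a stale value.
input_ids[:-1] = target_token_ids[1:]
# Overwrite the last position of each request with its sampled next token.
input_ids[last_token_indices] = next_token_ids
print(input_ids)  # tensor([11, 21, 22, 23]) == [a2, b2, b3, b4]

Paired position-wise with the target model's hidden states, these shifted ids are what the EAGLE head consumes to predict the token that follows each input token.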

propose_tree

propose_tree(
    batch_size: int,
    logits: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
    common_attn_metadata: CommonAttentionMetadata,
) -> list[Tensor]
Source code in vllm/v1/spec_decode/eagle.py
def propose_tree(
    self,
    batch_size: int,
    # [num_tokens, vocab_size]
    logits: torch.Tensor,
    # [num_tokens]
    positions: torch.Tensor,
    # [num_tokens, hidden_size]
    hidden_states: torch.Tensor,
    common_attn_metadata: CommonAttentionMetadata,
) -> list[torch.Tensor]:
    tree_attn_metadata_builder = \
        self.runner.attn_groups[0][0].get_metadata_builder()
    assert isinstance(tree_attn_metadata_builder,
                      TreeAttentionMetadataBuilder)

    total_num_drafts = self.cu_drafts_per_level[0]
    level_num_drafts = total_num_drafts
    # Sample a draft token for each child at the tree root level.
    num_children = self.child_drafts_per_level[0]
    if num_children == 1:
        draft_token_ids = logits.argmax(dim=-1).view(batch_size, -1)
    else:
        draft_token_ids = torch.topk(logits, num_children,
                                     dim=-1).indices.view(batch_size, -1)
    draft_token_ids_list = [draft_token_ids]
    draft_hidden_states = hidden_states.view(batch_size, 1, -1)

    # Initialize empty tensors for concatenation with the level outputs.
    tree_input_ids = torch.empty(0,
                                 device=self.input_ids.device,
                                 dtype=self.input_ids.dtype)
    tree_positions = torch.empty(0,
                                 device=self.positions.device,
                                 dtype=self.positions.dtype)
    tree_hidden_states = torch.empty(0,
                                     device=self.hidden_states.device,
                                     dtype=self.hidden_states.dtype)
    # Precompute the draft token positions.
    flattened_draft_positions = (
        positions.view(batch_size, -1) +
        self.tree_draft_pos_offsets[:batch_size, :])
    tree_depth = len(self.cu_drafts_per_level)
    for level in range(tree_depth - 1):
        # Get draft positions for RoPE.
        draft_positions = positions + (level + 1)
        exceeds_max_model_len = (positions +
                                 total_num_drafts) >= self.max_model_len
        # Mask out the position ids that exceed the max model length.
        # Otherwise, we may get out-of-range error in RoPE.
        draft_positions = torch.where(
            exceeds_max_model_len,
            0,
            draft_positions,
        ).view(batch_size, -1)

        if level_num_drafts > 1:
            # Repeat the positions for each draft at this level.
            draft_positions = draft_positions.repeat_interleave(
                level_num_drafts, dim=1)

        if num_children > 1:
            # Repeat draft hidden states for each child.
            draft_hidden_states = draft_hidden_states.repeat_interleave(
                num_children, dim=1)

        # Concatenate the draft tokens, positions, and hidden states.
        tree_input_ids = torch.cat([tree_input_ids, draft_token_ids],
                                   dim=1)
        tree_positions = torch.cat([tree_positions, draft_positions],
                                   dim=1)
        tree_hidden_states = torch.cat(
            [tree_hidden_states, draft_hidden_states], dim=1)

        # Build new attention metadata for the next level of drafts.
        # This is necessary to support tree attention.
        query_len = total_num_drafts
        common_attn_metadata = replace(
            common_attn_metadata,
            query_start_loc=query_len * self.arange[:batch_size + 1],
            seq_lens=common_attn_metadata.seq_lens + level_num_drafts,
            num_actual_tokens=batch_size * query_len,
            max_query_len=query_len,
        )
        attn_metadata = tree_attn_metadata_builder.build_for_drafting(
            common_attn_metadata=common_attn_metadata,
            draft_index=level + 1,
        )

        # Apply new attention metadata to all layers.
        per_layer_attn_metadata = {}
        for layer_name in self.attn_layer_names:
            per_layer_attn_metadata[layer_name] = attn_metadata

        # Consider max model length.
        attn_metadata.max_seq_len = min(attn_metadata.max_seq_len,
                                        self.max_model_len)
        # For the requests that exceed the max model length, we set the
        # sequence length to 1 to minimize their overheads in attention.
        attn_metadata.seq_lens.masked_fill_(exceeds_max_model_len, 1)

        # Compute the slot mapping.
        query_positions = flattened_draft_positions[:, level:level +
                                                    query_len]
        block_numbers = query_positions // self.block_size
        block_ids = attn_metadata.block_table.gather(dim=1,
                                                     index=block_numbers)
        slot_mapping = (block_ids * self.block_size +
                        query_positions % self.block_size)
        # Mask out the slot mappings that exceed the max model length.
        # Otherwise, the KV cache will be inadvertently updated with the
        # padding tokens.
        slot_mapping[exceeds_max_model_len] = PADDING_SLOT_ID
        attn_metadata.slot_mapping = slot_mapping.view(-1)

        # Copy inputs to buffer for cudagraph.
        num_tokens = attn_metadata.num_actual_tokens
        input_ids = tree_input_ids.view(-1)
        self.input_ids[:num_tokens] = input_ids
        self.positions[:num_tokens] = tree_positions.view(-1)
        self.hidden_states[:num_tokens] = tree_hidden_states.view(
            num_tokens, -1)

        if self.use_cuda_graph and \
                num_tokens <= self.cudagraph_batch_sizes[-1]:
            num_input_tokens = self.vllm_config.pad_for_cudagraph(
                num_tokens)
        else:
            num_input_tokens = num_tokens
        # Run the model.
        with set_forward_context(per_layer_attn_metadata,
                                 self.vllm_config,
                                 num_tokens=num_input_tokens):
            last_hidden_states, hidden_states = self.model(
                input_ids=self.input_ids[:num_input_tokens],
                positions=self.positions[:num_input_tokens],
                hidden_states=self.hidden_states[:num_input_tokens],
                inputs_embeds=None,
            )

        # Get the output hidden states for the draft tokens.
        draft_hidden_states = hidden_states[:num_tokens].view(
            batch_size, query_len, -1)[:, -level_num_drafts:]
        draft_last_hidden_states = last_hidden_states[:num_tokens].view(
            batch_size, query_len, -1)[:, -level_num_drafts:]

        # Get the output logits for the draft tokens.
        logits = self.model.compute_logits(
            draft_last_hidden_states.reshape(batch_size * level_num_drafts,
                                             -1))

        # Sample a draft token for each child at the next tree level.
        num_children = self.child_drafts_per_level[level + 1]
        if num_children == 1:
            draft_token_ids = logits.argmax(dim=-1).view(batch_size, -1)
        else:
            draft_token_ids = torch.topk(logits, num_children,
                                         dim=-1).indices.view(
                                             batch_size, -1)
        draft_token_ids_list.append(draft_token_ids)

        # Update the # drafts counters for the next tree level.
        level_num_drafts = self.cu_drafts_per_level[level +
                                                    1] - total_num_drafts
        total_num_drafts = self.cu_drafts_per_level[level + 1]
    return draft_token_ids_list
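
Both propose() and propose_tree() map draft positions to KV-cache slots with the same block-table arithmetic: a token at position p of a request lives in block p // block_size of that request's block table, at offset p % block_size. A compact sketch with a hypothetical block size and block table:

import torch

block_size = 16
# Request 0 owns physical blocks [3, 7]; request 1 owns [5, 9].
block_table_tensor = torch.tensor([[3, 7], [5, 9]])
clamped_positions = torch.tensor([17, 4])  # next draft position per request

block_numbers = clamped_positions // block_size       # tensor([1, 0])
block_ids = block_table_tensor.gather(
    dim=1, index=block_numbers.view(-1, 1)).view(-1)  # tensor([7, 5])
slot_mapping = block_ids * block_size + clamped_positions % block_size
print(slot_mapping)  # tensor([113, 84])

Positions that would exceed max_model_len are clamped beforehand and their slots overwritten with PADDING_SLOT_ID, so padding tokens never write into the KV cache.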

validate_same_kv_cache_group

validate_same_kv_cache_group(
    kv_cache_config: KVCacheConfig,
) -> None

Validate that all eagle layers belong to the same KVCacheGroup. Need this assumption to ensure all eagle layers can use the same AttentionMetadata. May extend to multiple AttentionMetadata in the future.

Source code in vllm/v1/spec_decode/eagle.py
def validate_same_kv_cache_group(self,
                                 kv_cache_config: KVCacheConfig) -> None:
    """
    Validate that all eagle layers belong to the same KVCacheGroup.
    Need this assumption to ensure all eagle layers can use the
    same AttentionMetadata.
    May extend to multiple AttentionMetadata in the future.
    """
    kv_cache_groups: dict[str, int] = {}
    for id, kv_cache_group in enumerate(kv_cache_config.kv_cache_groups):
        for layer_name in kv_cache_group.layer_names:
            kv_cache_groups[layer_name] = id
    assert len(
        set([
            kv_cache_groups[layer_name]
            for layer_name in self.attn_layer_names
        ])
    ) == 1, "All eagle layers should belong to the same kv cache group"

compute_probs_and_sample_next_token

compute_probs_and_sample_next_token(
    logits: Tensor, sampling_metadata: SamplingMetadata
) -> tuple[Tensor, Tensor]
Source code in vllm/v1/spec_decode/eagle.py
def compute_probs_and_sample_next_token(
    logits: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> tuple[torch.Tensor, torch.Tensor]:
    if sampling_metadata.all_greedy:
        # For greedy requests, draft_probs is not used in rejection sampling.
        # Therefore, we can just return the logits.
        probs = logits
        next_token_ids = logits.argmax(dim=-1)
        return next_token_ids, probs

    is_greedy = sampling_metadata.temperature == -1
    temperature = torch.where(is_greedy, 1.0, sampling_metadata.temperature)
    logits.div_(temperature.view(-1, 1))
    probs = logits.softmax(dim=-1, dtype=torch.float32)

    # NOTE(woosuk): Currently, we ignore most of the sampling parameters in
    # generating the draft tokens. We only use the temperature. While this
    # could degrade the acceptance rate, it does not affect the distribution
    # of the generated tokens after rejection sampling.

    # TODO(woosuk): Consider seeds.
    q = torch.empty_like(probs)
    q.exponential_()
    # NOTE(woosuk): We shouldn't use `probs.div_(q)` because the draft_probs
    # will be used later for rejection sampling.
    next_token_ids = probs.div(q).argmax(dim=-1).view(-1)
    if not sampling_metadata.all_random:
        greedy_token_ids = probs.argmax(dim=-1)
        next_token_ids = torch.where(
            is_greedy,
            greedy_token_ids,
            next_token_ids,
        )
    return next_token_ids, probs
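
The division by Exponential(1) noise followed by argmax in compute_probs_and_sample_next_token is the Gumbel-max trick in disguise: since -log(E) is standard Gumbel for E ~ Exp(1), argmax(p / E) equals argmax(log p + Gumbel noise), which samples from the categorical distribution p while leaving probs untouched for rejection sampling. A quick empirical check on a hypothetical three-token vocabulary (the sample count is arbitrary):

import torch

torch.manual_seed(0)
probs = torch.tensor([[0.1, 0.6, 0.3]])

counts = torch.zeros(3)
for _ in range(20_000):
    q = torch.empty_like(probs).exponential_()
    token = probs.div(q).argmax(dim=-1)  # one draw; probs is not modified
    counts[token] += 1
print(counts / counts.sum())  # approximately tensor([0.10, 0.60, 0.30])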