Skip to content

Commit d0e186c

Browse files
[V0 Deprecation] Remove unused context_len and seq_len from M-RoPE (#28395)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
1 parent f080a83 commit d0e186c

File tree

13 files changed

+2
-39
lines changed

13 files changed

+2
-39
lines changed

vllm/model_executor/models/ernie45_vl.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1435,8 +1435,6 @@ def get_mrope_input_positions(
14351435
hf_config: PretrainedConfig,
14361436
image_grid_thw: list[list[int]] | torch.Tensor,
14371437
video_grid_thw: list[list[int]] | torch.Tensor,
1438-
context_len: int = 0,
1439-
seq_len: int | None = None,
14401438
second_per_grid_ts: list[float] | None = None,
14411439
audio_feature_lengths: torch.Tensor | None = None,
14421440
use_audio_in_video: bool = False,
@@ -1569,7 +1567,6 @@ def get_mrope_input_positions(
15691567
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1))
15701568

15711569
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
1572-
llm_positions = llm_positions[:, context_len:seq_len]
15731570
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
15741571
return llm_positions, mrope_position_delta
15751572

vllm/model_executor/models/glm4_1v.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1622,8 +1622,6 @@ def get_mrope_input_positions(
16221622
image_grid_thw: list[list[int]] | torch.Tensor | None,
16231623
video_grid_thw: list[list[int]] | torch.Tensor | None,
16241624
second_per_grid_ts: list[float] | None = None,
1625-
context_len: int = 0,
1626-
seq_len: int | None = None,
16271625
audio_feature_lengths: torch.Tensor | None = None,
16281626
use_audio_in_video: bool = False,
16291627
) -> tuple[torch.Tensor, int]:
@@ -1754,7 +1752,6 @@ def get_mrope_input_positions(
17541752
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1))
17551753

17561754
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
1757-
llm_positions = llm_positions[:, context_len:seq_len]
17581755
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
17591756
return llm_positions, mrope_position_delta
17601757

vllm/model_executor/models/glm4v.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -625,8 +625,6 @@ def get_mrope_input_positions(
625625
hf_config: PretrainedConfig,
626626
image_grid_thw: list[list[int]] | torch.Tensor,
627627
video_grid_thw: list[list[int]] | torch.Tensor,
628-
context_len: int = 0,
629-
seq_len: int | None = None,
630628
second_per_grid_ts: list[float] | None = None,
631629
audio_feature_lengths: torch.Tensor | None = None,
632630
use_audio_in_video: bool = False,
@@ -758,7 +756,6 @@ def get_mrope_input_positions(
758756
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1))
759757

760758
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
761-
llm_positions = llm_positions[:, context_len:seq_len]
762759
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
763760
return llm_positions, mrope_position_delta
764761

vllm/model_executor/models/interfaces.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -995,8 +995,6 @@ def get_mrope_input_positions(
995995
image_grid_thw: list[list[int]] | torch.Tensor | None,
996996
video_grid_thw: list[list[int]] | torch.Tensor | None,
997997
second_per_grid_ts: list[float] | None = None,
998-
context_len: int = 0,
999-
seq_len: int | None = None,
1000998
audio_feature_lengths: torch.Tensor | None = None,
1001999
use_audio_in_video: bool = False,
10021000
) -> tuple[torch.Tensor, int]:
@@ -1012,8 +1010,6 @@ def get_mrope_input_positions(
10121010
image_grid_thw: Image grid dimensions (t, h, w)
10131011
video_grid_thw: Video grid dimensions (t, h, w)
10141012
second_per_grid_ts: Seconds per grid timestep for videos
1015-
context_len: Context length
1016-
seq_len: Sequence length
10171013
audio_feature_lengths: Audio feature lengths for multimodal models
10181014
use_audio_in_video: Whether to use audio in video for interleaving
10191015

vllm/model_executor/models/keye.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1630,8 +1630,6 @@ def get_mrope_input_positions(
16301630
hf_config: PretrainedConfig,
16311631
image_grid_thw: list[list[int]] | torch.Tensor,
16321632
video_grid_thw: list[list[int]] | torch.Tensor,
1633-
context_len: int = 0,
1634-
seq_len: int | None = None,
16351633
second_per_grid_ts: list[float] | None = None,
16361634
audio_feature_lengths: torch.Tensor | None = None,
16371635
use_audio_in_video: bool = False,
@@ -1759,6 +1757,5 @@ def split_thw(grid_thw: torch.Tensor | list[int]) -> list[list[int]]:
17591757

17601758
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
17611759
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
1762-
llm_positions = llm_positions[:, context_len:seq_len]
17631760

17641761
return llm_positions, mrope_position_delta

vllm/model_executor/models/keye_vl1_5.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -600,8 +600,6 @@ def get_mrope_input_positions(
600600
hf_config: PretrainedConfig,
601601
image_grid_thw: list[list[int]] | torch.Tensor,
602602
video_grid_thw: list[list[int]] | torch.Tensor,
603-
context_len: int = 0,
604-
seq_len: int | None = None,
605603
second_per_grid_ts: list[float] | None = None,
606604
audio_feature_lengths: torch.Tensor | None = None,
607605
use_audio_in_video: bool = False,
@@ -729,6 +727,5 @@ def split_thw(grid_thw: torch.Tensor | list[int]) -> list[list[int]]:
729727

730728
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
731729
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
732-
llm_positions = llm_positions[:, context_len:seq_len]
733730

734731
return llm_positions, mrope_position_delta

vllm/model_executor/models/paddleocr_vl.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1179,8 +1179,6 @@ def get_mrope_input_positions(
11791179
image_grid_thw: list[list[int]] | torch.Tensor,
11801180
video_grid_thw: list[list[int]] | torch.Tensor,
11811181
second_per_grid_ts: list[float],
1182-
context_len: int = 0,
1183-
seq_len: int | None = None,
11841182
audio_feature_lengths: torch.Tensor | None = None,
11851183
use_audio_in_video: bool = False,
11861184
) -> tuple[torch.Tensor, int]:
@@ -1293,7 +1291,6 @@ def get_mrope_input_positions(
12931291

12941292
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
12951293
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
1296-
llm_positions = llm_positions[:, context_len:seq_len]
12971294

12981295
return llm_positions, mrope_position_delta
12991296

vllm/model_executor/models/qwen2_5_omni_thinker.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -927,8 +927,6 @@ def get_mrope_input_positions(
927927
image_grid_thw: list[list[int]] | torch.Tensor,
928928
video_grid_thw: list[list[int]] | torch.Tensor,
929929
second_per_grid_ts: list[float] | None = None,
930-
context_len: int = 0,
931-
seq_len: int | None = None,
932930
audio_feature_lengths: torch.Tensor | None = None,
933931
use_audio_in_video: bool = False,
934932
) -> tuple[torch.Tensor, int]:
@@ -1125,7 +1123,6 @@ def get_mrope_input_positions(
11251123
mrope_position_delta = (
11261124
torch.cat(llm_pos_ids_list, dim=1).max() + 1 - len(src_item)
11271125
)
1128-
llm_positions = llm_positions[:, context_len:seq_len]
11291126

11301127
return llm_positions, mrope_position_delta
11311128

vllm/model_executor/models/qwen2_5_vl.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1118,8 +1118,6 @@ def get_mrope_input_positions(
11181118
image_grid_thw: list[list[int]] | torch.Tensor,
11191119
video_grid_thw: list[list[int]] | torch.Tensor,
11201120
second_per_grid_ts: list[float],
1121-
context_len: int = 0,
1122-
seq_len: int | None = None,
11231121
audio_feature_lengths: torch.Tensor | None = None,
11241122
use_audio_in_video: bool = False,
11251123
) -> tuple[torch.Tensor, int]:
@@ -1232,7 +1230,6 @@ def get_mrope_input_positions(
12321230

12331231
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
12341232
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
1235-
llm_positions = llm_positions[:, context_len:seq_len]
12361233

12371234
return llm_positions, mrope_position_delta
12381235

vllm/model_executor/models/qwen2_vl.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1240,8 +1240,6 @@ def get_mrope_input_positions(
12401240
image_grid_thw: list[list[int]] | torch.Tensor | None,
12411241
video_grid_thw: list[list[int]] | torch.Tensor | None,
12421242
second_per_grid_ts: list[float] | None = None,
1243-
context_len: int = 0,
1244-
seq_len: int | None = None,
12451243
audio_feature_lengths: torch.Tensor | None = None,
12461244
use_audio_in_video: bool = False,
12471245
) -> tuple[torch.Tensor, int]:
@@ -1360,7 +1358,6 @@ def get_mrope_input_positions(
13601358

13611359
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
13621360
mrope_position_delta = (llm_positions.max() + 1 - len(input_tokens)).item()
1363-
llm_positions = llm_positions[:, context_len:seq_len]
13641361

13651362
return llm_positions, mrope_position_delta
13661363

0 commit comments

Comments (0)