Update rope_scaling to rope_parameters in preparation for Transformers v5 (#28542)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Author: Harry Mellor
Date: 2025-11-19 18:06:36 +01:00 (committed by GitHub)
Commit: a8b70304d6
Parent: d44e9df7d4
104 changed files with 542 additions and 910 deletions
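For context, a minimal sketch of what the rename means at the config level. Only rope_theta, rope_scaling, and rope_parameters are confirmed by the diff below; the keys inside the dicts ("rope_type", "factor") and the 10000.0 default are illustrative assumptions. Since the call sites drop base=rope_theta while passing only rope_parameters, the base frequency must now travel inside that single dict.

# Before: base frequency and optional scaling are carried separately.
old_config = {
    "rope_theta": 10000.0,
    "rope_scaling": {"rope_type": "linear", "factor": 2.0},  # or None
}

# After: one rope_parameters dict carries everything, theta included.
new_config = {
    "rope_parameters": {
        "rope_type": "linear",
        "rope_theta": 10000.0,
        "factor": 2.0,
    },
}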


@@ -36,6 +36,7 @@ from vllm.model_executor.layers.vocab_parallel_embedding import (
 )
 from vllm.model_executor.model_loader.weight_utils import default_weight_loader
 from vllm.sequence import IntermediateTensors
+from vllm.transformers_utils.configs.step3_vl import Step3TextConfig
 
 from .interfaces import SupportsPP
 from .utils import (
@@ -144,9 +145,8 @@ class Step3TextAttention(nn.Module):
         num_heads: int,
         num_kv_heads: int,
         norm_eps: float,
-        rope_theta: int,
+        rope_parameters: dict[str, Any],
         share_q_dim: int | None = None,
-        rope_scaling: dict[str, Any] | None = None,
         max_position_embedding: int = 8192,
         head_dim: int = 256,
         cache_config: CacheConfig | None = None,
@@ -198,8 +198,7 @@
             self.head_dim,
             rotary_dim=self.head_dim,
             max_position=max_position_embedding,
-            base=rope_theta,
-            rope_scaling=rope_scaling,
+            rope_parameters=rope_parameters,
         )
         scaling = self.head_dim**-0.5
         self.attn = Attention(
@@ -227,15 +226,13 @@
 class Step3TextDecoderLayer(nn.Module):
     def __init__(
         self,
-        config: ModelConfig,
+        config: Step3TextConfig,
         cache_config: CacheConfig | None = None,
         quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ) -> None:
         super().__init__()
-        config = config.hf_config
         self.hidden_size = config.hidden_size
-        rope_scaling = getattr(config, "rope_scaling", None)
         self.self_attn = Step3TextAttention(
             hidden_size=self.hidden_size,
@@ -247,8 +244,7 @@ class Step3TextDecoderLayer(nn.Module):
             max_position_embedding=config.max_position_embedding,
             head_dim=config.head_dim,
             share_q_dim=config.share_q_dim,
-            rope_theta=config.rope_theta,
-            rope_scaling=rope_scaling,
+            rope_parameters=config.rope_parameters,
             prefix=f"{prefix}.self_attn",
         )
@@ -338,7 +334,7 @@ class Step3TextModel(nn.Module):
         self.start_layer, self.end_layer, self.layers = make_layers(
             config.num_hidden_layers,
             lambda prefix: Step3TextDecoderLayer(
-                config=vllm_config.model_config,
+                config=config,
                 cache_config=cache_config,
                 quant_config=quant_config,
                 prefix=prefix,
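Downstream code that still reads rope_scaling and rope_theta off a config object can bridge the rename with a small compatibility shim. The helper below is a hypothetical sketch, not part of this commit: resolve_rope_parameters is an invented name, and the 10000.0 fallback is an assumption (the conventional RoPE base frequency).

from typing import Any


def resolve_rope_parameters(config: Any) -> dict[str, Any]:
    # Hypothetical shim, not part of this commit: normalize either config
    # layout into the single-dict form the new code paths expect.
    params = getattr(config, "rope_parameters", None)
    if params is not None:
        return dict(params)
    # Legacy layout: optional rope_scaling dict plus a separate rope_theta.
    params = dict(getattr(config, "rope_scaling", None) or {})
    # 10000.0 is the conventional RoPE base frequency; assumed default.
    params.setdefault("rope_theta", getattr(config, "rope_theta", 10000.0))
    return params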