diff --git a/vllm/transformers_utils/configs/qwen3_5.py b/vllm/transformers_utils/configs/qwen3_5.py
index 9d43986a6..54d4d17dd 100644
--- a/vllm/transformers_utils/configs/qwen3_5.py
+++ b/vllm/transformers_utils/configs/qwen3_5.py
@@ -68,10 +68,10 @@ class Qwen3_5TextConfig(PretrainedConfig):
         eos_token_id=None,
         **kwargs,
     ):
-        kwargs["ignore_keys_at_rope_validation"] = [
+        kwargs["ignore_keys_at_rope_validation"] = {
             "mrope_section",
             "mrope_interleaved",
-        ]
+        }
         self.vocab_size = vocab_size
         self.max_position_embeddings = max_position_embeddings
         self.hidden_size = hidden_size
diff --git a/vllm/transformers_utils/configs/qwen3_5_moe.py b/vllm/transformers_utils/configs/qwen3_5_moe.py
index 41a1f7ed9..509b17467 100644
--- a/vllm/transformers_utils/configs/qwen3_5_moe.py
+++ b/vllm/transformers_utils/configs/qwen3_5_moe.py
@@ -75,10 +75,10 @@ class Qwen3_5MoeTextConfig(PretrainedConfig):
         eos_token_id=None,
         **kwargs,
     ):
-        kwargs["ignore_keys_at_rope_validation"] = [
+        kwargs["ignore_keys_at_rope_validation"] = {
             "mrope_section",
             "mrope_interleaved",
-        ]
+        }
         self.vocab_size = vocab_size
         self.max_position_embeddings = max_position_embeddings
         self.hidden_size = hidden_size
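
Why a set rather than a list: transformers' RoPE validation subtracts the ignore keys from the set of received rope-scaling keys using the in-place set-difference operator, and that operator only accepts another set, so passing a list raises TypeError. A minimal sketch of the failure mode, assuming the validator reduces a set of received keys the way transformers' _check_received_keys does (an assumption; the validator internals are not part of this diff):

    # Assumed validator behavior: received_keys -= ignore_keys
    received_keys = {"rope_type", "mrope_section", "mrope_interleaved"}

    received_keys -= {"mrope_section", "mrope_interleaved"}  # set -= set: OK -> {"rope_type"}

    try:
        received_keys -= ["rope_type"]  # set -= list: raises TypeError
    except TypeError as exc:
        print(exc)  # unsupported operand type(s) for -=: 'set' and 'list'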