Remove `padding_idx` from models that don't use it for better Transformers v5 compatibility (#35189)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
This commit is contained in:
Harry Mellor
2026-02-24 16:04:46 +00:00
committed by GitHub
parent 60da0e1544
commit c38b8d5a31
14 changed files with 0 additions and 14 deletions

View File

@@ -443,7 +443,6 @@ class Qwen3MoeModel(nn.Module):
eplb_config = parallel_config.eplb_config
self.num_redundant_experts = eplb_config.num_redundant_experts
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.config = config
self.quant_config = quant_config