fix lora moe sharding when rank < max_lora_rank (#31994)

Signed-off-by: gnovack <gnovack@amazon.com>
Co-authored-by: Jee Jee Li <pandaleefree@gmail.com>
Author: gnovack
Date: 2026-01-08 22:43:25 -08:00
Committed by: GitHub
Parent: 707b240d7e
Commit: bde38c11df
2 changed files with 6 additions and 8 deletions

@@ -95,7 +95,6 @@ def test_gpt_oss_lora_tp2(gptoss20b_lora_files, fully_sharded_loras):
     max_model_len=1024,
     enable_lora=True,
     max_loras=2,
-    max_lora_rank=8,
     max_num_seqs=2,
     max_num_batched_tokens=2048,
     tensor_parallel_size=2,
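
Context note (not part of the commit): the test previously pinned max_lora_rank=8, which presumably matched the adapter's rank and so never exercised the rank < max_lora_rank path; dropping the line lets the engine default apply, making the adapter rank smaller than max_lora_rank. Below is a minimal standalone sketch of the kind of failure mode the title describes. All names, shapes, and values are illustrative assumptions, not vLLM internals.

```python
import torch

# Hypothetical illustration: why tensor-parallel shard offsets for a
# fully sharded LoRA must come from the adapter's true rank rather than
# the engine-wide max_lora_rank when the two differ.
max_lora_rank = 16   # engine-wide cap (assumed)
actual_rank = 8      # rank of the loaded adapter (assumed)
tp_size = 2          # tensor-parallel world size

# The LoRA-A output buffer is padded out to max_lora_rank; only the
# first actual_rank entries carry data, the tail is zero padding.
lora_a_out = torch.zeros(max_lora_rank)
lora_a_out[:actual_rank] = torch.randn(actual_rank)

# Buggy sharding: offsets derived from max_lora_rank hand TP rank 1 a
# slice that is entirely padding, so its contribution silently vanishes.
shard = max_lora_rank // tp_size
buggy = [lora_a_out[i * shard:(i + 1) * shard] for i in range(tp_size)]
assert torch.all(buggy[1] == 0)  # rank 1 sees only zeros

# Fixed sharding: offsets derived from the adapter's actual rank give
# every TP rank a slice of real data.
shard = actual_rank // tp_size
fixed = [lora_a_out[i * shard:(i + 1) * shard] for i in range(tp_size)]
assert all(torch.any(s != 0) for s in fixed)
```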