[Bugfix][EPLB] Prevent user-provided EPLB config from being overwritten with defaults (#29911)

Signed-off-by: Sage Moore <sage@neuralmagic.com>
Author: Sage Moore
Date: 2025-12-02 14:20:22 -08:00
Committed by: GitHub
Parent: 6fc5841db1
Commit: e6f114ac25
2 changed files with 9 additions and 21 deletions
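
Why the bug happened: the deprecated per-field EPLB CLI arguments were declared on EngineArgs with defaults taken directly from EPLBConfig (first hunk below), so they were never None. The forwarding block in the second hunk therefore always ran and wrote those defaults back into self.eplb_config, clobbering anything the user had set through the structured EPLB config. A minimal, self-contained Python sketch of the pattern (simplified names and arbitrary defaults, not the actual vLLM classes):

    from dataclasses import dataclass, field

    @dataclass
    class EPLBConfig:
        window_size: int = 1000
        step_interval: int = 3000

    @dataclass
    class EngineArgs:
        eplb_config: EPLBConfig = field(default_factory=EPLBConfig)
        # Deprecated flat args, defaulted from EPLBConfig -- so never None.
        eplb_window_size: int = EPLBConfig.window_size
        eplb_step_interval: int = EPLBConfig.step_interval

        def create_config(self) -> EPLBConfig:
            # "Forward the deprecated CLI args to the EPLB config."
            # Because the defaults above are ints rather than None, these
            # branches always fire and overwrite user-provided values.
            if self.eplb_window_size is not None:
                self.eplb_config.window_size = self.eplb_window_size
            if self.eplb_step_interval is not None:
                self.eplb_config.step_interval = self.eplb_step_interval
            return self.eplb_config

    # User supplies a custom EPLB config and none of the deprecated flags:
    args = EngineArgs(eplb_config=EPLBConfig(window_size=500))
    print(args.create_config().window_size)  # prints 1000, not 500 -- the bug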


@@ -421,10 +421,6 @@ class EngineArgs:
     )
     _api_process_count: int = ParallelConfig._api_process_count
     _api_process_rank: int = ParallelConfig._api_process_rank
-    num_redundant_experts: int = EPLBConfig.num_redundant_experts
-    eplb_window_size: int = EPLBConfig.window_size
-    eplb_step_interval: int = EPLBConfig.step_interval
-    eplb_log_balancedness: bool = EPLBConfig.log_balancedness
     max_parallel_loading_workers: int | None = (
         ParallelConfig.max_parallel_loading_workers
     )
@@ -1582,16 +1578,6 @@ class EngineArgs:
             )
             self.disable_nccl_for_dp_synchronization = True
-        # Forward the deprecated CLI args to the EPLB config.
-        if self.num_redundant_experts is not None:
-            self.eplb_config.num_redundant_experts = self.num_redundant_experts
-        if self.eplb_window_size is not None:
-            self.eplb_config.window_size = self.eplb_window_size
-        if self.eplb_step_interval is not None:
-            self.eplb_config.step_interval = self.eplb_step_interval
-        if self.eplb_log_balancedness is not None:
-            self.eplb_config.log_balancedness = self.eplb_log_balancedness
         parallel_config = ParallelConfig(
             pipeline_parallel_size=self.pipeline_parallel_size,
             tensor_parallel_size=self.tensor_parallel_size,
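
With the defaulted field declarations and the forwarding block removed, EngineArgs no longer carries shadow copies of the EPLB fields, so a config the user provides is passed through untouched. For example (assuming the JSON-valued --eplb-config flag that superseded the deprecated flat flags; the flag names here are an assumption, not taken from this diff):

    vllm serve <model> --enable-eplb --eplb-config '{"window_size": 500}'

Before this fix, window_size would silently revert to the EPLBConfig default; after it, the value from --eplb-config is kept.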